# Source: zangree/ryu | ryu/services/protocols/bgp/rtconf/base.py
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Running or runtime configuration base classes.
"""
from abc import ABCMeta
from abc import abstractmethod
import functools
import numbers
import logging
import six
import uuid
from ryu.services.protocols.bgp.base import add_bgp_error_metadata
from ryu.services.protocols.bgp.base import BGPSException
from ryu.services.protocols.bgp.base import get_validator
from ryu.services.protocols.bgp.base import RUNTIME_CONF_ERROR_CODE
from ryu.services.protocols.bgp.base import validate
from ryu.services.protocols.bgp.utils import validation
from ryu.services.protocols.bgp.utils.validation import is_valid_old_asn
LOG = logging.getLogger('bgpspeaker.rtconf.base')
#
# Nested settings.
#
CAP_REFRESH = 'cap_refresh'
CAP_ENHANCED_REFRESH = 'cap_enhanced_refresh'
CAP_MBGP_IPV4 = 'cap_mbgp_ipv4'
CAP_MBGP_IPV6 = 'cap_mbgp_ipv6'
CAP_MBGP_VPNV4 = 'cap_mbgp_vpnv4'
CAP_MBGP_VPNV6 = 'cap_mbgp_vpnv6'
CAP_RTC = 'cap_rtc'
RTC_AS = 'rtc_as'
HOLD_TIME = 'hold_time'
# Controls how many prefixes can be received from a neighbor.
# A value of 0 indicates no limit, and the other related options are ignored.
# The current behavior is to log that the limit has been reached.
MAX_PREFIXES = 'max_prefixes'
# Has same meaning as: http://www.juniper.net/techpubs/software/junos/junos94
# /swconfig-routing/disabling-suppression-of-route-
# advertisements.html#id-13255463
ADVERTISE_PEER_AS = 'advertise_peer_as'
# MED - MULTI_EXIT_DISC
MULTI_EXIT_DISC = 'multi_exit_disc'
# Extended community attribute route origin.
SITE_OF_ORIGINS = 'site_of_origins'
# Constants related to errors.
CONF_NAME = 'conf_name'
CONF_VALUE = 'conf_value'
# Max. value limits
MAX_NUM_IMPORT_RT = 1000
MAX_NUM_EXPORT_RT = 250
MAX_NUM_SOO = 10
# =============================================================================
# Runtime configuration errors or exceptions.
# =============================================================================
@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=1,
def_desc='Error with runtime-configuration.')
class RuntimeConfigError(BGPSException):
"""Base class for all runtime configuration errors.
"""
pass
@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=2,
def_desc='Missing required configuration.')
class MissingRequiredConf(RuntimeConfigError):
"""Exception raised when trying to configure with missing required
settings.
"""
def __init__(self, **kwargs):
conf_name = kwargs.get('conf_name')
if conf_name:
super(MissingRequiredConf, self).__init__(
desc='Missing required configuration: %s' % conf_name)
else:
super(MissingRequiredConf, self).__init__(desc=kwargs.get('desc'))
@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=3,
def_desc='Incorrect Type for configuration.')
class ConfigTypeError(RuntimeConfigError):
"""Exception raised when configuration value type miss-match happens.
"""
def __init__(self, **kwargs):
conf_name = kwargs.get(CONF_NAME)
conf_value = kwargs.get(CONF_VALUE)
if conf_name and conf_value:
super(ConfigTypeError, self).__init__(
desc='Incorrect Type %s for configuration: %s' %
(conf_value, conf_name))
elif conf_name:
super(ConfigTypeError, self).__init__(
desc='Incorrect Type for configuration: %s' % conf_name)
else:
super(ConfigTypeError, self).__init__(desc=kwargs.get('desc'))
@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=4,
def_desc='Incorrect Value for configuration.')
class ConfigValueError(RuntimeConfigError):
"""Exception raised when configuration value is of correct type but
incorrect value.
"""
def __init__(self, **kwargs):
conf_name = kwargs.get(CONF_NAME)
conf_value = kwargs.get(CONF_VALUE)
if conf_name and conf_value:
super(ConfigValueError, self).__init__(
desc='Incorrect Value %s for configuration: %s' %
(conf_value, conf_name))
elif conf_name:
super(ConfigValueError, self).__init__(
desc='Incorrect Value for configuration: %s' % conf_name)
else:
super(ConfigValueError, self).__init__(desc=kwargs.get('desc'))
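# Illustrative sketch (not part of the original module): the metadata decorator
# above stamps each exception class with a (code, sub_code) pair, and all of
# them derive from RuntimeConfigError, so callers can catch the whole family at
# once. The setting name 'local_as' below is a hypothetical example value.
def _example_runtime_conf_errors():
    try:
        raise MissingRequiredConf(conf_name='local_as')
    except RuntimeConfigError:
        # MissingRequiredConf, ConfigTypeError and ConfigValueError are all
        # RuntimeConfigError subclasses.
        pass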
# =============================================================================
# Configuration base classes.
# =============================================================================
class BaseConf(object):
"""Base class for a set of configuration values.
Configurations can be required or optional. Also acts as a container of
configuration change listeners.
"""
__metaclass__ = ABCMeta
def __init__(self, **kwargs):
self._req_settings = self.get_req_settings()
self._opt_settings = self.get_opt_settings()
self._valid_evts = self.get_valid_evts()
self._listeners = {}
self._settings = {}
# validate required and unknown settings
self._validate_req_unknown_settings(**kwargs)
# Initialize configuration settings.
self._init_req_settings(**kwargs)
self._init_opt_settings(**kwargs)
@property
def settings(self):
"""Returns a copy of current settings."""
return self._settings.copy()
    @classmethod
    def get_valid_evts(cls):
        return set()
    @classmethod
    def get_req_settings(cls):
        return set()
    @classmethod
    def get_opt_settings(cls):
        return set()
@abstractmethod
def _init_opt_settings(self, **kwargs):
"""Sub-classes should override this method to initialize optional
settings.
"""
pass
@abstractmethod
def update(self, **kwargs):
# Validate given values
self._validate_req_unknown_settings(**kwargs)
def _validate_req_unknown_settings(self, **kwargs):
"""Checks if required settings are present.
        Also checks that no unknown settings are present.
"""
# Validate given configuration.
self._all_attrs = (self._req_settings | self._opt_settings)
if not kwargs and len(self._req_settings) > 0:
raise MissingRequiredConf(desc='Missing all required attributes.')
given_attrs = frozenset(kwargs.keys())
unknown_attrs = given_attrs - self._all_attrs
if unknown_attrs:
raise RuntimeConfigError(desc=(
'Unknown attributes: %s' %
', '.join([str(i) for i in unknown_attrs]))
)
missing_req_settings = self._req_settings - given_attrs
if missing_req_settings:
raise MissingRequiredConf(conf_name=list(missing_req_settings))
def _init_req_settings(self, **kwargs):
for req_attr in self._req_settings:
req_attr_value = kwargs.get(req_attr)
if req_attr_value is None:
                raise MissingRequiredConf(conf_name=req_attr)
# Validate attribute value
req_attr_value = get_validator(req_attr)(req_attr_value)
self._settings[req_attr] = req_attr_value
def add_listener(self, evt, callback):
# if (evt not in self.get_valid_evts()):
# raise RuntimeConfigError(desc=('Unknown event %s' % evt))
listeners = self._listeners.get(evt, None)
if not listeners:
listeners = set()
self._listeners[evt] = listeners
listeners.update([callback])
def remove_listener(self, evt, callback):
if evt in self.get_valid_evts():
listeners = self._listeners.get(evt, None)
if listeners and (callback in listeners):
listeners.remove(callback)
return True
return False
def _notify_listeners(self, evt, value):
listeners = self._listeners.get(evt, [])
for callback in listeners:
callback(ConfEvent(self, evt, value))
def __repr__(self):
return '%s(%r)' % (self.__class__, self._settings)
class ConfWithId(BaseConf):
"""Configuration settings related to identity."""
# Config./resource identifier.
ID = 'id'
# Config./resource name.
NAME = 'name'
# Config./resource description.
DESCRIPTION = 'description'
UPDATE_NAME_EVT = 'update_name_evt'
UPDATE_DESCRIPTION_EVT = 'update_description_evt'
VALID_EVT = frozenset([UPDATE_NAME_EVT, UPDATE_DESCRIPTION_EVT])
OPTIONAL_SETTINGS = frozenset([ID, NAME, DESCRIPTION])
def __init__(self, **kwargs):
super(ConfWithId, self).__init__(**kwargs)
@classmethod
def get_opt_settings(cls):
self_confs = super(ConfWithId, cls).get_opt_settings()
self_confs.update(ConfWithId.OPTIONAL_SETTINGS)
return self_confs
@classmethod
def get_req_settings(cls):
self_confs = super(ConfWithId, cls).get_req_settings()
return self_confs
@classmethod
def get_valid_evts(cls):
self_valid_evts = super(ConfWithId, cls).get_valid_evts()
self_valid_evts.update(ConfWithId.VALID_EVT)
return self_valid_evts
def _init_opt_settings(self, **kwargs):
super(ConfWithId, self)._init_opt_settings(**kwargs)
self._settings[ConfWithId.ID] = \
compute_optional_conf(ConfWithId.ID, str(uuid.uuid4()), **kwargs)
self._settings[ConfWithId.NAME] = \
compute_optional_conf(ConfWithId.NAME, str(self), **kwargs)
self._settings[ConfWithId.DESCRIPTION] = \
compute_optional_conf(ConfWithId.DESCRIPTION, str(self), **kwargs)
@property
def id(self):
return self._settings[ConfWithId.ID]
@property
def name(self):
return self._settings[ConfWithId.NAME]
@name.setter
def name(self, new_name):
old_name = self.name
if not new_name:
new_name = repr(self)
else:
get_validator(ConfWithId.NAME)(new_name)
if old_name != new_name:
self._settings[ConfWithId.NAME] = new_name
self._notify_listeners(ConfWithId.UPDATE_NAME_EVT,
(old_name, self.name))
@property
def description(self):
return self._settings[ConfWithId.DESCRIPTION]
@description.setter
def description(self, new_description):
old_desc = self.description
if not new_description:
new_description = str(self)
else:
get_validator(ConfWithId.DESCRIPTION)(new_description)
if old_desc != new_description:
self._settings[ConfWithId.DESCRIPTION] = new_description
self._notify_listeners(ConfWithId.UPDATE_DESCRIPTION_EVT,
(old_desc, self.description))
def update(self, **kwargs):
# Update inherited configurations
super(ConfWithId, self).update(**kwargs)
self.name = compute_optional_conf(ConfWithId.NAME,
str(self),
**kwargs)
self.description = compute_optional_conf(ConfWithId.DESCRIPTION,
str(self),
**kwargs)
class ConfWithStats(BaseConf):
"""Configuration settings related to statistics collection."""
# Enable or disable statistics logging.
STATS_LOG_ENABLED = 'statistics_log_enabled'
DEFAULT_STATS_LOG_ENABLED = False
# Statistics logging time.
STATS_TIME = 'statistics_interval'
DEFAULT_STATS_TIME = 60
UPDATE_STATS_LOG_ENABLED_EVT = 'update_stats_log_enabled_evt'
UPDATE_STATS_TIME_EVT = 'update_stats_time_evt'
VALID_EVT = frozenset([UPDATE_STATS_LOG_ENABLED_EVT,
UPDATE_STATS_TIME_EVT])
OPTIONAL_SETTINGS = frozenset([STATS_LOG_ENABLED, STATS_TIME])
def __init__(self, **kwargs):
super(ConfWithStats, self).__init__(**kwargs)
def _init_opt_settings(self, **kwargs):
super(ConfWithStats, self)._init_opt_settings(**kwargs)
self._settings[ConfWithStats.STATS_LOG_ENABLED] = \
compute_optional_conf(ConfWithStats.STATS_LOG_ENABLED,
ConfWithStats.DEFAULT_STATS_LOG_ENABLED,
**kwargs)
self._settings[ConfWithStats.STATS_TIME] = \
compute_optional_conf(ConfWithStats.STATS_TIME,
ConfWithStats.DEFAULT_STATS_TIME,
**kwargs)
@property
def stats_log_enabled(self):
return self._settings[ConfWithStats.STATS_LOG_ENABLED]
@stats_log_enabled.setter
def stats_log_enabled(self, enabled):
get_validator(ConfWithStats.STATS_LOG_ENABLED)(enabled)
if enabled != self.stats_log_enabled:
self._settings[ConfWithStats.STATS_LOG_ENABLED] = enabled
self._notify_listeners(ConfWithStats.UPDATE_STATS_LOG_ENABLED_EVT,
enabled)
@property
def stats_time(self):
return self._settings[ConfWithStats.STATS_TIME]
@stats_time.setter
def stats_time(self, stats_time):
get_validator(ConfWithStats.STATS_TIME)(stats_time)
if stats_time != self.stats_time:
self._settings[ConfWithStats.STATS_TIME] = stats_time
self._notify_listeners(ConfWithStats.UPDATE_STATS_TIME_EVT,
stats_time)
@classmethod
def get_opt_settings(cls):
confs = super(ConfWithStats, cls).get_opt_settings()
confs.update(ConfWithStats.OPTIONAL_SETTINGS)
return confs
@classmethod
def get_valid_evts(cls):
valid_evts = super(ConfWithStats, cls).get_valid_evts()
valid_evts.update(ConfWithStats.VALID_EVT)
return valid_evts
def update(self, **kwargs):
# Update inherited configurations
super(ConfWithStats, self).update(**kwargs)
self.stats_log_enabled = \
compute_optional_conf(ConfWithStats.STATS_LOG_ENABLED,
ConfWithStats.DEFAULT_STATS_LOG_ENABLED,
**kwargs)
self.stats_time = \
compute_optional_conf(ConfWithStats.STATS_TIME,
ConfWithStats.DEFAULT_STATS_TIME,
**kwargs)
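# Illustrative sketch (not part of the original module): ConfWithStats fills in
# its optional settings from the defaults above and re-validates any value
# assigned later through the property setters.
def _example_conf_with_stats():
    conf = ConfWithStats()
    assert conf.stats_log_enabled is ConfWithStats.DEFAULT_STATS_LOG_ENABLED
    assert conf.stats_time == ConfWithStats.DEFAULT_STATS_TIME
    conf.stats_time = 120  # runs validate_stats_time() before storing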
class BaseConfListener(object):
"""Base class of all configuration listeners."""
__metaclass__ = ABCMeta
def __init__(self, base_conf):
pass
        # TODO(PH): revisit later and check if we need this check
# if not isinstance(base_conf, BaseConf):
# raise TypeError('Currently we only support listening to '
# 'instances of BaseConf')
class ConfWithIdListener(BaseConfListener):
def __init__(self, conf_with_id):
assert conf_with_id
super(ConfWithIdListener, self).__init__(conf_with_id)
conf_with_id.add_listener(ConfWithId.UPDATE_NAME_EVT,
self.on_chg_name_conf_with_id)
conf_with_id.add_listener(ConfWithId.UPDATE_DESCRIPTION_EVT,
self.on_chg_desc_conf_with_id)
def on_chg_name_conf_with_id(self, conf_evt):
        # Note: this method is not made abstract because this is not an
        # important event.
raise NotImplementedError()
def on_chg_desc_conf_with_id(self, conf_evt):
        # Note: this method is not made abstract because this is not an
        # important event.
raise NotImplementedError()
class ConfWithStatsListener(BaseConfListener):
def __init__(self, conf_with_stats):
assert conf_with_stats
super(ConfWithStatsListener, self).__init__(conf_with_stats)
conf_with_stats.add_listener(
ConfWithStats.UPDATE_STATS_LOG_ENABLED_EVT,
self.on_chg_stats_enabled_conf_with_stats)
conf_with_stats.add_listener(ConfWithStats.UPDATE_STATS_TIME_EVT,
self.on_chg_stats_time_conf_with_stats)
@abstractmethod
def on_chg_stats_time_conf_with_stats(self, conf_evt):
raise NotImplementedError()
@abstractmethod
def on_chg_stats_enabled_conf_with_stats(self, conf_evt):
raise NotImplementedError()
@functools.total_ordering
class ConfEvent(object):
"""Encapsulates configuration settings change/update event."""
def __init__(self, evt_src, evt_name, evt_value):
"""Creates an instance using given parameters.
        Parameters:
        - `evt_src`: (BaseConf) source of the event
        - `evt_name`: (str) name of the event; has to be one of the valid
          events of `evt_src`
        - `evt_value`: (tuple) event context that helps the event handler
"""
if evt_name not in evt_src.get_valid_evts():
raise ValueError('Event %s is not a valid event for type %s.' %
(evt_name, type(evt_src)))
self._src = evt_src
self._name = evt_name
self._value = evt_value
@property
def src(self):
return self._src
@property
def name(self):
return self._name
@property
def value(self):
return self._value
def __repr__(self):
return '<ConfEvent(%s, %s, %s)>' % (self.src, self.name, self.value)
def __str__(self):
return ('ConfEvent(src=%s, name=%s, value=%s)' %
(self.src, self.name, self.value))
def __lt__(self, other):
return ((self.src, self.name, self.value) <
(other.src, other.name, other.value))
def __eq__(self, other):
return ((self.src, self.name, self.value) ==
(other.src, other.name, other.value))
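# Illustrative sketch (not part of the original module): how a configuration
# change listener might be wired up. ConfWithId is instantiated directly here
# purely for demonstration; real configurations are subclasses of it, and the
# 'example'/'renamed' names are placeholder values.
def _example_conf_event_usage():
    def _on_name_change(conf_evt):
        # conf_evt.value carries the (old_name, new_name) tuple passed by the
        # notifier in the ConfWithId.name setter.
        LOG.info('name changed: %s', conf_evt.value)
    conf = ConfWithId(name='example')
    conf.add_listener(ConfWithId.UPDATE_NAME_EVT, _on_name_change)
    conf.name = 'renamed'  # fires UPDATE_NAME_EVT with ('example', 'renamed')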
# =============================================================================
# Runtime configuration setting validators and their registry.
# =============================================================================
@validate(name=ConfWithId.ID)
def validate_conf_id(identifier):
if not isinstance(identifier, str):
raise ConfigTypeError(conf_name=ConfWithId.ID, conf_value=identifier)
if len(identifier) > 128:
raise ConfigValueError(conf_name=ConfWithId.ID, conf_value=identifier)
return identifier
@validate(name=ConfWithId.NAME)
def validate_conf_name(name):
if not isinstance(name, str):
raise ConfigTypeError(conf_name=ConfWithId.NAME, conf_value=name)
if len(name) > 128:
raise ConfigValueError(conf_name=ConfWithId.NAME, conf_value=name)
return name
@validate(name=ConfWithId.DESCRIPTION)
def validate_conf_desc(description):
if not isinstance(description, str):
raise ConfigTypeError(conf_name=ConfWithId.DESCRIPTION,
conf_value=description)
return description
@validate(name=ConfWithStats.STATS_LOG_ENABLED)
def validate_stats_log_enabled(stats_log_enabled):
if stats_log_enabled not in (True, False):
raise ConfigTypeError(desc='Statistics log enabled settings can only'
' be boolean type.')
return stats_log_enabled
@validate(name=ConfWithStats.STATS_TIME)
def validate_stats_time(stats_time):
if not isinstance(stats_time, numbers.Integral):
raise ConfigTypeError(desc='Statistics log timer value has to be of '
'integral type but got: %r' % stats_time)
if stats_time < 10:
        raise ConfigValueError(desc='Statistics log timer cannot be set to '
                               'less than 10 sec, given timer value %s.' %
                               stats_time)
return stats_time
@validate(name=CAP_REFRESH)
def validate_cap_refresh(crefresh):
if crefresh not in (True, False):
raise ConfigTypeError(desc='Invalid Refresh capability settings: %s '
' boolean value expected' % crefresh)
return crefresh
@validate(name=CAP_ENHANCED_REFRESH)
def validate_cap_enhanced_refresh(cer):
if cer not in (True, False):
raise ConfigTypeError(desc='Invalid Enhanced Refresh capability '
'settings: %s boolean value expected' % cer)
return cer
@validate(name=CAP_MBGP_IPV4)
def validate_cap_mbgp_ipv4(cmv4):
if cmv4 not in (True, False):
        raise ConfigTypeError(desc='Invalid MP-BGP IPv4 capability '
                              'settings: %s boolean value expected' % cmv4)
return cmv4
@validate(name=CAP_MBGP_IPV6)
def validate_cap_mbgp_ipv6(cmv6):
if cmv6 not in (True, False):
        raise ConfigTypeError(desc='Invalid MP-BGP IPv6 capability '
                              'settings: %s boolean value expected' % cmv6)
return cmv6
@validate(name=CAP_MBGP_VPNV4)
def validate_cap_mbgp_vpnv4(cmv4):
if cmv4 not in (True, False):
        raise ConfigTypeError(desc='Invalid MP-BGP VPNv4 capability '
                              'settings: %s boolean value expected' % cmv4)
return cmv4
@validate(name=CAP_MBGP_VPNV6)
def validate_cap_mbgp_vpnv6(cmv6):
if cmv6 not in (True, False):
        raise ConfigTypeError(desc='Invalid MP-BGP VPNv6 capability '
                              'settings: %s boolean value expected' % cmv6)
return cmv6
@validate(name=CAP_RTC)
def validate_cap_rtc(cap_rtc):
if cap_rtc not in (True, False):
raise ConfigTypeError(desc='Invalid type for specifying RTC '
'capability. Expected boolean got: %s' %
type(cap_rtc))
return cap_rtc
@validate(name=RTC_AS)
def validate_cap_rtc_as(rtc_as):
if not is_valid_old_asn(rtc_as):
raise ConfigValueError(desc='Invalid RTC AS configuration value: %s'
% rtc_as)
return rtc_as
@validate(name=HOLD_TIME)
def validate_hold_time(hold_time):
if ((hold_time is None) or (not isinstance(hold_time, int)) or
hold_time < 10):
raise ConfigValueError(desc='Invalid hold_time configuration value %s'
% hold_time)
return hold_time
@validate(name=MULTI_EXIT_DISC)
def validate_med(med):
if med is not None and not validation.is_valid_med(med):
        raise ConfigValueError(desc='Invalid multi-exit-discriminator (MED)'
                               ' value: %s.' % med)
return med
@validate(name=SITE_OF_ORIGINS)
def validate_soo_list(soo_list):
if not isinstance(soo_list, list):
raise ConfigTypeError(conf_name=SITE_OF_ORIGINS, conf_value=soo_list)
if not (len(soo_list) <= MAX_NUM_SOO):
raise ConfigValueError(desc='Max. SOO is limited to %s' %
MAX_NUM_SOO)
if not all(validation.is_valid_ext_comm_attr(attr) for attr in soo_list):
raise ConfigValueError(conf_name=SITE_OF_ORIGINS,
conf_value=soo_list)
# Check if we have duplicates
unique_rts = set(soo_list)
if len(unique_rts) != len(soo_list):
raise ConfigValueError(desc='Duplicate value provided in %s' %
(soo_list))
return soo_list
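# Illustrative sketch (not part of the original module): the site-of-origin
# validator rejects non-list values and lists longer than MAX_NUM_SOO before it
# ever inspects the individual extended community strings; the '65000:100'
# strings below are example values only.
def _example_validate_soo_list():
    try:
        validate_soo_list('65000:100')  # not a list -> ConfigTypeError
    except ConfigTypeError:
        pass
    try:
        validate_soo_list(['65000:100'] * (MAX_NUM_SOO + 1))  # over the limit
    except ConfigValueError:
        pass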
@validate(name=MAX_PREFIXES)
def validate_max_prefixes(max_prefixes):
if not isinstance(max_prefixes, six.integer_types):
raise ConfigTypeError(desc='Max. prefixes value should be of type '
'int or long but found %s' % type(max_prefixes))
if max_prefixes < 0:
raise ConfigValueError(desc='Invalid max. prefixes value: %s' %
max_prefixes)
return max_prefixes
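# Illustrative sketch (not part of the original module): per the MAX_PREFIXES
# comment near the top of this file, zero means "no limit" and is accepted,
# while negative values are rejected.
def _example_validate_max_prefixes():
    assert validate_max_prefixes(0) == 0        # 0 == no limit
    assert validate_max_prefixes(1000) == 1000
    try:
        validate_max_prefixes(-1)
    except ConfigValueError:
        pass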
@validate(name=ADVERTISE_PEER_AS)
def validate_advertise_peer_as(advertise_peer_as):
if not isinstance(advertise_peer_as, bool):
raise ConfigTypeError(desc='Invalid type for advertise-peer-as, '
'expected bool got %s' %
type(advertise_peer_as))
return advertise_peer_as
# =============================================================================
# Other utils.
# =============================================================================
def compute_optional_conf(conf_name, default_value, **all_config):
"""Returns *conf_name* settings if provided in *all_config*, else returns
*default_value*.
Validates *conf_name* value if provided.
"""
conf_value = all_config.get(conf_name)
if conf_value is not None:
# Validate configuration value.
conf_value = get_validator(conf_name)(conf_value)
else:
conf_value = default_value
return conf_value
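# Illustrative sketch (not part of the original module): compute_optional_conf()
# falls back to the default when a setting is absent and runs the registered
# validator when it is present; HOLD_TIME's validator rejects values below 10.
def _example_compute_optional_conf():
    assert compute_optional_conf(HOLD_TIME, 40) == 40             # absent -> default
    assert compute_optional_conf(HOLD_TIME, 40, hold_time=90) == 90
    try:
        compute_optional_conf(HOLD_TIME, 40, hold_time=5)         # validated, too low
    except ConfigValueError:
        pass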
# Source: batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/asyncio/base_events.py
"""Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
# Try to skip getaddrinfo if "host" is already an IP. Users might have
# handled name resolution in their own code and pass in resolved IPs.
if not hasattr(socket, 'inet_pton'):
return
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
host is None:
return None
if type == socket.SOCK_STREAM:
proto = socket.IPPROTO_TCP
elif type == socket.SOCK_DGRAM:
proto = socket.IPPROTO_UDP
else:
return None
if port is None:
port = 0
elif isinstance(port, bytes) and port == b'':
port = 0
elif isinstance(port, str) and port == '':
port = 0
else:
# If port's a service name like "http", don't skip getaddrinfo.
try:
port = int(port)
except (TypeError, ValueError):
return None
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
if isinstance(host, bytes):
host = host.decode('idna')
if '%' in host:
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
# like '::1%lo0'.
return None
for af in afs:
try:
socket.inet_pton(af, host)
# The host has already been resolved.
if _HAS_IPv6 and af == socket.AF_INET6:
return af, type, proto, '', (host, port, flowinfo, scopeid)
else:
return af, type, proto, '', (host, port)
except OSError:
pass
# "host" is not an IP address.
return None
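# Illustrative sketch (not part of the original module): _ipaddr_info() lets the
# loop skip a getaddrinfo() round-trip when the host is already an IP literal;
# a hostname (or an unsupported proto/type) yields None, meaning "resolve
# normally". Assumes a platform where socket.inet_pton is available.
def _example_ipaddr_info():
    hit = _ipaddr_info('127.0.0.1', 80, socket.AF_INET,
                       socket.SOCK_STREAM, socket.IPPROTO_TCP)
    # hit == (AF_INET, SOCK_STREAM, IPPROTO_TCP, '', ('127.0.0.1', 80))
    miss = _ipaddr_info('example.org', 80, socket.AF_INET,
                        socket.SOCK_STREAM, socket.IPPROTO_TCP)
    assert hit is not None and miss is None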
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
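# Illustrative sketch (not part of the original module): with the default
# first_address_family_count of 1, interleaving alternates address families so
# a Happy Eyeballs connect does not exhaust every IPv6 address before trying
# any IPv4 one.
def _example_interleave_addrinfos():
    if not _HAS_IPv6:
        return
    v6 = (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, '',
          ('::1', 80, 0, 0))
    v4 = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, '',
          ('127.0.0.1', 80))
    assert _interleave_addrinfos([v6, v6, v4]) == [v6, v4, v6]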
def _run_until_complete_cb(fut):
if not fut.cancelled():
exc = fut.exception()
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
class _SendfileFallbackProtocol(protocols.Protocol):
def __init__(self, transp):
if not isinstance(transp, transports._FlowControlMixin):
raise TypeError("transport should be _FlowControlMixin instance")
self._transport = transp
self._proto = transp.get_protocol()
self._should_resume_reading = transp.is_reading()
self._should_resume_writing = transp._protocol_paused
transp.pause_reading()
transp.set_protocol(self)
if self._should_resume_writing:
self._write_ready_fut = self._transport._loop.create_future()
else:
self._write_ready_fut = None
async def drain(self):
if self._transport.is_closing():
raise ConnectionError("Connection closed by peer")
fut = self._write_ready_fut
if fut is None:
return
await fut
def connection_made(self, transport):
raise RuntimeError("Invalid state: "
"connection should have been established already.")
def connection_lost(self, exc):
if self._write_ready_fut is not None:
            # This never happens if the peer disconnects after sending the
            # whole content; thus disconnection is always an exception from
            # the user's perspective.
if exc is None:
self._write_ready_fut.set_exception(
ConnectionError("Connection is closed by peer"))
else:
self._write_ready_fut.set_exception(exc)
self._proto.connection_lost(exc)
def pause_writing(self):
if self._write_ready_fut is not None:
return
self._write_ready_fut = self._transport._loop.create_future()
def resume_writing(self):
if self._write_ready_fut is None:
return
self._write_ready_fut.set_result(False)
self._write_ready_fut = None
def data_received(self, data):
raise RuntimeError("Invalid state: reading should be paused")
def eof_received(self):
raise RuntimeError("Invalid state: reading should be paused")
async def restore(self):
self._transport.set_protocol(self._proto)
if self._should_resume_reading:
self._transport.resume_reading()
if self._write_ready_fut is not None:
# Cancel the future.
# Basically it has no effect because protocol is switched back,
# no code should wait for it anymore.
self._write_ready_fut.cancel()
if self._should_resume_writing:
self._proto.resume_writing()
class Server(events.AbstractServer):
def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
ssl_handshake_timeout):
self._loop = loop
self._sockets = sockets
self._active_count = 0
self._waiters = []
self._protocol_factory = protocol_factory
self._backlog = backlog
self._ssl_context = ssl_context
self._ssl_handshake_timeout = ssl_handshake_timeout
self._serving = False
self._serving_forever_fut = None
def __repr__(self):
return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
def _attach(self):
assert self._sockets is not None
self._active_count += 1
def _detach(self):
assert self._active_count > 0
self._active_count -= 1
if self._active_count == 0 and self._sockets is None:
self._wakeup()
def _wakeup(self):
waiters = self._waiters
self._waiters = None
for waiter in waiters:
if not waiter.done():
waiter.set_result(waiter)
def _start_serving(self):
if self._serving:
return
self._serving = True
for sock in self._sockets:
sock.listen(self._backlog)
self._loop._start_serving(
self._protocol_factory, sock, self._ssl_context,
self, self._backlog, self._ssl_handshake_timeout)
def get_loop(self):
return self._loop
def is_serving(self):
return self._serving
@property
def sockets(self):
if self._sockets is None:
return ()
return tuple(trsock.TransportSocket(s) for s in self._sockets)
def close(self):
sockets = self._sockets
if sockets is None:
return
self._sockets = None
for sock in sockets:
self._loop._stop_serving(sock)
self._serving = False
if (self._serving_forever_fut is not None and
not self._serving_forever_fut.done()):
self._serving_forever_fut.cancel()
self._serving_forever_fut = None
if self._active_count == 0:
self._wakeup()
async def start_serving(self):
self._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0, loop=self._loop)
async def serve_forever(self):
if self._serving_forever_fut is not None:
raise RuntimeError(
f'server {self!r} is already being awaited on serve_forever()')
if self._sockets is None:
raise RuntimeError(f'server {self!r} is closed')
self._start_serving()
self._serving_forever_fut = self._loop.create_future()
try:
await self._serving_forever_fut
except exceptions.CancelledError:
try:
self.close()
await self.wait_closed()
finally:
raise
finally:
self._serving_forever_fut = None
async def wait_closed(self):
if self._sockets is None or self._waiters is None:
return
waiter = self._loop.create_future()
self._waiters.append(waiter)
await waiter
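# Illustrative sketch (not part of the original module): the scheduling
# primitives implemented below (call_soon, call_later, run_until_complete) are
# normally exercised through a concrete loop; positional arguments after the
# callback are forwarded to it, as described in the module docstring.
def _example_call_later_usage():
    loop = events.new_event_loop()
    results = []
    loop.call_soon(results.append, 'soon')          # runs on the next iteration
    loop.call_later(0.01, results.append, 'later')  # runs ~10 ms later
    loop.run_until_complete(tasks.sleep(0.05))      # give both callbacks time
    loop.close()
    assert results == ['soon', 'later']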
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._timer_cancelled_count = 0
self._closed = False
self._stopping = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self.set_debug(coroutines._is_debug_mode())
# In debug mode, if the execution of a callback or a step of a task
# exceed this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
self._current_handle = None
self._task_factory = None
self._coroutine_origin_tracking_enabled = False
self._coroutine_origin_tracking_saved_depth = None
# A weak set of all asynchronous generators that are
# being iterated by the loop.
self._asyncgens = weakref.WeakSet()
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
def create_future(self):
"""Create a Future object attached to the loop."""
return futures.Future(loop=self)
def create_task(self, coro, *, name=None):
"""Schedule a coroutine object.
Return a task object.
"""
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self, name=name)
if task._source_traceback:
del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
tasks._set_task_name(task, name)
return task
def set_task_factory(self, factory):
"""Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object. The callable
must return a Future.
"""
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
def get_task_factory(self):
"""Return a task factory, or None if the default one is in use."""
return self._task_factory
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None,
call_connection_made=True):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
async def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _write_to_self(self):
"""Write a byte to self-pipe, to wake up the event loop.
This may be called from a different thread.
The subclass is responsible for implementing the self-pipe.
"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
warnings.warn(
f"asynchronous generator {agen!r} was scheduled after "
f"loop.shutdown_asyncgens() call",
ResourceWarning, source=self)
self._asyncgens.add(agen)
async def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
self._asyncgens_shutdown_called = True
if not len(self._asyncgens):
# If Python version is <3.6 or we don't have any asynchronous
# generators alive.
return
closing_agens = list(self._asyncgens)
self._asyncgens.clear()
results = await tasks.gather(
*[ag.aclose() for ag in closing_agens],
return_exceptions=True,
loop=self)
for result, agen in zip(results, closing_agens):
if isinstance(result, Exception):
self.call_exception_handler({
'message': f'an error occurred during closing of '
f'asynchronous generator {agen!r}',
'exception': result,
'asyncgen': agen
})
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
self._set_coroutine_origin_tracking(self._debug)
self._thread_id = threading.get_ident()
old_agen_hooks = sys.get_asyncgen_hooks()
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook)
try:
events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
break
finally:
self._stopping = False
self._thread_id = None
events._set_running_loop(None)
self._set_coroutine_origin_tracking(False)
sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
self._check_closed()
new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_run_until_complete_cb)
try:
self.run_forever()
except:
if new_task and future.done() and not future.cancelled():
# The coroutine raised a BaseException. Consume the exception
# to not log a warning, the caller doesn't have access to the
# local task.
future.exception()
raise
finally:
future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback already scheduled will still run. This simply informs
run_forever to stop looping after a complete iteration.
"""
self._stopping = True
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
def __del__(self, _warn=warnings.warn):
if not self.is_closed():
_warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
if not self.is_running():
self.close()
def is_running(self):
"""Returns True if the event loop is running."""
return (self._thread_id is not None)
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
def call_later(self, delay, callback, *args, context=None):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
timer = self.call_at(self.time() + delay, callback, *args,
context=context)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
def call_at(self, when, callback, *args, context=None):
"""Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self, context)
if timer._source_traceback:
del timer._source_traceback[-1]
heapq.heappush(self._scheduled, timer)
timer._scheduled = True
return timer
def call_soon(self, callback, *args, context=None):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
def _call_soon(self, callback, args, context):
handle = events.Handle(callback, args, self, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _check_thread(self):
"""Check that the current thread is the thread running the event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args, context=None):
"""Like call_soon(), but thread-safe."""
self._check_closed()
if self._debug:
self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
def run_in_executor(self, executor, func, *args):
self._check_closed()
if self._debug:
self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor()
self._default_executor = executor
return futures.wrap_future(
executor.submit(func, *args), loop=self)
def set_default_executor(self, executor):
if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
warnings.warn(
'Using the default executor that is not an instance of '
'ThreadPoolExecutor is deprecated and will be prohibited '
'in Python 3.9',
DeprecationWarning, 2)
self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
msg = [f"{host}:{port!r}"]
if family:
msg.append(f'family={family!r}')
if type:
msg.append(f'type={type!r}')
if proto:
msg.append(f'proto={proto!r}')
if flags:
msg.append(f'flags={flags!r}')
msg = ', '.join(msg)
logger.debug('Get address info %s', msg)
t0 = self.time()
addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
dt = self.time() - t0
msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
if dt >= self.slow_callback_duration:
logger.info(msg)
else:
logger.debug(msg)
return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags=0):
return await self.run_in_executor(
None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
async def _sock_sendfile_fallback(self, sock, file, offset, count):
if offset:
file.seek(offset)
blocksize = (
min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
)
buf = bytearray(blocksize)
total_sent = 0
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
break # EOF
await self.sock_sendall(sock, view[:read])
total_sent += read
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
"""Create, bind and connect one socket."""
my_exceptions = []
exceptions.append(my_exceptions)
family, type_, proto, _, address = addr_info
sock = None
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
if local_addr_infos is not None:
for _, _, _, _, laddr in local_addr_infos:
try:
sock.bind(laddr)
break
except OSError as exc:
msg = (
f'error while attempting to bind on '
f'address {laddr!r}: '
f'{exc.strerror.lower()}'
)
exc = OSError(exc.errno, msg)
my_exceptions.append(exc)
else: # all bind attempts failed
raise my_exceptions.pop()
await self.sock_connect(sock, address)
return sock
except OSError as exc:
my_exceptions.append(exc)
if sock is not None:
sock.close()
raise
except:
if sock is not None:
sock.close()
raise
async def create_connection(
self, protocol_factory, host=None, port=None,
*, ssl=None, family=0,
proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None,
ssl_handshake_timeout=None,
happy_eyeballs_delay=None, interleave=None):
"""Connect to a TCP server.
Create a streaming transport connection to a given Internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# Use host as default for server_hostname. It is an error
# if host is empty or not set, e.g. when an
# already-connected socket was passed or when only a port
# is given. To avoid this error, you can pass
# server_hostname='' -- this will bypass the hostname
# check. (This also means that if host is a numeric
# IP/IPv6 address, we will attempt to verify that exact
# address; this will probably fail, but it is possible to
# create a certificate for a specific IP address, so we
# don't judge it here.)
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if happy_eyeballs_delay is not None and interleave is None:
# If using happy eyeballs, default to interleave addresses by family
interleave = 1
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
infos = await self._ensure_resolved(
(host, port), family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
if local_addr is not None:
laddr_infos = await self._ensure_resolved(
local_addr, family=family,
type=socket.SOCK_STREAM, proto=proto,
flags=flags, loop=self)
if not laddr_infos:
raise OSError('getaddrinfo() returned empty list')
else:
laddr_infos = None
if interleave:
infos = _interleave_addrinfos(infos, interleave)
exceptions = []
if happy_eyeballs_delay is None:
# not using happy eyeballs
for addrinfo in infos:
try:
sock = await self._connect_sock(
exceptions, addrinfo, laddr_infos)
break
except OSError:
continue
else: # using happy eyeballs
sock, _, _ = await staggered.staggered_race(
(functools.partial(self._connect_sock,
exceptions, addrinfo, laddr_infos)
for addrinfo in infos),
happy_eyeballs_delay, loop=self)
if sock is None:
exceptions = [exc for sub in exceptions for exc in sub]
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
else:
if sock is None:
raise ValueError(
'host and port was not specified and no sock specified')
if sock.type != socket.SOCK_STREAM:
# We allow AF_INET, AF_INET6, AF_UNIX as long as they
# are SOCK_STREAM.
# We support passing AF_UNIX sockets even though we have
# a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method breaks backwards
                # compatibility.
raise ValueError(
f'A Stream Socket was expected, got {sock!r}')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
return transport, protocol
async def _create_connection_transport(
self, sock, protocol_factory, ssl,
server_hostname, server_side=False,
ssl_handshake_timeout=None):
sock.setblocking(False)
protocol = protocol_factory()
waiter = self.create_future()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
*, fallback=True):
"""Send a file to transport.
Return the total number of bytes which were sent.
The method uses high-performance os.sendfile if available.
file must be a regular file object opened in binary mode.
offset tells from where to start reading the file. If specified,
count is the total number of bytes to transmit as opposed to
        sending the file until EOF is reached. The file position is updated on
        return, and also in case of error, in which case file.tell()
        can be used to figure out the number of bytes
        which were sent.
        fallback set to True makes asyncio manually read and send
the file when the platform does not support the sendfile syscall
(e.g. Windows or SSL socket on Unix).
Raise SendfileNotAvailableError if the system does not support
sendfile syscall and fallback is False.
"""
if transport.is_closing():
raise RuntimeError("Transport is closing")
mode = getattr(transport, '_sendfile_compatible',
constants._SendfileMode.UNSUPPORTED)
if mode is constants._SendfileMode.UNSUPPORTED:
raise RuntimeError(
f"sendfile is not supported for transport {transport!r}")
if mode is constants._SendfileMode.TRY_NATIVE:
try:
return await self._sendfile_native(transport, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
if not fallback:
raise RuntimeError(
f"fallback is disabled and native sendfile is not "
f"supported for transport {transport!r}")
return await self._sendfile_fallback(transport, file,
offset, count)
async def _sendfile_native(self, transp, file, offset, count):
raise exceptions.SendfileNotAvailableError(
"sendfile syscall is not supported")
async def _sendfile_fallback(self, transp, file, offset, count):
if offset:
file.seek(offset)
blocksize = min(count, 16384) if count else 16384
buf = bytearray(blocksize)
total_sent = 0
proto = _SendfileFallbackProtocol(transp)
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
return total_sent
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
return total_sent # EOF
await proto.drain()
transp.write(view[:read])
total_sent += read
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
await proto.restore()
async def start_tls(self, transport, protocol, sslcontext, *,
server_side=False,
server_hostname=None,
ssl_handshake_timeout=None):
"""Upgrade transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
if ssl is None:
raise RuntimeError('Python ssl module is not available')
if not isinstance(sslcontext, ssl.SSLContext):
raise TypeError(
f'sslcontext is expected to be an instance of ssl.SSLContext, '
f'got {sslcontext!r}')
if not getattr(transport, '_start_tls_compatible', False):
raise TypeError(
f'transport {transport!r} is not supported by start_tls()')
waiter = self.create_future()
ssl_protocol = sslproto.SSLProtocol(
self, protocol, sslcontext, waiter,
server_side, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout,
call_connection_made=False)
# Pause early so that "ssl_protocol.data_received()" doesn't
# have a chance to get called before "ssl_protocol.connection_made()".
transport.pause_reading()
transport.set_protocol(ssl_protocol)
conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
resume_cb = self.call_soon(transport.resume_reading)
try:
await waiter
except BaseException:
transport.close()
conmade_cb.cancel()
resume_cb.cancel()
raise
return ssl_protocol._app_transport
async def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0,
reuse_address=None, reuse_port=None,
allow_broadcast=None, sock=None):
"""Create datagram connection."""
if sock is not None:
if sock.type != socket.SOCK_DGRAM:
raise ValueError(
f'A UDP Socket was expected, got {sock!r}')
if (local_addr or remote_addr or
family or proto or flags or
reuse_address or reuse_port or allow_broadcast):
# show the problematic kwargs in exception msg
opts = dict(local_addr=local_addr, remote_addr=remote_addr,
family=family, proto=proto, flags=flags,
reuse_address=reuse_address, reuse_port=reuse_port,
allow_broadcast=allow_broadcast)
problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
raise ValueError(
f'socket modifier keyword arguments can not be used '
f'when sock is specified. ({problems})')
sock.setblocking(False)
r_addr = None
else:
if not (local_addr or remote_addr):
if family == 0:
raise ValueError('unexpected address family')
addr_pairs_info = (((family, proto), (None, None)),)
elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
for addr in (local_addr, remote_addr):
if addr is not None and not isinstance(addr, str):
raise TypeError('string is expected')
if local_addr and local_addr[0] not in (0, '\x00'):
try:
if stat.S_ISSOCK(os.stat(local_addr).st_mode):
os.remove(local_addr)
except FileNotFoundError:
pass
except OSError as err:
# Directory may have permissions only to create socket.
logger.error('Unable to check or remove stale UNIX '
'socket %r: %r',
local_addr, err)
addr_pairs_info = (((family, proto),
(local_addr, remote_addr)), )
else:
# join address by (family, protocol)
addr_infos = {} # Using order preserving dict
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
infos = await self._ensure_resolved(
addr, family=family, type=socket.SOCK_DGRAM,
proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
if key not in addr_infos:
addr_infos[key] = [None, None]
addr_infos[key][idx] = address
# each addr has to have info for each (family, proto) pair
addr_pairs_info = [
(key, addr_pair) for key, addr_pair in addr_infos.items()
if not ((local_addr and addr_pair[0] is None) or
(remote_addr and addr_pair[1] is None))]
if not addr_pairs_info:
raise ValueError('can not get address information')
exceptions = []
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
for ((family, proto),
(local_address, remote_address)) in addr_pairs_info:
sock = None
r_addr = None
try:
sock = socket.socket(
family=family, type=socket.SOCK_DGRAM, proto=proto)
if reuse_address:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if reuse_port:
_set_reuseport(sock)
if allow_broadcast:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setblocking(False)
if local_addr:
sock.bind(local_address)
if remote_addr:
if not allow_broadcast:
await self.sock_connect(sock, remote_address)
r_addr = remote_address
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except:
if sock is not None:
sock.close()
raise
else:
break
else:
raise exceptions[0]
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_datagram_transport(
sock, protocol, r_addr, waiter)
if self._debug:
if local_addr:
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
"created: (%r, %r)",
local_addr, remote_addr, transport, protocol)
else:
logger.debug("Datagram endpoint remote_addr=%r created: "
"(%r, %r)",
remote_addr, transport, protocol)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
# "host" is already a resolved IP.
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
infos = await self._ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError(f'getaddrinfo({host!r}) returned empty list')
return infos
async def create_server(
self, protocol_factory, host=None, port=None,
*,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
backlog=100,
ssl=None,
reuse_address=None,
reuse_port=None,
ssl_handshake_timeout=None,
start_serving=True):
"""Create a TCP server.
The host parameter can be a string, in that case the TCP server is
bound to host and port.
The host parameter can also be a sequence of strings and in that case
the TCP server is bound to all hosts of the sequence. If a host
appears multiple times (possibly indirectly e.g. when hostnames
resolve to the same IP address), the server is only bound once to that
host.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if ssl_handshake_timeout is not None and ssl is None:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
sockets = []
if host == '':
hosts = [None]
elif (isinstance(host, str) or
not isinstance(host, collections.abc.Iterable)):
hosts = [host]
else:
hosts = host
fs = [self._create_server_getaddrinfo(host, port, family=family,
flags=flags)
for host in hosts]
infos = await tasks.gather(*fs, loop=self)
infos = set(itertools.chain.from_iterable(infos))
completed = False
try:
for res in infos:
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error:
# Assume it's a bad family/type/protocol combination.
if self._debug:
logger.warning('create_server() failed to create '
'socket.socket(%r, %r, %r)',
af, socktype, proto, exc_info=True)
continue
sockets.append(sock)
if reuse_address:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
if reuse_port:
_set_reuseport(sock)
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
if (_HAS_IPv6 and
af == socket.AF_INET6 and
hasattr(socket, 'IPPROTO_IPV6')):
sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True)
try:
sock.bind(sa)
except OSError as err:
raise OSError(err.errno, 'error while attempting '
'to bind on address %r: %s'
% (sa, err.strerror.lower())) from None
completed = True
finally:
if not completed:
for sock in sockets:
sock.close()
else:
if sock is None:
raise ValueError('Neither host/port nor sock were specified')
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
sockets = [sock]
for sock in sockets:
sock.setblocking(False)
server = Server(self, sockets, protocol_factory,
ssl, backlog, ssl_handshake_timeout)
if start_serving:
server._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0, loop=self)
if self._debug:
logger.info("%r is serving", server)
return server
async def connect_accepted_socket(
self, protocol_factory, sock,
*, ssl=None,
ssl_handshake_timeout=None):
"""Handle an accepted connection.
This is used by servers that accept connections outside of
asyncio but that use asyncio to handle connections.
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, '', server_side=True,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=False,
shell=True, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
async def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
popen_args = (program,) + args
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = f'execute program {program!r}'
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
def default_exception_handler(self, context):
"""Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
This default handler logs the error message and other
context-dependent information. In debug mode, a truncated
stack trace is also appended showing where the given object
(e.g. a handle or future or task) was created, if any.
The context parameter has the same meaning as in
`call_exception_handler()`.
"""
message = context.get('message')
if not message:
message = 'Unhandled exception in event loop'
exception = context.get('exception')
if exception is not None:
exc_info = (type(exception), exception, exception.__traceback__)
else:
exc_info = False
if ('source_traceback' not in context and
self._current_handle is not None and
self._current_handle._source_traceback):
context['handle_traceback'] = \
self._current_handle._source_traceback
log_lines = [message]
for key in sorted(context):
if key in {'message', 'exception'}:
continue
value = context[key]
if key == 'source_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Object created at (most recent call last):\n'
value += tb.rstrip()
elif key == 'handle_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Handle created at (most recent call last):\n'
value += tb.rstrip()
else:
value = repr(value)
log_lines.append(f'{key}: {value}')
logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
"""Call the current event loop's exception handler.
The context argument is a dict containing the following keys:
- 'message': Error message;
- 'exception' (optional): Exception object;
- 'future' (optional): Future instance;
- 'task' (optional): Task instance;
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- 'socket' (optional): Socket instance;
- 'asyncgen' (optional): Asynchronous generator that caused
the exception.
        New keys may be introduced in the future.
Note: do not overload this method in an event loop subclass.
For custom exception handling, use the
`set_exception_handler()` method.
"""
if self._exception_handler is None:
try:
self.default_exception_handler(context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Second protection layer for unexpected errors
# in the default implementation, as well as for subclassed
# event loops with overloaded "default_exception_handler".
logger.error('Exception in default exception handler',
exc_info=True)
else:
try:
self._exception_handler(self, context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
# Exception in the user set custom exception handler.
try:
# Let's try default handler.
self.default_exception_handler({
'message': 'Unhandled error in exception handler',
'exception': exc,
'context': context,
})
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Guard 'default_exception_handler' in case it is
# overloaded.
logger.error('Exception in default exception handler '
'while handling an unexpected error '
'in custom exception handler',
exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
"""Like _add_callback() but called from a signal handler."""
self._add_callback(handle)
self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
sched_count = len(self._scheduled)
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
self._timer_cancelled_count / sched_count >
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
# Remove delayed calls that were cancelled if their number
# is too high
new_scheduled = []
for handle in self._scheduled:
if handle._cancelled:
handle._scheduled = False
else:
new_scheduled.append(handle)
heapq.heapify(new_scheduled)
self._scheduled = new_scheduled
self._timer_cancelled_count = 0
else:
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0]._cancelled:
self._timer_cancelled_count -= 1
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
timeout = None
if self._ready or self._stopping:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
while self._scheduled:
handle = self._scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
self._ready.append(handle)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# Note: We run all currently scheduled callbacks, but not any
# callbacks scheduled by callbacks run this time around --
# they will be run the next time (after another I/O poll).
# Use an idiom that is thread-safe without using locks.
ntodo = len(self._ready)
for i in range(ntodo):
handle = self._ready.popleft()
if handle._cancelled:
continue
if self._debug:
try:
self._current_handle = handle
t0 = self.time()
handle._run()
dt = self.time() - t0
if dt >= self.slow_callback_duration:
logger.warning('Executing %s took %.3f seconds',
_format_handle(handle), dt)
finally:
self._current_handle = None
else:
handle._run()
handle = None # Needed to break cycles when an exception occurs.
def _set_coroutine_origin_tracking(self, enabled):
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
return
if enabled:
self._coroutine_origin_tracking_saved_depth = (
sys.get_coroutine_origin_tracking_depth())
sys.set_coroutine_origin_tracking_depth(
constants.DEBUG_STACK_DEPTH)
else:
sys.set_coroutine_origin_tracking_depth(
self._coroutine_origin_tracking_saved_depth)
self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
if self.is_running():
self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
| apache-2.0 | -3,916,017,537,165,643,000 | 37.52381 | 83 | 0.554909 | false |
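# Editor's note: a minimal, hedged sketch of how the event-loop methods shown
# above (create_server() in particular) are normally reached from application
# code. The EchoProtocol class and the address below are illustrative
# assumptions, not part of the original asyncio source.
import asyncio


class EchoProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        # Echo the received bytes back and close the connection.
        self.transport.write(data)
        self.transport.close()


async def main():
    loop = asyncio.get_running_loop()
    # create_server() resolves the host, binds one listening socket per address
    # family and returns a Server object; start_serving=True (the default)
    # registers the accept callbacks immediately.
    server = await loop.create_server(EchoProtocol, host='127.0.0.1', port=8888)
    async with server:
        await server.serve_forever()


if __name__ == '__main__':
    asyncio.run(main())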
kave/collab | core/search_indexes.py | 4 | 1208 | import datetime
from haystack import indexes
from core.models import Person
class PersonIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.EdgeNgramField(document=True, use_template=True)
user = indexes.CharField(model_attr='user')
org_group = indexes.CharField(model_attr='org_group', null=True)
display = indexes.CharField(model_attr='full_name', null=True)
description = indexes.CharField(model_attr='title', null=True)
index_name = indexes.CharField(indexed=False)
index_priority = indexes.IntegerField(indexed=False)
url = indexes.CharField(indexed=False, null=True)
image = indexes.CharField(indexed=False, null=True)
PRIORITY = 1
def prepare_index_name(self, obj):
return "Staff Directory"
def prepare_index_priority(self, obj):
return self.PRIORITY
def prepare_url(self, obj):
return obj.get_absolute_url()
def prepare_image(self, obj):
return obj.photo_file.url_125x125
def get_model(self):
return Person
def index_queryset(self, using=None):
"""Used when the entire index for model is updated."""
return self.get_model().objects.filter(user__is_active=True)
| cc0-1.0 | 5,563,580,700,484,831,000 | 32.555556 | 68 | 0.699503 | false |
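# Editor's note: a hedged sketch of how the PersonIndex above is typically
# queried once the Haystack index has been built (e.g. ./manage.py rebuild_index).
# The use of auto_query() and the free-text argument are illustrative
# assumptions, not part of the original file.
from haystack.query import SearchQuerySet

from core.models import Person


def search_staff(text):
    # Restrict results to Person and run the query against the EdgeNgram-backed
    # document field declared in PersonIndex.
    return SearchQuerySet().models(Person).auto_query(text)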
special/telepathy-qt-upstream | tools/glib-ginterface-gen.py | 12 | 30017 | #!/usr/bin/python
# glib-ginterface-gen.py: service-side interface generator
#
# Generate dbus-glib 0.x service GInterfaces from the Telepathy specification.
# The master copy of this program is in the telepathy-glib repository -
# please make any changes there.
#
# Copyright (C) 2006, 2007 Collabora Limited
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import os.path
import xml.dom.minidom
from libglibcodegen import Signature, type_to_gtype, cmp_by_name, \
NS_TP, dbus_gutils_wincaps_to_uscore, \
signal_to_marshal_name, method_to_glue_marshal_name
NS_TP = "http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0"
class Generator(object):
def __init__(self, dom, prefix, basename, signal_marshal_prefix,
headers, end_headers, not_implemented_func,
allow_havoc):
self.dom = dom
self.__header = []
self.__body = []
assert prefix.endswith('_')
assert not signal_marshal_prefix.endswith('_')
# The main_prefix, sub_prefix thing is to get:
# FOO_ -> (FOO_, _)
# FOO_SVC_ -> (FOO_, _SVC_)
# but
# FOO_BAR/ -> (FOO_BAR_, _)
# FOO_BAR/SVC_ -> (FOO_BAR_, _SVC_)
if '/' in prefix:
main_prefix, sub_prefix = prefix.upper().split('/', 1)
prefix = prefix.replace('/', '_')
else:
main_prefix, sub_prefix = prefix.upper().split('_', 1)
self.MAIN_PREFIX_ = main_prefix + '_'
self._SUB_PREFIX_ = '_' + sub_prefix
self.Prefix_ = prefix
self.Prefix = prefix.replace('_', '')
self.prefix_ = prefix.lower()
self.PREFIX_ = prefix.upper()
self.basename = basename
self.signal_marshal_prefix = signal_marshal_prefix
self.headers = headers
self.end_headers = end_headers
self.not_implemented_func = not_implemented_func
self.allow_havoc = allow_havoc
def h(self, s):
self.__header.append(s)
def b(self, s):
self.__body.append(s)
def do_node(self, node):
node_name = node.getAttribute('name').replace('/', '')
node_name_mixed = self.node_name_mixed = node_name.replace('_', '')
node_name_lc = self.node_name_lc = node_name.lower()
node_name_uc = self.node_name_uc = node_name.upper()
interfaces = node.getElementsByTagName('interface')
assert len(interfaces) == 1, interfaces
interface = interfaces[0]
self.iface_name = interface.getAttribute('name')
tmp = interface.getAttribute('tp:implement-service')
if tmp == "no":
return
tmp = interface.getAttribute('tp:causes-havoc')
if tmp and not self.allow_havoc:
raise AssertionError('%s is %s' % (self.iface_name, tmp))
self.b('static const DBusGObjectInfo _%s%s_object_info;'
% (self.prefix_, node_name_lc))
self.b('')
methods = interface.getElementsByTagName('method')
signals = interface.getElementsByTagName('signal')
properties = interface.getElementsByTagName('property')
# Don't put properties in dbus-glib glue
glue_properties = []
self.b('struct _%s%sClass {' % (self.Prefix, node_name_mixed))
self.b(' GTypeInterface parent_class;')
for method in methods:
self.b(' %s %s;' % self.get_method_impl_names(method))
self.b('};')
self.b('')
if signals:
self.b('enum {')
for signal in signals:
self.b(' %s,' % self.get_signal_const_entry(signal))
self.b(' N_%s_SIGNALS' % node_name_uc)
self.b('};')
self.b('static guint %s_signals[N_%s_SIGNALS] = {0};'
% (node_name_lc, node_name_uc))
self.b('')
self.b('static void %s%s_base_init (gpointer klass);'
% (self.prefix_, node_name_lc))
self.b('')
self.b('GType')
self.b('%s%s_get_type (void)'
% (self.prefix_, node_name_lc))
self.b('{')
self.b(' static GType type = 0;')
self.b('')
self.b(' if (G_UNLIKELY (type == 0))')
self.b(' {')
self.b(' static const GTypeInfo info = {')
self.b(' sizeof (%s%sClass),' % (self.Prefix, node_name_mixed))
self.b(' %s%s_base_init, /* base_init */'
% (self.prefix_, node_name_lc))
self.b(' NULL, /* base_finalize */')
self.b(' NULL, /* class_init */')
self.b(' NULL, /* class_finalize */')
self.b(' NULL, /* class_data */')
self.b(' 0,')
self.b(' 0, /* n_preallocs */')
self.b(' NULL /* instance_init */')
self.b(' };')
self.b('')
self.b(' type = g_type_register_static (G_TYPE_INTERFACE,')
self.b(' "%s%s", &info, 0);' % (self.Prefix, node_name_mixed))
self.b(' }')
self.b('')
self.b(' return type;')
self.b('}')
self.b('')
self.h('/**')
self.h(' * %s%s:' % (self.Prefix, node_name_mixed))
self.h(' *')
self.h(' * Dummy typedef representing any implementation of this '
'interface.')
self.h(' */')
self.h('typedef struct _%s%s %s%s;'
% (self.Prefix, node_name_mixed, self.Prefix, node_name_mixed))
self.h('')
self.h('/**')
self.h(' * %s%sClass:' % (self.Prefix, node_name_mixed))
self.h(' *')
self.h(' * The class of %s%s.' % (self.Prefix, node_name_mixed))
if methods:
self.h(' *')
self.h(' * In a full implementation of this interface (i.e. all')
self.h(' * methods implemented), the interface initialization')
self.h(' * function used in G_IMPLEMENT_INTERFACE() would')
self.h(' * typically look like this:')
self.h(' *')
self.h(' * <programlisting>')
self.h(' * static void')
self.h(' * implement_%s (gpointer klass,' % self.node_name_lc)
self.h(' * gpointer unused G_GNUC_UNUSED)')
self.h(' * {')
# "#" is special to gtkdoc under some circumstances; it appears
# that escaping "##" as "#<!---->#" or "##" doesn't work,
# but adding an extra hash symbol does. Thanks, gtkdoc :-(
self.h(' * #define IMPLEMENT(x) %s%s_implement_###x (\\'
% (self.prefix_, self.node_name_lc))
self.h(' * klass, my_object_###x)')
for method in methods:
class_member_name = method.getAttribute('tp:name-for-bindings')
class_member_name = class_member_name.lower()
self.h(' * IMPLEMENT (%s);' % class_member_name)
self.h(' * #undef IMPLEMENT')
self.h(' * }')
self.h(' * </programlisting>')
else:
self.h(' * This interface has no D-Bus methods, so an')
self.h(' * implementation can typically pass %NULL to')
self.h(' * G_IMPLEMENT_INTERFACE() as the interface')
self.h(' * initialization function.')
self.h(' */')
self.h('typedef struct _%s%sClass %s%sClass;'
% (self.Prefix, node_name_mixed, self.Prefix, node_name_mixed))
self.h('')
self.h('GType %s%s_get_type (void);'
% (self.prefix_, node_name_lc))
gtype = self.current_gtype = \
self.MAIN_PREFIX_ + 'TYPE' + self._SUB_PREFIX_ + node_name_uc
classname = self.Prefix + node_name_mixed
self.h('#define %s \\\n (%s%s_get_type ())'
% (gtype, self.prefix_, node_name_lc))
self.h('#define %s%s(obj) \\\n'
' (G_TYPE_CHECK_INSTANCE_CAST((obj), %s, %s))'
% (self.PREFIX_, node_name_uc, gtype, classname))
self.h('#define %sIS%s%s(obj) \\\n'
' (G_TYPE_CHECK_INSTANCE_TYPE((obj), %s))'
% (self.MAIN_PREFIX_, self._SUB_PREFIX_, node_name_uc, gtype))
self.h('#define %s%s_GET_CLASS(obj) \\\n'
' (G_TYPE_INSTANCE_GET_INTERFACE((obj), %s, %sClass))'
% (self.PREFIX_, node_name_uc, gtype, classname))
self.h('')
self.h('')
base_init_code = []
for method in methods:
self.do_method(method)
for signal in signals:
base_init_code.extend(self.do_signal(signal))
self.b('static inline void')
self.b('%s%s_base_init_once (gpointer klass G_GNUC_UNUSED)'
% (self.prefix_, node_name_lc))
self.b('{')
if properties:
self.b(' static TpDBusPropertiesMixinPropInfo properties[%d] = {'
% (len(properties) + 1))
for m in properties:
access = m.getAttribute('access')
assert access in ('read', 'write', 'readwrite')
if access == 'read':
flags = 'TP_DBUS_PROPERTIES_MIXIN_FLAG_READ'
elif access == 'write':
flags = 'TP_DBUS_PROPERTIES_MIXIN_FLAG_WRITE'
else:
flags = ('TP_DBUS_PROPERTIES_MIXIN_FLAG_READ | '
'TP_DBUS_PROPERTIES_MIXIN_FLAG_WRITE')
self.b(' { 0, %s, "%s", 0, NULL, NULL }, /* %s */'
% (flags, m.getAttribute('type'), m.getAttribute('name')))
self.b(' { 0, 0, NULL, 0, NULL, NULL }')
self.b(' };')
self.b(' static TpDBusPropertiesMixinIfaceInfo interface =')
self.b(' { 0, properties, NULL, NULL };')
self.b('')
self.b(' dbus_g_object_type_install_info (%s%s_get_type (),'
% (self.prefix_, node_name_lc))
self.b(' &_%s%s_object_info);'
% (self.prefix_, node_name_lc))
self.b('')
if properties:
self.b(' interface.dbus_interface = g_quark_from_static_string '
'("%s");' % self.iface_name)
for i, m in enumerate(properties):
self.b(' properties[%d].name = g_quark_from_static_string ("%s");'
% (i, m.getAttribute('name')))
self.b(' properties[%d].type = %s;'
% (i, type_to_gtype(m.getAttribute('type'))[1]))
self.b(' tp_svc_interface_set_dbus_properties_info (%s, &interface);'
% self.current_gtype)
self.b('')
for s in base_init_code:
self.b(s)
self.b('}')
self.b('static void')
self.b('%s%s_base_init (gpointer klass)'
% (self.prefix_, node_name_lc))
self.b('{')
self.b(' static gboolean initialized = FALSE;')
self.b('')
self.b(' if (!initialized)')
self.b(' {')
self.b(' initialized = TRUE;')
self.b(' %s%s_base_init_once (klass);'
% (self.prefix_, node_name_lc))
self.b(' }')
# insert anything we need to do per implementation here
self.b('}')
self.h('')
self.b('static const DBusGMethodInfo _%s%s_methods[] = {'
% (self.prefix_, node_name_lc))
method_blob, offsets = self.get_method_glue(methods)
for method, offset in zip(methods, offsets):
self.do_method_glue(method, offset)
if len(methods) == 0:
# empty arrays are a gcc extension, so put in a dummy member
self.b(" { NULL, NULL, 0 }")
self.b('};')
self.b('')
self.b('static const DBusGObjectInfo _%s%s_object_info = {'
% (self.prefix_, node_name_lc))
self.b(' 0,') # version
self.b(' _%s%s_methods,' % (self.prefix_, node_name_lc))
self.b(' %d,' % len(methods))
self.b('"' + method_blob.replace('\0', '\\0') + '",')
self.b('"' + self.get_signal_glue(signals).replace('\0', '\\0') + '",')
self.b('"' +
self.get_property_glue(glue_properties).replace('\0', '\\0') +
'",')
self.b('};')
self.b('')
self.node_name_mixed = None
self.node_name_lc = None
self.node_name_uc = None
def get_method_glue(self, methods):
info = []
offsets = []
for method in methods:
offsets.append(len(''.join(info)))
info.append(self.iface_name + '\0')
info.append(method.getAttribute('name') + '\0')
info.append('A\0') # async
counter = 0
for arg in method.getElementsByTagName('arg'):
out = arg.getAttribute('direction') == 'out'
name = arg.getAttribute('name')
if not name:
assert out
name = 'arg%u' % counter
counter += 1
info.append(name + '\0')
if out:
info.append('O\0')
else:
info.append('I\0')
if out:
info.append('F\0') # not const
info.append('N\0') # not error or return
info.append(arg.getAttribute('type') + '\0')
info.append('\0')
return ''.join(info) + '\0', offsets
def do_method_glue(self, method, offset):
lc_name = method.getAttribute('tp:name-for-bindings')
if method.getAttribute('name') != lc_name.replace('_', ''):
raise AssertionError('Method %s tp:name-for-bindings (%s) does '
'not match' % (method.getAttribute('name'), lc_name))
lc_name = lc_name.lower()
marshaller = method_to_glue_marshal_name(method,
self.signal_marshal_prefix)
wrapper = self.prefix_ + self.node_name_lc + '_' + lc_name
self.b(" { (GCallback) %s, %s, %d }," % (wrapper, marshaller, offset))
def get_signal_glue(self, signals):
info = []
for signal in signals:
info.append(self.iface_name)
info.append(signal.getAttribute('name'))
return '\0'.join(info) + '\0\0'
# the implementation can be the same
get_property_glue = get_signal_glue
def get_method_impl_names(self, method):
dbus_method_name = method.getAttribute('name')
class_member_name = method.getAttribute('tp:name-for-bindings')
if dbus_method_name != class_member_name.replace('_', ''):
raise AssertionError('Method %s tp:name-for-bindings (%s) does '
'not match' % (dbus_method_name, class_member_name))
class_member_name = class_member_name.lower()
stub_name = (self.prefix_ + self.node_name_lc + '_' +
class_member_name)
return (stub_name + '_impl', class_member_name)
def do_method(self, method):
assert self.node_name_mixed is not None
in_class = []
# Examples refer to Thing.DoStuff (su) -> ii
# DoStuff
dbus_method_name = method.getAttribute('name')
# do_stuff
class_member_name = method.getAttribute('tp:name-for-bindings')
if dbus_method_name != class_member_name.replace('_', ''):
raise AssertionError('Method %s tp:name-for-bindings (%s) does '
'not match' % (dbus_method_name, class_member_name))
class_member_name = class_member_name.lower()
# void tp_svc_thing_do_stuff (TpSvcThing *, const char *, guint,
# DBusGMethodInvocation *);
stub_name = (self.prefix_ + self.node_name_lc + '_' +
class_member_name)
# typedef void (*tp_svc_thing_do_stuff_impl) (TpSvcThing *,
# const char *, guint, DBusGMethodInvocation);
impl_name = stub_name + '_impl'
# void tp_svc_thing_return_from_do_stuff (DBusGMethodInvocation *,
# gint, gint);
ret_name = (self.prefix_ + self.node_name_lc + '_return_from_' +
class_member_name)
# Gather arguments
in_args = []
out_args = []
for i in method.getElementsByTagName('arg'):
name = i.getAttribute('name')
direction = i.getAttribute('direction') or 'in'
dtype = i.getAttribute('type')
assert direction in ('in', 'out')
if name:
name = direction + '_' + name
elif direction == 'in':
name = direction + str(len(in_args))
else:
name = direction + str(len(out_args))
ctype, gtype, marshaller, pointer = type_to_gtype(dtype)
if pointer:
ctype = 'const ' + ctype
struct = (ctype, name)
if direction == 'in':
in_args.append(struct)
else:
out_args.append(struct)
# Implementation type declaration (in header, docs in body)
self.b('/**')
self.b(' * %s:' % impl_name)
self.b(' * @self: The object implementing this interface')
for (ctype, name) in in_args:
self.b(' * @%s: %s (FIXME, generate documentation)'
% (name, ctype))
self.b(' * @context: Used to return values or throw an error')
self.b(' *')
self.b(' * The signature of an implementation of the D-Bus method')
self.b(' * %s on interface %s.' % (dbus_method_name, self.iface_name))
self.b(' */')
self.h('typedef void (*%s) (%s%s *self,'
% (impl_name, self.Prefix, self.node_name_mixed))
for (ctype, name) in in_args:
self.h(' %s%s,' % (ctype, name))
self.h(' DBusGMethodInvocation *context);')
# Class member (in class definition)
in_class.append(' %s %s;' % (impl_name, class_member_name))
# Stub definition (in body only - it's static)
self.b('static void')
self.b('%s (%s%s *self,'
% (stub_name, self.Prefix, self.node_name_mixed))
for (ctype, name) in in_args:
self.b(' %s%s,' % (ctype, name))
self.b(' DBusGMethodInvocation *context)')
self.b('{')
self.b(' %s impl = (%s%s_GET_CLASS (self)->%s);'
% (impl_name, self.PREFIX_, self.node_name_uc, class_member_name))
self.b('')
self.b(' if (impl != NULL)')
tmp = ['self'] + [name for (ctype, name) in in_args] + ['context']
self.b(' {')
self.b(' (impl) (%s);' % ',\n '.join(tmp))
self.b(' }')
self.b(' else')
self.b(' {')
if self.not_implemented_func:
self.b(' %s (context);' % self.not_implemented_func)
else:
self.b(' GError e = { DBUS_GERROR, ')
self.b(' DBUS_GERROR_UNKNOWN_METHOD,')
self.b(' "Method not implemented" };')
self.b('')
self.b(' dbus_g_method_return_error (context, &e);')
self.b(' }')
self.b('}')
self.b('')
# Implementation registration (in both header and body)
self.h('void %s%s_implement_%s (%s%sClass *klass, %s impl);'
% (self.prefix_, self.node_name_lc, class_member_name,
self.Prefix, self.node_name_mixed, impl_name))
self.b('/**')
self.b(' * %s%s_implement_%s:'
% (self.prefix_, self.node_name_lc, class_member_name))
self.b(' * @klass: A class whose instances implement this interface')
self.b(' * @impl: A callback used to implement the %s D-Bus method'
% dbus_method_name)
self.b(' *')
self.b(' * Register an implementation for the %s method in the vtable'
% dbus_method_name)
self.b(' * of an implementation of this interface. To be called from')
self.b(' * the interface init function.')
self.b(' */')
self.b('void')
self.b('%s%s_implement_%s (%s%sClass *klass, %s impl)'
% (self.prefix_, self.node_name_lc, class_member_name,
self.Prefix, self.node_name_mixed, impl_name))
self.b('{')
self.b(' klass->%s = impl;' % class_member_name)
self.b('}')
self.b('')
# Return convenience function (static inline, in header)
self.h('/**')
self.h(' * %s:' % ret_name)
self.h(' * @context: The D-Bus method invocation context')
for (ctype, name) in out_args:
self.h(' * @%s: %s (FIXME, generate documentation)'
% (name, ctype))
self.h(' *')
self.h(' * Return successfully by calling dbus_g_method_return().')
self.h(' * This inline function exists only to provide type-safety.')
self.h(' */')
tmp = (['DBusGMethodInvocation *context'] +
[ctype + name for (ctype, name) in out_args])
self.h('static inline')
self.h('/* this comment is to stop gtkdoc realising this is static */')
self.h(('void %s (' % ret_name) + (',\n '.join(tmp)) + ');')
self.h('static inline void')
self.h(('%s (' % ret_name) + (',\n '.join(tmp)) + ')')
self.h('{')
tmp = ['context'] + [name for (ctype, name) in out_args]
self.h(' dbus_g_method_return (' + ',\n '.join(tmp) + ');')
self.h('}')
self.h('')
return in_class
def get_signal_const_entry(self, signal):
assert self.node_name_uc is not None
return ('SIGNAL_%s_%s'
% (self.node_name_uc, signal.getAttribute('name')))
def do_signal(self, signal):
assert self.node_name_mixed is not None
in_base_init = []
# for signal: Thing::StuffHappened (s, u)
# we want to emit:
# void tp_svc_thing_emit_stuff_happened (gpointer instance,
# const char *arg0, guint arg1);
dbus_name = signal.getAttribute('name')
ugly_name = signal.getAttribute('tp:name-for-bindings')
if dbus_name != ugly_name.replace('_', ''):
raise AssertionError('Signal %s tp:name-for-bindings (%s) does '
'not match' % (dbus_name, ugly_name))
stub_name = (self.prefix_ + self.node_name_lc + '_emit_' +
ugly_name.lower())
const_name = self.get_signal_const_entry(signal)
# Gather arguments
args = []
for i in signal.getElementsByTagName('arg'):
name = i.getAttribute('name')
dtype = i.getAttribute('type')
tp_type = i.getAttribute('tp:type')
if name:
name = 'arg_' + name
else:
name = 'arg' + str(len(args))
ctype, gtype, marshaller, pointer = type_to_gtype(dtype)
if pointer:
ctype = 'const ' + ctype
struct = (ctype, name, gtype)
args.append(struct)
tmp = (['gpointer instance'] +
[ctype + name for (ctype, name, gtype) in args])
self.h(('void %s (' % stub_name) + (',\n '.join(tmp)) + ');')
# FIXME: emit docs
self.b('/**')
self.b(' * %s:' % stub_name)
self.b(' * @instance: The object implementing this interface')
for (ctype, name, gtype) in args:
self.b(' * @%s: %s (FIXME, generate documentation)'
% (name, ctype))
self.b(' *')
self.b(' * Type-safe wrapper around g_signal_emit to emit the')
self.b(' * %s signal on interface %s.'
% (dbus_name, self.iface_name))
self.b(' */')
self.b('void')
self.b(('%s (' % stub_name) + (',\n '.join(tmp)) + ')')
self.b('{')
self.b(' g_assert (instance != NULL);')
self.b(' g_assert (G_TYPE_CHECK_INSTANCE_TYPE (instance, %s));'
% (self.current_gtype))
tmp = (['instance', '%s_signals[%s]' % (self.node_name_lc, const_name),
'0'] + [name for (ctype, name, gtype) in args])
self.b(' g_signal_emit (' + ',\n '.join(tmp) + ');')
self.b('}')
self.b('')
signal_name = dbus_gutils_wincaps_to_uscore(dbus_name).replace('_',
'-')
in_base_init.append(' /**')
in_base_init.append(' * %s%s::%s:'
% (self.Prefix, self.node_name_mixed, signal_name))
for (ctype, name, gtype) in args:
in_base_init.append(' * @%s: %s (FIXME, generate documentation)'
% (name, ctype))
in_base_init.append(' *')
in_base_init.append(' * The %s D-Bus signal is emitted whenever '
'this GObject signal is.' % dbus_name)
in_base_init.append(' */')
in_base_init.append(' %s_signals[%s] ='
% (self.node_name_lc, const_name))
in_base_init.append(' g_signal_new ("%s",' % signal_name)
in_base_init.append(' G_OBJECT_CLASS_TYPE (klass),')
in_base_init.append(' G_SIGNAL_RUN_LAST|G_SIGNAL_DETAILED,')
in_base_init.append(' 0,')
in_base_init.append(' NULL, NULL,')
in_base_init.append(' %s,'
% signal_to_marshal_name(signal, self.signal_marshal_prefix))
in_base_init.append(' G_TYPE_NONE,')
tmp = ['%d' % len(args)] + [gtype for (ctype, name, gtype) in args]
in_base_init.append(' %s);' % ',\n '.join(tmp))
in_base_init.append('')
return in_base_init
def have_properties(self, nodes):
for node in nodes:
interface = node.getElementsByTagName('interface')[0]
if interface.getElementsByTagName('property'):
return True
return False
def __call__(self):
nodes = self.dom.getElementsByTagName('node')
nodes.sort(cmp_by_name)
self.h('#include <glib-object.h>')
self.h('#include <dbus/dbus-glib.h>')
if self.have_properties(nodes):
self.h('#include <telepathy-glib/dbus-properties-mixin.h>')
self.h('')
self.h('G_BEGIN_DECLS')
self.h('')
self.b('#include "%s.h"' % self.basename)
self.b('')
for header in self.headers:
self.b('#include %s' % header)
self.b('')
for node in nodes:
self.do_node(node)
self.h('')
self.h('G_END_DECLS')
self.b('')
for header in self.end_headers:
self.b('#include %s' % header)
self.h('')
self.b('')
open(self.basename + '.h', 'w').write('\n'.join(self.__header))
open(self.basename + '.c', 'w').write('\n'.join(self.__body))
def cmdline_error():
print """\
usage:
gen-ginterface [OPTIONS] xmlfile Prefix_
options:
--include='<header.h>' (may be repeated)
--include='"header.h"' (ditto)
--include-end='"header.h"' (ditto)
Include extra headers in the generated .c file
--signal-marshal-prefix='prefix'
Use the given prefix on generated signal marshallers (default is
prefix.lower()).
--filename='BASENAME'
Set the basename for the output files (default is prefix.lower()
+ 'ginterfaces')
--not-implemented-func='symbol'
Set action when methods not implemented in the interface vtable are
called. symbol must have signature
void symbol (DBusGMethodInvocation *context)
and return some sort of "not implemented" error via
dbus_g_method_return_error (context, ...)
"""
sys.exit(1)
if __name__ == '__main__':
from getopt import gnu_getopt
options, argv = gnu_getopt(sys.argv[1:], '',
['filename=', 'signal-marshal-prefix=',
'include=', 'include-end=',
'allow-unstable',
'not-implemented-func='])
try:
prefix = argv[1]
except IndexError:
cmdline_error()
basename = prefix.lower() + 'ginterfaces'
signal_marshal_prefix = prefix.lower().rstrip('_')
headers = []
end_headers = []
not_implemented_func = ''
allow_havoc = False
for option, value in options:
if option == '--filename':
basename = value
elif option == '--signal-marshal-prefix':
signal_marshal_prefix = value
elif option == '--include':
if value[0] not in '<"':
value = '"%s"' % value
headers.append(value)
elif option == '--include-end':
if value[0] not in '<"':
value = '"%s"' % value
end_headers.append(value)
elif option == '--not-implemented-func':
not_implemented_func = value
elif option == '--allow-unstable':
allow_havoc = True
try:
dom = xml.dom.minidom.parse(argv[0])
except IndexError:
cmdline_error()
Generator(dom, prefix, basename, signal_marshal_prefix, headers,
end_headers, not_implemented_func, allow_havoc)()
| lgpl-2.1 | 4,937,660,815,411,856,000 | 36.427681 | 83 | 0.513909 | false |
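# Editor's note: a hedged sketch of how glib-ginterface-gen.py is usually driven
# from a build system. The spec file, output basename and symbol prefix are
# illustrative assumptions; the script itself is Python 2-era code, so it is
# invoked with python2 here.
import subprocess

subprocess.check_call([
    'python2', 'glib-ginterface-gen.py',
    '--filename=_gen/svc-channel',           # writes _gen/svc-channel.h / .c
    '--signal-marshal-prefix=_tp_marshal',
    '--include=<telepathy-glib/dbus.h>',
    'spec/Channel.xml',                      # D-Bus introspection XML input
    'Tp_Svc_',                               # generated prefix (must end with '_')
])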
AlpacaDB/chainer | tests/chainer_tests/function_hooks_tests/test_timer.py | 10 | 2986 | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import function_hooks
from chainer import functions
from chainer.functions.connection import linear
from chainer import links
from chainer import testing
from chainer.testing import attr
def check_history(self, t, function_type, return_type):
self.assertIsInstance(t[0], function_type)
self.assertIsInstance(t[1], return_type)
class TestTimerHookToLink(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.l = links.Linear(5, 5)
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def test_name(self):
self.assertEqual(self.h.name, 'TimerHook')
def check_forward(self, x):
with self.h:
self.l(chainer.Variable(x))
self.assertEqual(1, len(self.h.call_history))
check_history(self, self.h.call_history[0],
linear.LinearFunction, float)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.l.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
x = chainer.Variable(x)
y = self.l(x)
y.grad = gy
with self.h:
y.backward()
self.assertEqual(1, len(self.h.call_history))
check_history(self, self.h.call_history[0],
linear.LinearFunction, float)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestTimerHookToFunction(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.f = functions.Exp()
self.f.add_hook(self.h)
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def check_forward(self, x):
self.f(chainer.Variable(x))
self.assertEqual(1, len(self.h.call_history))
check_history(self, self.h.call_history[0], functions.Exp, float)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
    def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
x = chainer.Variable(x)
y = self.f(x)
y.grad = gy
y.backward()
self.assertEqual(2, len(self.h.call_history))
check_history(self, self.h.call_history[1], functions.Exp, float)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| mit | 2,024,507,142,029,024,000 | 28.27451 | 79 | 0.625251 | false |
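# Editor's note: a hedged sketch of the TimerHook usage that the tests above
# exercise; the layer and input sizes mirror the test fixtures but are otherwise
# illustrative assumptions.
import numpy
import chainer
from chainer import function_hooks, links


def time_forward_pass():
    layer = links.Linear(5, 5)
    x = chainer.Variable(numpy.zeros((3, 5), dtype=numpy.float32))
    hook = function_hooks.TimerHook()
    with hook:
        # Every function executed inside this block is timed by the hook.
        layer(x)
    for func, elapsed in hook.call_history:
        print(type(func).__name__, elapsed)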
mikeboers/PyAV | tests/test_doctests.py | 1 | 1491 | from unittest import TestCase
import doctest
import pkgutil
import re
import av
def fix_doctests(suite):
for case in suite._tests:
# Add some more flags.
case._dt_optionflags = (
(case._dt_optionflags or 0) |
doctest.IGNORE_EXCEPTION_DETAIL |
doctest.ELLIPSIS |
doctest.NORMALIZE_WHITESPACE
)
case._dt_test.globs['av'] = av
case._dt_test.globs['video_path'] = av.datasets.curated('pexels/time-lapse-video-of-night-sky-857195.mp4')
for example in case._dt_test.examples:
# Remove b prefix from strings.
if example.want.startswith("b'"):
example.want = example.want[1:]
def register_doctests(mod):
if isinstance(mod, str):
mod = __import__(mod, fromlist=[''])
try:
suite = doctest.DocTestSuite(mod)
except ValueError:
return
fix_doctests(suite)
cls_name = 'Test' + ''.join(x.title() for x in mod.__name__.split('.'))
cls = type(cls_name, (TestCase, ), {})
for test in suite._tests:
        # Bind the current test through a default argument; a plain closure
        # would make every generated method run the last test in the suite.
        def func(self, test=test):
            return test.runTest()
name = str('test_' + re.sub('[^a-zA-Z0-9]+', '_', test.id()).strip('_'))
func.__name__ = name
setattr(cls, name, func)
globals()[cls_name] = cls
for importer, mod_name, ispkg in pkgutil.walk_packages(
path=av.__path__,
prefix=av.__name__ + '.',
onerror=lambda x: None
):
register_doctests(mod_name)
| bsd-3-clause | 3,947,449,277,095,484,000 | 23.442623 | 114 | 0.570758 | false |
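# Editor's note: a hedged, standalone illustration of the underlying pattern --
# collecting a module's doctests with doctest.DocTestSuite() so unittest can run
# them. The target module name 'av.datasets' is an assumption chosen for
# illustration; any importable module works.
import doctest
import unittest


def load_tests(loader, tests, ignore):
    # unittest's load_tests protocol: append the doctest suite so that
    # "python -m unittest" runs the examples alongside regular TestCases.
    try:
        tests.addTests(doctest.DocTestSuite('av.datasets'))
    except ValueError:
        pass  # the module may not contain any doctests
    return tests


if __name__ == '__main__':
    unittest.main()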
mozilla/treeherder | treeherder/etl/artifact.py | 2 | 3467 | import logging
import simplejson as json
from django.db import transaction
from django.db.utils import IntegrityError
from treeherder.model import error_summary
from treeherder.etl.perf import store_performance_artifact
from treeherder.etl.text import astral_filter
from treeherder.model.models import Job, TextLogError
logger = logging.getLogger(__name__)
def store_text_log_summary_artifact(job, text_log_summary_artifact):
"""
Store the contents of the text log summary artifact
"""
errors = json.loads(text_log_summary_artifact['blob'])['errors']
with transaction.atomic():
for error in errors:
obj, created = TextLogError.objects.get_or_create(
job=job,
line_number=error['linenumber'],
line=astral_filter(error['line']),
)
if not created:
logger.warning('duplicate error lines processed for job %s', job.id)
# get error summary immediately (to warm the cache)
error_summary.get_error_summary(job)
def store_job_artifacts(artifact_data):
"""
    Store a list of job artifacts. Each item in artifact_data must be
    in the following format:
{
'type': 'json',
'name': 'my-artifact-name',
# blob can be any kind of structured data
'blob': { 'stuff': [1, 2, 3, 4, 5] },
'job_guid': 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
}
"""
for artifact in artifact_data:
# Determine what type of artifact we have received
if artifact:
artifact_name = artifact.get('name')
if not artifact_name:
logger.error("load_job_artifacts: Unnamed job artifact, skipping")
continue
job_guid = artifact.get('job_guid')
if not job_guid:
logger.error(
"load_job_artifacts: Artifact '%s' with no " "job guid set, skipping",
artifact_name,
)
continue
try:
job = Job.objects.get(guid=job_guid)
except Job.DoesNotExist:
logger.error('load_job_artifacts: No job_id for guid %s', job_guid)
continue
if artifact_name == 'performance_data':
store_performance_artifact(job, artifact)
elif artifact_name == 'text_log_summary':
try:
store_text_log_summary_artifact(job, artifact)
except IntegrityError:
logger.warning(
"Couldn't insert text log information "
"for job with guid %s, this probably "
"means the job was already parsed",
job_guid,
)
else:
logger.warning(
"Unknown artifact type: %s submitted with job %s", artifact_name, job.guid
)
        else:
            logger.error('store_job_artifacts: empty artifact submitted, skipping')
def serialize_artifact_json_blobs(artifacts):
"""
Ensure that JSON artifact blobs passed as dicts are converted to JSON
"""
for artifact in artifacts:
blob = artifact['blob']
if artifact['type'].lower() == 'json' and not isinstance(blob, str):
artifact['blob'] = json.dumps(blob)
return artifacts
| mpl-2.0 | -7,114,368,446,797,099,000 | 33.67 | 95 | 0.56908 | false |
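# Editor's note: a hedged sketch of the payload accepted by store_job_artifacts()
# above. The guid matches the example from the docstring; the error line is a
# placeholder invented for illustration. The final call is left commented out
# because it needs a configured Django environment and an existing Job row.
import json

sample_artifacts = [{
    'type': 'json',
    'name': 'text_log_summary',
    'job_guid': 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33',
    'blob': json.dumps({
        'errors': [
            {'linenumber': 1024, 'line': 'TEST-UNEXPECTED-FAIL | example | failed'},
        ],
    }),
}]

# store_job_artifacts(serialize_artifact_json_blobs(sample_artifacts))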
baylee-d/osf.io | osf/migrations/0163_populate_conference_submissions.py | 10 | 1608 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-06-08 15:12
from __future__ import unicode_literals
import logging
from django.db import migrations
logger = logging.getLogger(__file__)
def forward(state, *args, **kwargs):
"""
    For every conference, fetches all AbstractNodes that have a Tag matching the
    conference endpoint (case-insensitively), and adds these nodes to
    conference.submissions.
"""
Conference = state.get_model('osf', 'Conference')
AbstractNode = state.get_model('osf', 'AbstractNode')
Tag = state.get_model('osf', 'Tag')
# Small number of conferences
for conference in Conference.objects.all():
tags = Tag.objects.filter(system=False, name__iexact=conference.endpoint).values_list('pk', flat=True)
# Not restricting on public/deleted here, just adding all nodes with meeting tags
# and then API will restrict to only public, non-deleted nodes
for node in AbstractNode.objects.filter(tags__in=tags):
conference.submissions.add(node)
logger.info('Finished adding submissions to conferences.')
def backward(state, *args, **kwargs):
Conference = state.get_model('osf', 'Conference')
for conference in Conference.objects.all():
for submission in conference.submissions.all():
conference.submissions.remove(submission)
logger.info('Finished clearing submissions from conferences.')
class Migration(migrations.Migration):
dependencies = [
('osf', '0162_conference_submissions'),
]
operations = [
migrations.RunPython(forward, backward)
]
| apache-2.0 | 5,176,610,038,532,886,000 | 36.395349 | 110 | 0.692786 | false |
elin-moco/ffclub | ffclub/person/migrations/0004_copied_email_data.py | 1 | 4736 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for profile in orm.Person.objects.all():
if not profile.email:
profile.email = profile.user.email
profile.save()
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'person.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '7'}),
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'normal'", 'max_length': '20'}),
'subscribing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['person']
symmetrical = True
| bsd-3-clause | 8,386,407,393,023,537,000 | 64.777778 | 182 | 0.551731 | false |
hkariti/ansible | lib/ansible/modules/cloud/openstack/os_flavor_facts.py | 5 | 7061 | #!/usr/bin/python
# Copyright (c) 2015 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_flavor_facts
short_description: Retrieve facts about one or more flavors
author: "David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Retrieve facts about available OpenStack instance flavors. By default,
facts about ALL flavors are retrieved. Filters can be applied to get
facts for only matching flavors. For example, you can filter on the
amount of RAM available to the flavor, or the number of virtual CPUs
available to the flavor, or both. When specifying multiple filters,
*ALL* filters must match on a flavor before that flavor is returned as
a fact.
notes:
- This module creates a new top-level C(openstack_flavors) fact, which
contains a list of unsorted flavors.
requirements:
- "python >= 2.6"
- "shade"
options:
name:
description:
- A flavor name. Cannot be used with I(ram) or I(vcpus) or I(ephemeral).
required: false
default: None
ram:
description:
- "A string used for filtering flavors based on the amount of RAM
(in MB) desired. This string accepts the following special values:
'MIN' (return flavors with the minimum amount of RAM), and 'MAX'
(return flavors with the maximum amount of RAM)."
- "A specific amount of RAM may also be specified. Any flavors with this
exact amount of RAM will be returned."
- "A range of acceptable RAM may be given using a special syntax. Simply
prefix the amount of RAM with one of these acceptable range values:
'<', '>', '<=', '>='. These values represent less than, greater than,
less than or equal to, and greater than or equal to, respectively."
required: false
default: false
vcpus:
description:
- A string used for filtering flavors based on the number of virtual
CPUs desired. Format is the same as the I(ram) parameter.
required: false
default: false
limit:
description:
- Limits the number of flavors returned. All matching flavors are
returned by default.
required: false
default: None
ephemeral:
description:
- A string used for filtering flavors based on the amount of ephemeral
storage. Format is the same as the I(ram) parameter
required: false
default: false
version_added: "2.3"
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about all available flavors
- os_flavor_facts:
cloud: mycloud
# Gather facts for the flavor named "xlarge-flavor"
- os_flavor_facts:
cloud: mycloud
name: "xlarge-flavor"
# Get all flavors that have exactly 512 MB of RAM.
- os_flavor_facts:
cloud: mycloud
ram: "512"
# Get all flavors that have 1024 MB or more of RAM.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
# Get a single flavor that has the minimum amount of RAM. Using the 'limit'
# option will guarantee only a single flavor is returned.
- os_flavor_facts:
cloud: mycloud
ram: "MIN"
limit: 1
# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
# Get all flavors with 1024 MB of RAM or more, exactly 2 virtual CPUs, and
# less than 30gb of ephemeral storage.
- os_flavor_facts:
cloud: mycloud
ram: ">=1024"
vcpus: "2"
ephemeral: "<30"
'''
RETURN = '''
openstack_flavors:
description: Dictionary describing the flavors.
returned: On success.
type: complex
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
            description: Whether the flavor is accessible to the public.
returned: success
type: bool
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
ram=dict(required=False, default=None),
vcpus=dict(required=False, default=None),
limit=dict(required=False, default=None, type='int'),
ephemeral=dict(required=False, default=None),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['name', 'ram'],
['name', 'vcpus'],
['name', 'ephemeral']
]
)
module = AnsibleModule(argument_spec, **module_kwargs)
name = module.params['name']
vcpus = module.params['vcpus']
ram = module.params['ram']
ephemeral = module.params['ephemeral']
limit = module.params['limit']
filters = {}
if vcpus:
filters['vcpus'] = vcpus
if ram:
filters['ram'] = ram
if ephemeral:
filters['ephemeral'] = ephemeral
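    # Illustrative note: for a hypothetical play using `ram: ">=1024"` and
    # `vcpus: "2"`, the dict built above would be
    # {'ram': '>=1024', 'vcpus': '2'}, which is later handed to
    # cloud.range_search() below to evaluate the range expressions.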
if filters:
# Range search added in 1.5.0
min_version = '1.5.0'
else:
min_version = None
shade, cloud = openstack_cloud_from_module(module, min_version=min_version)
try:
if name:
flavors = cloud.search_flavors(filters={'name': name})
else:
flavors = cloud.list_flavors()
if filters:
flavors = cloud.range_search(flavors, filters)
if limit is not None:
flavors = flavors[:limit]
module.exit_json(changed=False,
ansible_facts=dict(openstack_flavors=flavors))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 | 2,900,197,149,758,664,700 | 28.919492 | 125 | 0.618184 | false |
bowang/tensorflow | tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py | 47 | 4039 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataProvider that provides data from a Dataset.
DatasetDataProviders provide data from datasets. The provider can be configured
to use multiple readers simultaneously or read via a single reader.
Additionally, the data being read can be optionally shuffled.
For example, to read data using a single thread without shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.get_split('train'),
shuffle=False)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
To read data using multiple readers simultaneously with shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.Dataset(),
num_readers=10,
shuffle=True)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
Equivalently, one may request different fields of the same sample separately:
[images] = pascal_voc_data_provider.get(['images'])
[labels] = pascal_voc_data_provider.get(['labels'])
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.slim.python.slim.data import data_provider
from tensorflow.contrib.slim.python.slim.data import parallel_reader
class DatasetDataProvider(data_provider.DataProvider):
def __init__(self,
dataset,
num_readers=1,
reader_kwargs=None,
shuffle=True,
num_epochs=None,
common_queue_capacity=256,
common_queue_min=128,
record_key='record_key',
seed=None,
scope=None):
"""Creates a DatasetDataProvider.
Args:
dataset: An instance of the Dataset class.
num_readers: The number of parallel readers to use.
reader_kwargs: An optional dict of kwargs for the reader.
shuffle: Whether to shuffle the data sources and common queue when
reading.
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
common_queue_capacity: The capacity of the common queue.
common_queue_min: The minimum number of elements in the common queue after
a dequeue.
record_key: The item name to use for the dataset record keys in the
provided tensors.
seed: The seed to use if shuffling.
scope: Optional name scope for the ops.
Raises:
ValueError: If `record_key` matches one of the items in the dataset.
"""
key, data = parallel_reader.parallel_read(
dataset.data_sources,
reader_class=dataset.reader,
num_epochs=num_epochs,
num_readers=num_readers,
reader_kwargs=reader_kwargs,
shuffle=shuffle,
capacity=common_queue_capacity,
min_after_dequeue=common_queue_min,
seed=seed,
scope=scope)
items = dataset.decoder.list_items()
tensors = dataset.decoder.decode(data, items)
if record_key in items:
raise ValueError('The item name used for `record_key` cannot also be '
'used for a dataset item: %s', record_key)
items.append(record_key)
tensors.append(key)
super(DatasetDataProvider, self).__init__(
items_to_tensors=dict(zip(items, tensors)),
num_samples=dataset.num_samples)
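# Usage sketch (assuming `my_dataset` is a slim Dataset instance whose decoder
# exposes an 'image' item): the record key registered above can be requested
# alongside decoded items, e.g.
#
#   provider = DatasetDataProvider(my_dataset, num_readers=4, shuffle=True)
#   [image, key] = provider.get(['image', 'record_key'])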
| apache-2.0 | 3,926,764,833,057,020,400 | 36.747664 | 80 | 0.672939 | false |
legumeinfo/lis_context_viewer | search/microservices/chromosome_region/chromosomeregion_pb2.py | 2 | 5741 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: chromosomeregion.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='chromosomeregion.proto',
package='chromosomeregion',
syntax='proto3',
serialized_options=b'\n\030lis.gcv.chromosomeregionB\025ChromosomeRegionProtoP\001\242\002\002GS',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x16\x63hromosomeregion.proto\x12\x10\x63hromosomeregion\"@\n\rRegionRequest\x12\x12\n\nchromosome\x18\x01 \x01(\t\x12\r\n\x05start\x18\x02 \x01(\r\x12\x0c\n\x04stop\x18\x03 \x01(\r\".\n\x0bRegionReply\x12\x0c\n\x04gene\x18\x01 \x01(\t\x12\x11\n\tneighbors\x18\x02 \x01(\x05\x32\x61\n\x10\x43hromosomeRegion\x12M\n\tGetRegion\x12\x1f.chromosomeregion.RegionRequest\x1a\x1d.chromosomeregion.RegionReply\"\x00\x42\x38\n\x18lis.gcv.chromosomeregionB\x15\x43hromosomeRegionProtoP\x01\xa2\x02\x02GSb\x06proto3'
)
_REGIONREQUEST = _descriptor.Descriptor(
name='RegionRequest',
full_name='chromosomeregion.RegionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='chromosome', full_name='chromosomeregion.RegionRequest.chromosome', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start', full_name='chromosomeregion.RegionRequest.start', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stop', full_name='chromosomeregion.RegionRequest.stop', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=108,
)
_REGIONREPLY = _descriptor.Descriptor(
name='RegionReply',
full_name='chromosomeregion.RegionReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='gene', full_name='chromosomeregion.RegionReply.gene', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='neighbors', full_name='chromosomeregion.RegionReply.neighbors', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=110,
serialized_end=156,
)
DESCRIPTOR.message_types_by_name['RegionRequest'] = _REGIONREQUEST
DESCRIPTOR.message_types_by_name['RegionReply'] = _REGIONREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RegionRequest = _reflection.GeneratedProtocolMessageType('RegionRequest', (_message.Message,), {
'DESCRIPTOR' : _REGIONREQUEST,
'__module__' : 'chromosomeregion_pb2'
# @@protoc_insertion_point(class_scope:chromosomeregion.RegionRequest)
})
_sym_db.RegisterMessage(RegionRequest)
RegionReply = _reflection.GeneratedProtocolMessageType('RegionReply', (_message.Message,), {
'DESCRIPTOR' : _REGIONREPLY,
'__module__' : 'chromosomeregion_pb2'
# @@protoc_insertion_point(class_scope:chromosomeregion.RegionReply)
})
_sym_db.RegisterMessage(RegionReply)
DESCRIPTOR._options = None
_CHROMOSOMEREGION = _descriptor.ServiceDescriptor(
name='ChromosomeRegion',
full_name='chromosomeregion.ChromosomeRegion',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=158,
serialized_end=255,
methods=[
_descriptor.MethodDescriptor(
name='GetRegion',
full_name='chromosomeregion.ChromosomeRegion.GetRegion',
index=0,
containing_service=None,
input_type=_REGIONREQUEST,
output_type=_REGIONREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_CHROMOSOMEREGION)
DESCRIPTOR.services_by_name['ChromosomeRegion'] = _CHROMOSOMEREGION
# @@protoc_insertion_point(module_scope)
| gpl-3.0 | -110,379,991,950,546,560 | 35.335443 | 525 | 0.732973 | false |
CyberReboot/vent | vent/menus/help.py | 2 | 7957 | import npyscreen
class HelpForm(npyscreen.ActionFormWithMenus):
""" Help form for the Vent CLI """
@staticmethod
def switch(page):
def popup(page):
info_str = ''
if page == 'Menu':
info_str = """
Menu interactions are simple! Here is a quick guide to get you
familiar.
Navigation of a page: Up, Down, Left, Right, or TAB. Note that
SHIFT+TAB can be used to reverse cycle!
Editing a page: Simply navigating to an editable field and
            typing should be enough to edit most pages. ENTER can be
used to select or deselect options, or to open drop down menus.
CTRL+T: Will toggle between two pages.
CTRL+Q: Will take you back to main. Or from main, will exit the
application.
CTRL+X: Can be used to open up menus on certain pages.
"""
elif page == 'Plugins':
info_str = """
Plugins are user created software hosted on GitHub that Vent
can install and run. Plugins are developed following a hybrid
of requirements specified both by Docker and Vent. Vent uses
Docker to run all plugins so all plugins should be designed to
run as a system of containers. Knowledge of linking docker
containers may be necessary for more complex tasks that require
creating multiple containers for your plugin. For Help on
building Plugins, check out the Working with Plugins section in
our Help Menu."""
elif page == 'Tools':
info_str = """
Tools are the individual building blocks of a Plugin. Each tool
should follow S.R.P, and over the entirety of the Plugin should
            be able to accomplish any task desired! For Help on building
Tools, check out the Working with Plugins section in our Help
Menu."""
elif page == 'Filetypes':
info_str = """
The filetypes Vent can support are entirely based on the
installed Plugins. Each plugin is ultimately responsible for
doing some form of processing."""
elif page == 'Status':
info_str = """
You'll notice Vent offers several status types amongst
tools/plugins. Built means that each tool has a Docker image
successfully built based off the provided specs for that
tool/plugin. Enabled/Disabled correspond to user defined
settings to enable or disable a tool or set of tools (plugin).
Installed means simply that the plugin has been cloned from
GitHub and installed to the Vent filesystem. No Docker image
has been created yet. Running means that a Docker container has
successfully been created from the corresponding Docker image
for a specific tool in a Plugin."""
elif page == 'Plugin Adding':
info_str = """
To add a plugin that you've created, simply open up the Menu
from the main page using ^X. After, press "p" to open up the
Plugin menu and then "a" to drop down into our Plugin
installation screen. To add a Plugin, we require a valid
GitHub repository. If your repository is private, you will
need to enter a username and password. Once you have finished
that, select OK. If we are successfully able to connect, you
should see your repositories branches listed in our Plugin
options menu. From here, press TAB to cycle between the
options, and ENTER to select different branches to install and
build from. You can even choose a specific commit if you like!
Once you've selected those tools and selected OK, Vent will
notify you about all tools it has detected. For more
information about how Vent detects tools, see our "Building a
Plugin" section. You may select or deselect the tools you wish
to install as part of your Plugin. When you are done, select
OK. If everything works you should get a successful Add. Select
OK, to be returned to the main screen!"""
elif page == 'Plugin Building':
# !! TODO
info_str = """Stay tuned!"""
npyscreen.notify_confirm(info_str,
title='About Vent ' + page,
wide=True)
popup(page)
def create(self):
""" Override method for creating FormBaseNew form """
self.add_handlers({'^T': self.change_forms, '^Q': self.exit})
self.addfield = self.add(npyscreen.TitleFixedText, name='Vent',
labelColor='DEFAULT', editable=False)
self.multifield1 = self.add(npyscreen.MultiLineEdit, editable=False,
value="""
About Vent
Vent is a library that includes a CLI designed to serve as a general
platform for analyzing network traffic. Built with some basic
functionality, Vent serves as a user-friendly platform to build custom
plugins on to perform user-defined processing on incoming network data.
Vent supports any filetype, but only processes ones based on the types
of plugins installed for that instance of Vent. Simply create your
plugins, point vent to them & install them, and drop a file in vent to
begin processing!
For a detailed explanation of Vent Concepts, check out the General
section in our Help Menu. Topics include: Vent Plugins, Tools,
Filetypes, and Statuses! Use ^X to access the menu and ESC to
close it.
Select CANCEL or ^Q to return to the Main Menu. Select OK or ^T to
return to your previous menu.
PRO TIP: You can use TAB to cycle through options.
""")
self.m2 = self.add_menu(name='Vent Basics', shortcut='b')
self.m2.addItem(text='Menu Interactions', onSelect=HelpForm.switch,
arguments=['Menu'], shortcut='m')
self.m2.addItem(text='Plugins', onSelect=HelpForm.switch,
arguments=['Plugins'], shortcut='p')
self.m2.addItem(text='Tools', onSelect=HelpForm.switch,
arguments=['Tools'], shortcut='t')
self.m2.addItem(text='Filetypes', onSelect=HelpForm.switch,
arguments=['Filetypes'], shortcut='f')
self.m2.addItem(text='Statuses', onSelect=HelpForm.switch,
arguments=['Status'], shortcut='s')
self.m3 = self.add_menu(name='Working with Plugins', shortcut='p')
self.m3.addItem(text='Adding a Plugin', onSelect=HelpForm.switch,
arguments=['Plugin Adding'], shortcut='a')
self.m3.addItem(text='Building a Plugin', onSelect=HelpForm.switch,
arguments=['Plugin Building'], shortcut='b')
def exit(self, *args, **keywords):
self.parentApp.switchForm('MAIN')
def on_cancel(self):
self.exit()
def on_ok(self):
self.change_forms()
def change_forms(self, *args, **keywords):
"""
Checks which form is currently displayed and toggles to the other one
"""
# Returns to previous Form in history if there is a previous Form
try:
self.parentApp.switchFormPrevious()
except Exception as e: # pragma: no cover
self.parentApp.switchForm('MAIN')
| apache-2.0 | 4,892,065,726,473,548,000 | 51.006536 | 79 | 0.588538 | false |
tmn505/source | scripts/dl_github_archive.py | 15 | 14565 | #!/usr/bin/env python
#
# Copyright (c) 2018 Yousong Zhou <[email protected]>
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
import argparse
import calendar
import datetime
import errno
import fcntl
import hashlib
import json
import os
import os.path
import re
import shutil
import ssl
import subprocess
import sys
import time
import urllib2
TMPDIR = os.environ.get('TMP_DIR') or '/tmp'
TMPDIR_DL = os.path.join(TMPDIR, 'dl')
class PathException(Exception): pass
class DownloadGitHubError(Exception): pass
class Path(object):
"""Context class for preparing and cleaning up directories.
    If ``preclean`` is ``False``, ``path`` will NOT be removed on context enter.
If ``path`` ``isdir``, then it will be created on context enter.
If ``keep`` is True, then ``path`` will NOT be removed on context exit
"""
def __init__(self, path, isdir=True, preclean=False, keep=False):
self.path = path
self.isdir = isdir
self.preclean = preclean
self.keep = keep
def __enter__(self):
if self.preclean:
self.rm_all(self.path)
if self.isdir:
self.mkdir_all(self.path)
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self.keep:
self.rm_all(self.path)
@staticmethod
def mkdir_all(path):
"""Same as mkdir -p."""
names = os.path.split(path)
p = ''
for name in names:
p = os.path.join(p, name)
Path._mkdir(p)
@staticmethod
def _rmdir_dir(dir_):
names = Path._listdir(dir_)
for name in names:
p = os.path.join(dir_, name)
Path.rm_all(p)
Path._rmdir(dir_)
@staticmethod
def _mkdir(path):
Path._os_func(os.mkdir, path, errno.EEXIST)
@staticmethod
def _rmdir(path):
Path._os_func(os.rmdir, path, errno.ENOENT)
@staticmethod
def _remove(path):
Path._os_func(os.remove, path, errno.ENOENT)
@staticmethod
def _listdir(path):
return Path._os_func(os.listdir, path, errno.ENOENT, default=[])
@staticmethod
def _os_func(func, path, errno, default=None):
"""Call func(path) in an idempotent way.
On exception ``ex``, if the type is OSError and ``ex.errno == errno``,
return ``default``, otherwise, re-raise
"""
try:
return func(path)
except OSError as e:
if e.errno == errno:
return default
else:
raise
@staticmethod
def rm_all(path):
"""Same as rm -r."""
if os.path.islink(path):
Path._remove(path)
elif os.path.isdir(path):
Path._rmdir_dir(path)
else:
Path._remove(path)
@staticmethod
def untar(path, into=None):
"""Extract tarball at ``path`` into subdir ``into``.
return subdir name if and only if there exists one, otherwise raise PathException
"""
args = ('tar', '-C', into, '-xzf', path, '--no-same-permissions')
subprocess.check_call(args, preexec_fn=lambda: os.umask(0o22))
dirs = os.listdir(into)
if len(dirs) == 1:
return dirs[0]
else:
raise PathException('untar %s: expecting a single subdir, got %s' % (path, dirs))
@staticmethod
def tar(path, subdir, into=None, ts=None):
"""Pack ``path`` into tarball ``into``."""
# --sort=name requires a recent build of GNU tar
args = ['tar', '--numeric-owner', '--owner=0', '--group=0', '--sort=name']
args += ['-C', path, '-cf', into, subdir]
envs = os.environ.copy()
if ts is not None:
args.append('--mtime=@%d' % ts)
if into.endswith('.xz'):
envs['XZ_OPT'] = '-7e'
args.append('-J')
elif into.endswith('.bz2'):
args.append('-j')
elif into.endswith('.gz'):
args.append('-z')
envs['GZIP'] = '-n'
else:
raise PathException('unknown compression type %s' % into)
subprocess.check_call(args, env=envs)
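# Usage sketch (hypothetical paths): Path is meant to be used as a context
# manager; the staging directory below would be created on enter and removed
# on exit because ``keep`` defaults to False.
#
#   with Path('/tmp/example.untar', preclean=True) as staging:
#       subdir = Path.untar('/tmp/example.tar.gz', into=staging.path)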
class GitHubCommitTsCache(object):
__cachef = 'github.commit.ts.cache'
__cachen = 2048
def __init__(self):
Path.mkdir_all(TMPDIR_DL)
self.cachef = os.path.join(TMPDIR_DL, self.__cachef)
self.cache = {}
def get(self, k):
"""Get timestamp with key ``k``."""
fileno = os.open(self.cachef, os.O_RDONLY | os.O_CREAT)
with os.fdopen(fileno) as fin:
try:
fcntl.lockf(fileno, fcntl.LOCK_SH)
self._cache_init(fin)
if k in self.cache:
ts = self.cache[k][0]
return ts
finally:
fcntl.lockf(fileno, fcntl.LOCK_UN)
return None
def set(self, k, v):
"""Update timestamp with ``k``."""
fileno = os.open(self.cachef, os.O_RDWR | os.O_CREAT)
with os.fdopen(fileno, 'wb+') as f:
try:
fcntl.lockf(fileno, fcntl.LOCK_EX)
self._cache_init(f)
self.cache[k] = (v, int(time.time()))
self._cache_flush(f)
finally:
fcntl.lockf(fileno, fcntl.LOCK_UN)
def _cache_init(self, fin):
for line in fin:
k, ts, updated = line.split()
ts = int(ts)
updated = int(updated)
self.cache[k] = (ts, updated)
def _cache_flush(self, fout):
cache = sorted(self.cache.iteritems(), cmp=lambda a, b: b[1][1] - a[1][1])
cache = cache[:self.__cachen]
self.cache = {}
os.ftruncate(fout.fileno(), 0)
fout.seek(0, os.SEEK_SET)
for k, ent in cache:
ts = ent[0]
updated = ent[1]
line = '{0} {1} {2}\n'.format(k, ts, updated)
fout.write(line)
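# Cache file layout note: each line written by _cache_flush() above is
# "<key> <commit_ts> <updated_ts>", for example a hypothetical entry:
#   /repos/example/project/commits/0123abc 1514764800 1520000000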
class DownloadGitHubTarball(object):
    """Download and repack archive tarball from GitHub.
Compared with the method of packing after cloning the whole repo, this
    method is more friendly to users with a fragile internet connection.
    However, there are limitations with this method:
- GitHub imposes a 60 reqs/hour limit for unauthenticated API access.
This affects fetching commit date for reproducible tarballs. Download
through the archive link is not affected.
- GitHub archives do not contain source codes for submodules.
    - GitHub archives seem to respect .gitattributes and ignore paths with
export-ignore attributes.
For the first two issues, the method will fail loudly to allow fallback to
clone-then-pack method.
As for the 3rd issue, to make sure that this method only produces identical
tarballs as the fallback method, we require the expected hash value to be
supplied. That means the first tarball will need to be prepared by the
    clone-then-pack method.
"""
__repo_url_regex = re.compile(r'^(?:https|git)://github.com/(?P<owner>[^/]+)/(?P<repo>[^/]+)')
def __init__(self, args):
self.dl_dir = args.dl_dir
self.version = args.version
self.subdir = args.subdir
self.source = args.source
self.url = args.url
self._init_owner_repo()
self.xhash = args.hash
self._init_hasher()
self.commit_ts = None # lazy load commit timestamp
self.commit_ts_cache = GitHubCommitTsCache()
self.name = 'github-tarball'
def download(self):
"""Download and repack GitHub archive tarball."""
self._init_commit_ts()
with Path(TMPDIR_DL, keep=True) as dir_dl:
# fetch tarball from GitHub
tarball_path = os.path.join(dir_dl.path, self.subdir + '.tar.gz.dl')
with Path(tarball_path, isdir=False):
self._fetch(tarball_path)
# unpack
d = os.path.join(dir_dl.path, self.subdir + '.untar')
with Path(d, preclean=True) as dir_untar:
tarball_prefix = Path.untar(tarball_path, into=dir_untar.path)
dir0 = os.path.join(dir_untar.path, tarball_prefix)
dir1 = os.path.join(dir_untar.path, self.subdir)
# submodules check
if self._has_submodule(dir0):
raise self._error('Fetching submodules is not yet supported')
# rename subdir
os.rename(dir0, dir1)
# repack
into=os.path.join(TMPDIR_DL, self.source)
Path.tar(dir_untar.path, self.subdir, into=into, ts=self.commit_ts)
try:
self._hash_check(into)
except Exception:
Path.rm_all(into)
raise
# move to target location
file1 = os.path.join(self.dl_dir, self.source)
if into != file1:
shutil.move(into, file1)
def _has_submodule(self, dir_):
m = os.path.join(dir_, '.gitmodules')
try:
st = os.stat(m)
return st.st_size > 0
except OSError as e:
return e.errno != errno.ENOENT
def _init_owner_repo(self):
m = self.__repo_url_regex.search(self.url)
if m is None:
raise self._error('Invalid github url: {}'.format(self.url))
owner = m.group('owner')
repo = m.group('repo')
if repo.endswith('.git'):
repo = repo[:-4]
self.owner = owner
self.repo = repo
def _init_hasher(self):
xhash = self.xhash
if len(xhash) == 64:
self.hasher = hashlib.sha256()
elif len(xhash) == 32:
self.hasher = hashlib.md5()
else:
raise self._error('Requires sha256sum for verification')
self.xhash = xhash
def _hash_check(self, f):
with open(f, 'rb') as fin:
while True:
d = fin.read(4096)
if not d:
break
self.hasher.update(d)
xhash = self.hasher.hexdigest()
if xhash != self.xhash:
raise self._error('Wrong hash (probably caused by .gitattributes), expecting {}, got {}'.format(self.xhash, xhash))
def _init_commit_ts(self):
if self.commit_ts is not None:
return
# GitHub provides 2 APIs[1,2] for fetching commit data. API[1] is more
# terse while API[2] provides more verbose info such as commit diff
# etc. That's the main reason why API[1] is preferred: the response
# size is predictable.
#
# However, API[1] only accepts complete commit sha1sum as the parameter
# while API[2] is more liberal accepting also partial commit id and
# tags, etc.
#
# [1] Get a single commit, Repositories, https://developer.github.com/v3/repos/commits/#get-a-single-commit
# [2] Git Commits, Git Data, https://developer.github.com/v3/git/commits/#get-a-commit
apis = [
{
'url': self._make_repo_url_path('git', 'commits', self.version),
'attr_path': ('committer', 'date'),
}, {
'url': self._make_repo_url_path('commits', self.version),
'attr_path': ('commit', 'committer', 'date'),
},
]
version_is_sha1sum = len(self.version) == 40
if not version_is_sha1sum:
apis.insert(0, apis.pop())
for api in apis:
url = api['url']
attr_path = api['attr_path']
try:
ct = self.commit_ts_cache.get(url)
if ct is not None:
self.commit_ts = ct
return
ct = self._init_commit_ts_remote_get(url, attr_path)
self.commit_ts = ct
self.commit_ts_cache.set(url, ct)
return
except Exception:
pass
raise self._error('Cannot fetch commit ts: {}'.format(url))
def _init_commit_ts_remote_get(self, url, attrpath):
resp = self._make_request(url)
data = resp.read()
date = json.loads(data)
for attr in attrpath:
date = date[attr]
date = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ')
date = date.timetuple()
ct = calendar.timegm(date)
return ct
def _fetch(self, path):
"""Fetch tarball of the specified version ref."""
ref = self.version
url = self._make_repo_url_path('tarball', ref)
resp = self._make_request(url)
with open(path, 'wb') as fout:
while True:
d = resp.read(4096)
if not d:
break
fout.write(d)
def _make_repo_url_path(self, *args):
url = '/repos/{0}/{1}'.format(self.owner, self.repo)
if args:
url += '/' + '/'.join(args)
return url
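    # Illustrative note: with hypothetical owner "example" and repo "project",
    # _make_repo_url_path('tarball', 'v1.0') returns
    # '/repos/example/project/tarball/v1.0', which _make_request() resolves
    # against https://api.github.com.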
def _make_request(self, path):
"""Request GitHub API endpoint on ``path``."""
url = 'https://api.github.com' + path
headers = {
'Accept': 'application/vnd.github.v3+json',
'User-Agent': 'OpenWrt',
}
req = urllib2.Request(url, headers=headers)
sslcontext = ssl._create_unverified_context()
fileobj = urllib2.urlopen(req, context=sslcontext)
return fileobj
def _error(self, msg):
return DownloadGitHubError('{}: {}'.format(self.source, msg))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dl-dir', default=os.getcwd(), help='Download dir')
parser.add_argument('--url', help='Download URL')
parser.add_argument('--subdir', help='Source code subdir name')
parser.add_argument('--version', help='Source code version')
parser.add_argument('--source', help='Source tarball filename')
parser.add_argument('--hash', help='Source tarball\'s expected sha256sum')
args = parser.parse_args()
try:
method = DownloadGitHubTarball(args)
method.download()
except Exception as ex:
sys.stderr.write('{}: Download from {} failed\n'.format(args.source, args.url))
sys.stderr.write('{}\n'.format(ex))
sys.exit(1)
if __name__ == '__main__':
main()
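# Invocation sketch (hypothetical values only):
#   python dl_github_archive.py --dl-dir dl \
#       --url https://github.com/example/project.git \
#       --subdir project-2018-01-01-0123abc --version 0123abc0123abc... \
#       --source project-2018-01-01-0123abc.tar.xz \
#       --hash <expected sha256 of the repacked tarball>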
| gpl-2.0 | 660,150,907,824,915,300 | 33.11007 | 127 | 0.549125 | false |
ctalbert/mozharness | mozharness/base/vcs/vcsbase.py | 1 | 3096 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""Generic VCS support.
"""
from copy import deepcopy
import os
import sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.dirname(sys.path[0]))))
from mozharness.base.errors import VCSException
from mozharness.base.script import BaseScript
from mozharness.base.vcs.mercurial import MercurialVCS
# Update this with supported VCS name : VCS object
VCS_DICT = {
'hg': MercurialVCS,
}
# VCSMixin {{{1
class VCSMixin(object):
"""Basic VCS methods that are vcs-agnostic.
The vcs_class handles all the vcs-specific tasks.
"""
def vcs_checkout(self, vcs=None, **kwargs):
""" Check out a single repo.
"""
c = self.config
if not vcs:
if c.get('default_vcs'):
vcs = c['default_vcs']
else:
try:
vcs = self.default_vcs
except AttributeError:
pass
vcs_class = VCS_DICT.get(vcs)
if not vcs_class:
            self.error("Running vcs_checkout with kwargs %s" % str(kwargs))
raise VCSException, "No VCS set!"
# need a better way to do this.
if 'dest' not in kwargs:
kwargs['dest'] = os.path.basename(kwargs['repo'])
if 'vcs_share_base' not in kwargs:
kwargs['vcs_share_base'] = c.get('vcs_share_base')
vcs_obj = vcs_class(
log_obj=self.log_obj,
config=self.config,
vcs_config=kwargs,
)
got_revision = vcs_obj.ensure_repo_and_revision()
if got_revision:
return got_revision
else:
raise VCSException, "No got_revision from ensure_repo_and_revision()"
def vcs_checkout_repos(self, repo_list, parent_dir=None,
tag_override=None, **kwargs):
"""Check out a list of repos.
"""
orig_dir = os.getcwd()
c = self.config
if not parent_dir:
parent_dir = os.path.join(c['base_work_dir'], c['work_dir'])
self.mkdir_p(parent_dir)
self.chdir(parent_dir)
try:
for repo_dict in repo_list:
kwargs = deepcopy(repo_dict)
if tag_override:
kwargs['revision'] = tag_override
self.vcs_checkout(**kwargs)
finally:
self.chdir(orig_dir)
class VCSScript(VCSMixin, BaseScript):
def __init__(self, **kwargs):
super(VCSScript, self).__init__(**kwargs)
# Specific VCS stubs {{{1
# For ease of use.
# This is here instead of mercurial.py because importing MercurialVCS into
# vcsbase from mercurial, and importing VCSScript into mercurial from
# vcsbase, was giving me issues.
class MercurialScript(VCSScript):
default_vcs = 'hg'
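# Usage sketch (hypothetical repository URL): a script subclassing
# MercurialScript could check out several repos in one call, e.g.
#
#   self.vcs_checkout_repos(
#       [{'repo': 'https://hg.example.org/build/tools', 'revision': 'default'}],
#       parent_dir='build')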
# __main__ {{{1
if __name__ == '__main__':
pass
| mpl-2.0 | -1,395,021,398,997,968,600 | 30.917526 | 82 | 0.585271 | false |
x2nie/odoo | addons/gamification/models/challenge.py | 13 | 42861 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import ustr, DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from datetime import date, datetime, timedelta
import calendar
import logging
_logger = logging.getLogger(__name__)
# display top 3 in ranking, could be db variable
MAX_VISIBILITY_RANKING = 3
def start_end_date_for_period(period, default_start_date=False, default_end_date=False):
"""Return the start and end date for a goal period based on today
:return: (start_date, end_date), datetime.date objects, False if the period is
not defined or unknown"""
today = date.today()
if period == 'daily':
start_date = today
end_date = start_date
elif period == 'weekly':
delta = timedelta(days=today.weekday())
start_date = today - delta
end_date = start_date + timedelta(days=7)
elif period == 'monthly':
month_range = calendar.monthrange(today.year, today.month)
start_date = today.replace(day=1)
end_date = today.replace(day=month_range[1])
elif period == 'yearly':
start_date = today.replace(month=1, day=1)
end_date = today.replace(month=12, day=31)
else: # period == 'once':
start_date = default_start_date # for manual goal, start each time
end_date = default_end_date
if start_date and end_date:
return (datetime.strftime(start_date, DF), datetime.strftime(end_date, DF))
else:
return (start_date, end_date)
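# Illustrative note: if today were 2014-03-15, start_end_date_for_period('monthly')
# would return the pair of strings ('2014-03-01', '2014-03-31').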
class gamification_challenge(osv.Model):
"""Gamification challenge
    Set of predefined objectives assigned to people with rules for recurrence and
rewards
    If 'user_ids' is defined and 'period' is different from 'once', the set will
be assigned to the users for each period (eg: every 1st of each month if
'monthly' is selected)
"""
_name = 'gamification.challenge'
_description = 'Gamification challenge'
_inherit = 'mail.thread'
def _get_next_report_date(self, cr, uid, ids, field_name, arg, context=None):
"""Return the next report date based on the last report date and report
period.
:return: a string in DEFAULT_SERVER_DATE_FORMAT representing the date"""
res = {}
for challenge in self.browse(cr, uid, ids, context=context):
last = datetime.strptime(challenge.last_report_date, DF).date()
if challenge.report_message_frequency == 'daily':
next = last + timedelta(days=1)
res[challenge.id] = next.strftime(DF)
elif challenge.report_message_frequency == 'weekly':
next = last + timedelta(days=7)
res[challenge.id] = next.strftime(DF)
elif challenge.report_message_frequency == 'monthly':
month_range = calendar.monthrange(last.year, last.month)
next = last.replace(day=month_range[1]) + timedelta(days=1)
res[challenge.id] = next.strftime(DF)
elif challenge.report_message_frequency == 'yearly':
res[challenge.id] = last.replace(year=last.year + 1).strftime(DF)
# frequency == 'once', reported when closed only
else:
res[challenge.id] = False
return res
def _get_categories(self, cr, uid, context=None):
return [
            ('hr', 'Human Resources / Engagement'),
('other', 'Settings / Gamification Tools'),
]
def _get_report_template(self, cr, uid, context=None):
try:
return self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'simple_report_template')[1]
except ValueError:
return False
_order = 'end_date, start_date, name, id'
_columns = {
'name': fields.char('Challenge Name', required=True, translate=True),
'description': fields.text('Description', translate=True),
'state': fields.selection([
('draft', 'Draft'),
('inprogress', 'In Progress'),
('done', 'Done'),
], copy=False,
string='State', required=True, track_visibility='onchange'),
'manager_id': fields.many2one('res.users',
string='Responsible', help="The user responsible for the challenge."),
'user_ids': fields.many2many('res.users', 'gamification_challenge_users_rel',
string='Users',
help="List of users participating to the challenge"),
'user_domain': fields.char('User domain', help="Alternative to a list of users"),
'period': fields.selection([
('once', 'Non recurring'),
('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('yearly', 'Yearly')
],
string='Periodicity',
        help='Period of automatic goal assignment. If none is selected, should be launched manually.',
required=True),
'start_date': fields.date('Start Date',
help="The day a new challenge will be automatically started. If no periodicity is set, will use this date as the goal start date."),
'end_date': fields.date('End Date',
help="The day a new challenge will be automatically closed. If no periodicity is set, will use this date as the goal end date."),
'invited_user_ids': fields.many2many('res.users', 'gamification_invited_user_ids_rel',
string="Suggest to users"),
'line_ids': fields.one2many('gamification.challenge.line', 'challenge_id',
string='Lines',
help="List of goals that will be set",
required=True, copy=True),
        'reward_id': fields.many2one('gamification.badge', string="For Every Succeeding User"),
'reward_first_id': fields.many2one('gamification.badge', string="For 1st user"),
'reward_second_id': fields.many2one('gamification.badge', string="For 2nd user"),
'reward_third_id': fields.many2one('gamification.badge', string="For 3rd user"),
'reward_failure': fields.boolean('Reward Bests if not Succeeded?'),
'reward_realtime': fields.boolean('Reward as soon as every goal is reached',
help="With this option enabled, a user can receive a badge only once. The top 3 badges are still rewarded only at the end of the challenge."),
'visibility_mode': fields.selection([
('personal', 'Individual Goals'),
('ranking', 'Leader Board (Group Ranking)'),
],
string="Display Mode", required=True),
'report_message_frequency': fields.selection([
('never', 'Never'),
('onchange', 'On change'),
('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('yearly', 'Yearly')
],
string="Report Frequency", required=True),
'report_message_group_id': fields.many2one('mail.group',
string='Send a copy to',
help='Group that will receive a copy of the report in addition to the user'),
'report_template_id': fields.many2one('email.template', string="Report Template", required=True),
'remind_update_delay': fields.integer('Non-updated manual goals will be reminded after',
help="Never reminded if no value or zero is specified."),
'last_report_date': fields.date('Last Report Date'),
'next_report_date': fields.function(_get_next_report_date,
type='date', string='Next Report Date', store=True),
'category': fields.selection(lambda s, *a, **k: s._get_categories(*a, **k),
string="Appears in", help="Define the visibility of the challenge through menus", required=True),
}
_defaults = {
'period': 'once',
'state': 'draft',
'visibility_mode': 'personal',
'report_message_frequency': 'never',
'last_report_date': fields.date.today,
'manager_id': lambda s, cr, uid, c: uid,
'category': 'hr',
'reward_failure': False,
'report_template_id': lambda s, *a, **k: s._get_report_template(*a, **k),
'reward_realtime': True,
}
def create(self, cr, uid, vals, context=None):
"""Overwrite the create method to add the user of groups"""
if vals.get('user_domain'):
user_ids = self._get_challenger_users(cr, uid, vals.get('user_domain'), context=context)
if not vals.get('user_ids'):
vals['user_ids'] = []
vals['user_ids'] += [(4, user_id) for user_id in user_ids]
return super(gamification_challenge, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int,long)):
ids = [ids]
if vals.get('user_domain'):
user_ids = self._get_challenger_users(cr, uid, vals.get('user_domain'), context=context)
if not vals.get('user_ids'):
vals['user_ids'] = []
vals['user_ids'] += [(4, user_id) for user_id in user_ids]
write_res = super(gamification_challenge, self).write(cr, uid, ids, vals, context=context)
if vals.get('report_message_frequency', 'never') != 'never':
# _recompute_challenge_users do not set users for challenges with no reports, subscribing them now
for challenge in self.browse(cr, uid, ids, context=context):
self.message_subscribe(cr, uid, [challenge.id], [user.partner_id.id for user in challenge.user_ids], context=context)
if vals.get('state') == 'inprogress':
self._recompute_challenge_users(cr, uid, ids, context=context)
self._generate_goals_from_challenge(cr, uid, ids, context=context)
elif vals.get('state') == 'done':
self.check_challenge_reward(cr, uid, ids, force=True, context=context)
elif vals.get('state') == 'draft':
# resetting progress
if self.pool.get('gamification.goal').search(cr, uid, [('challenge_id', 'in', ids), ('state', '=', 'inprogress')], context=context):
raise osv.except_osv("Error", "You can not reset a challenge with unfinished goals.")
return write_res
##### Update #####
def _cron_update(self, cr, uid, context=None, ids=False):
"""Daily cron check.
- Start planned challenges (in draft and with start_date = today)
- Create the missing goals (eg: modified the challenge to add lines)
- Update every running challenge
"""
if context is None:
context = {}
# start scheduled challenges
planned_challenge_ids = self.search(cr, uid, [
('state', '=', 'draft'),
('start_date', '<=', fields.date.today())])
if planned_challenge_ids:
self.write(cr, uid, planned_challenge_ids, {'state': 'inprogress'}, context=context)
# close scheduled challenges
planned_challenge_ids = self.search(cr, uid, [
('state', '=', 'inprogress'),
            ('end_date', '<', fields.date.today())])
if planned_challenge_ids:
self.write(cr, uid, planned_challenge_ids, {'state': 'done'}, context=context)
if not ids:
ids = self.search(cr, uid, [('state', '=', 'inprogress')], context=context)
# in cron mode, will do intermediate commits
# TODO in trunk: replace by parameter
context = dict(context, commit_gamification=True)
return self._update_all(cr, uid, ids, context=context)
def _update_all(self, cr, uid, ids, context=None):
"""Update the challenges and related goals
:param list(int) ids: the ids of the challenges to update, if False will
update only challenges in progress."""
if isinstance(ids, (int,long)):
ids = [ids]
goal_obj = self.pool.get('gamification.goal')
# we use yesterday to update the goals that just ended
yesterday = date.today() - timedelta(days=1)
goal_ids = goal_obj.search(cr, uid, [
('challenge_id', 'in', ids),
'|',
('state', '=', 'inprogress'),
'&',
('state', 'in', ('reached', 'failed')),
'|',
('end_date', '>=', yesterday.strftime(DF)),
('end_date', '=', False)
], context=context)
# update every running goal already generated linked to selected challenges
goal_obj.update(cr, uid, goal_ids, context=context)
self._recompute_challenge_users(cr, uid, ids, context=context)
self._generate_goals_from_challenge(cr, uid, ids, context=context)
for challenge in self.browse(cr, uid, ids, context=context):
if challenge.last_report_date != fields.date.today():
# goals closed but still opened at the last report date
closed_goals_to_report = goal_obj.search(cr, uid, [
('challenge_id', '=', challenge.id),
('start_date', '>=', challenge.last_report_date),
('end_date', '<=', challenge.last_report_date)
])
if challenge.next_report_date and fields.date.today() >= challenge.next_report_date:
self.report_progress(cr, uid, challenge, context=context)
elif len(closed_goals_to_report) > 0:
# some goals need a final report
self.report_progress(cr, uid, challenge, subset_goal_ids=closed_goals_to_report, context=context)
self.check_challenge_reward(cr, uid, ids, context=context)
return True
def quick_update(self, cr, uid, challenge_id, context=None):
"""Update all the goals of a specific challenge, no generation of new goals"""
goal_ids = self.pool.get('gamification.goal').search(cr, uid, [('challenge_id', '=', challenge_id)], context=context)
self.pool.get('gamification.goal').update(cr, uid, goal_ids, context=context)
return True
def _get_challenger_users(self, cr, uid, domain, context=None):
user_domain = eval(ustr(domain))
return self.pool['res.users'].search(cr, uid, user_domain, context=context)
def _recompute_challenge_users(self, cr, uid, challenge_ids, context=None):
"""Recompute the domain to add new users and remove the one no longer matching the domain"""
for challenge in self.browse(cr, uid, challenge_ids, context=context):
if challenge.user_domain:
old_user_ids = [user.id for user in challenge.user_ids]
new_user_ids = self._get_challenger_users(cr, uid, challenge.user_domain, context=context)
to_remove_ids = list(set(old_user_ids) - set(new_user_ids))
to_add_ids = list(set(new_user_ids) - set(old_user_ids))
write_op = [(3, user_id) for user_id in to_remove_ids]
write_op += [(4, user_id) for user_id in to_add_ids]
if write_op:
self.write(cr, uid, [challenge.id], {'user_ids': write_op}, context=context)
return True
def action_start(self, cr, uid, ids, context=None):
"""Start a challenge"""
return self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
def action_check(self, cr, uid, ids, context=None):
"""Check a challenge
Create goals that haven't been created yet (eg: if added users)
Recompute the current value for each goal related"""
return self._update_all(cr, uid, ids=ids, context=context)
def action_report_progress(self, cr, uid, ids, context=None):
"""Manual report of a goal, does not influence automatic report frequency"""
if isinstance(ids, (int,long)):
ids = [ids]
for challenge in self.browse(cr, uid, ids, context=context):
self.report_progress(cr, uid, challenge, context=context)
return True
##### Automatic actions #####
def _generate_goals_from_challenge(self, cr, uid, ids, context=None):
"""Generate the goals for each line and user.
If goals already exist for this line and user, the line is skipped. This
can be called after each change in the list of users or lines.
        :param list(int) ids: the list of challenges concerned"""
goal_obj = self.pool.get('gamification.goal')
for challenge in self.browse(cr, uid, ids, context=context):
(start_date, end_date) = start_end_date_for_period(challenge.period)
to_update = []
# if no periodicity, use challenge dates
if not start_date and challenge.start_date:
start_date = challenge.start_date
if not end_date and challenge.end_date:
end_date = challenge.end_date
for line in challenge.line_ids:
                # there are potentially a lot of users
# detect the ones with no goal linked to this line
date_clause = ""
query_params = [line.id]
if start_date:
date_clause += "AND g.start_date = %s"
query_params.append(start_date)
if end_date:
date_clause += "AND g.end_date = %s"
query_params.append(end_date)
query = """SELECT u.id AS user_id
FROM res_users u
LEFT JOIN gamification_goal g
ON (u.id = g.user_id)
WHERE line_id = %s
{date_clause}
""".format(date_clause=date_clause)
cr.execute(query, query_params)
user_with_goal_ids = cr.dictfetchall()
participant_user_ids = [user.id for user in challenge.user_ids]
user_without_goal_ids = list(set(participant_user_ids) - set([user['user_id'] for user in user_with_goal_ids]))
user_squating_challenge_ids = list(set([user['user_id'] for user in user_with_goal_ids]) - set(participant_user_ids))
if user_squating_challenge_ids:
# users that used to match the challenge
goal_to_remove_ids = goal_obj.search(cr, uid, [('challenge_id', '=', challenge.id), ('user_id', 'in', user_squating_challenge_ids)], context=context)
goal_obj.unlink(cr, uid, goal_to_remove_ids, context=context)
values = {
'definition_id': line.definition_id.id,
'line_id': line.id,
'target_goal': line.target_goal,
'state': 'inprogress',
}
if start_date:
values['start_date'] = start_date
if end_date:
values['end_date'] = end_date
# the goal is initialised over the limit to make sure we will compute it at least once
if line.condition == 'higher':
values['current'] = line.target_goal - 1
else:
values['current'] = line.target_goal + 1
if challenge.remind_update_delay:
values['remind_update_delay'] = challenge.remind_update_delay
for user_id in user_without_goal_ids:
values.update({'user_id': user_id})
goal_id = goal_obj.create(cr, uid, values, context=context)
to_update.append(goal_id)
goal_obj.update(cr, uid, to_update, context=context)
return True
##### JS utilities #####
def _get_serialized_challenge_lines(self, cr, uid, challenge, user_id=False, restrict_goal_ids=False, restrict_top=False, context=None):
"""Return a serialised version of the goals information if the user has not completed every goal
:challenge: browse record of challenge to compute
:user_id: res.users id of the user retrieving progress (False if no distinction, only for ranking challenges)
:restrict_goal_ids: <list(int)> compute only the results for this subset if gamification.goal ids, if False retrieve every goal of current running challenge
:restrict_top: <int> for challenge lines where visibility_mode == 'ranking', retrieve only these bests results and itself, if False retrieve all
restrict_goal_ids has priority over restrict_top
format list
# if visibility_mode == 'ranking'
{
'name': <gamification.goal.description name>,
'description': <gamification.goal.description description>,
'condition': <reach condition {lower,higher}>,
'computation_mode': <target computation {manually,count,sum,python}>,
'monetary': <{True,False}>,
'suffix': <value suffix>,
'action': <{True,False}>,
'display_mode': <{progress,boolean}>,
'target': <challenge line target>,
'own_goal_id': <gamification.goal id where user_id == uid>,
'goals': [
{
'id': <gamification.goal id>,
'rank': <user ranking>,
'user_id': <res.users id>,
'name': <res.users name>,
'state': <gamification.goal state {draft,inprogress,reached,failed,canceled}>,
'completeness': <percentage>,
'current': <current value>,
}
]
},
# if visibility_mode == 'personal'
{
'id': <gamification.goal id>,
'name': <gamification.goal.description name>,
'description': <gamification.goal.description description>,
'condition': <reach condition {lower,higher}>,
'computation_mode': <target computation {manually,count,sum,python}>,
'monetary': <{True,False}>,
'suffix': <value suffix>,
'action': <{True,False}>,
'display_mode': <{progress,boolean}>,
'target': <challenge line target>,
'state': <gamification.goal state {draft,inprogress,reached,failed,canceled}>,
'completeness': <percentage>,
'current': <current value>,
}
"""
goal_obj = self.pool.get('gamification.goal')
(start_date, end_date) = start_end_date_for_period(challenge.period)
res_lines = []
all_reached = True
for line in challenge.line_ids:
line_data = {
'name': line.definition_id.name,
'description': line.definition_id.description,
'condition': line.definition_id.condition,
'computation_mode': line.definition_id.computation_mode,
'monetary': line.definition_id.monetary,
'suffix': line.definition_id.suffix,
'action': True if line.definition_id.action_id else False,
'display_mode': line.definition_id.display_mode,
'target': line.target_goal,
}
domain = [
('line_id', '=', line.id),
('state', '!=', 'draft'),
]
if restrict_goal_ids:
domain.append(('ids', 'in', restrict_goal_ids))
else:
# if no subset goals, use the dates for restriction
if start_date:
domain.append(('start_date', '=', start_date))
if end_date:
domain.append(('end_date', '=', end_date))
if challenge.visibility_mode == 'personal':
if not user_id:
raise osv.except_osv(_('Error!'),_("Retrieving progress for personal challenge without user information"))
domain.append(('user_id', '=', user_id))
sorting = goal_obj._order
limit = 1
else:
line_data.update({
'own_goal_id': False,
'goals': [],
})
sorting = "completeness desc, current desc"
limit = False
goal_ids = goal_obj.search(cr, uid, domain, order=sorting, limit=limit, context=context)
ranking = 0
for goal in goal_obj.browse(cr, uid, goal_ids, context=context):
if challenge.visibility_mode == 'personal':
# limit=1 so only one result
line_data.update({
'id': goal.id,
'current': goal.current,
'completeness': goal.completeness,
'state': goal.state,
})
if goal.state != 'reached':
all_reached = False
else:
ranking += 1
if user_id and goal.user_id.id == user_id:
line_data['own_goal_id'] = goal.id
elif restrict_top and ranking > restrict_top:
# not own goal and too low to be in top
continue
line_data['goals'].append({
'id': goal.id,
'user_id': goal.user_id.id,
'name': goal.user_id.name,
'rank': ranking,
'current': goal.current,
'completeness': goal.completeness,
'state': goal.state,
})
if goal.state != 'reached':
all_reached = False
if goal_ids:
res_lines.append(line_data)
if all_reached:
return []
return res_lines
##### Reporting #####
def report_progress(self, cr, uid, challenge, context=None, users=False, subset_goal_ids=False):
"""Post report about the progress of the goals
        :param challenge: the challenge object that needs to be reported
:param users: the list(res.users) of users that are concerned by
the report. If False, will send the report to every user concerned
(goal users and group that receive a copy). Only used for challenge with
a visibility mode set to 'personal'.
:param goal_ids: the list(int) of goal ids linked to the challenge for
the report. If not specified, use the goals for the current challenge
            period. This parameter can be used to produce reports for previous challenge
periods.
:param subset_goal_ids: a list(int) of goal ids to restrict the report
"""
if context is None:
context = {}
temp_obj = self.pool.get('email.template')
ctx = context.copy()
if challenge.visibility_mode == 'ranking':
lines_boards = self._get_serialized_challenge_lines(cr, uid, challenge, user_id=False, restrict_goal_ids=subset_goal_ids, restrict_top=False, context=context)
ctx.update({'challenge_lines': lines_boards})
body_html = temp_obj.render_template(cr, uid, challenge.report_template_id.body_html, 'gamification.challenge', challenge.id, context=ctx)
# send to every follower and participant of the challenge
self.message_post(cr, uid, challenge.id,
body=body_html,
partner_ids=[user.partner_id.id for user in challenge.user_ids],
context=context,
subtype='mail.mt_comment')
if challenge.report_message_group_id:
self.pool.get('mail.group').message_post(cr, uid, challenge.report_message_group_id.id,
body=body_html,
context=context,
subtype='mail.mt_comment')
else:
# generate individual reports
for user in users or challenge.user_ids:
goals = self._get_serialized_challenge_lines(cr, uid, challenge, user.id, restrict_goal_ids=subset_goal_ids, context=context)
if not goals:
continue
ctx.update({'challenge_lines': goals})
body_html = temp_obj.render_template(cr, user.id, challenge.report_template_id.body_html, 'gamification.challenge', challenge.id, context=ctx)
# send message only to users, not on the challenge
self.message_post(cr, uid, 0,
body=body_html,
partner_ids=[(4, user.partner_id.id)],
context=context,
subtype='mail.mt_comment')
if challenge.report_message_group_id:
self.pool.get('mail.group').message_post(cr, uid, challenge.report_message_group_id.id,
body=body_html,
context=context,
subtype='mail.mt_comment')
return self.write(cr, uid, challenge.id, {'last_report_date': fields.date.today()}, context=context)
##### Challenges #####
# TODO in trunk, remove unused parameter user_id
def accept_challenge(self, cr, uid, challenge_ids, context=None, user_id=None):
"""The user accept the suggested challenge"""
return self._accept_challenge(cr, uid, uid, challenge_ids, context=context)
def _accept_challenge(self, cr, uid, user_id, challenge_ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, user_id, context=context)
message = "%s has joined the challenge" % user.name
self.message_post(cr, SUPERUSER_ID, challenge_ids, body=message, context=context)
self.write(cr, SUPERUSER_ID, challenge_ids, {'invited_user_ids': [(3, user_id)], 'user_ids': [(4, user_id)]}, context=context)
return self._generate_goals_from_challenge(cr, SUPERUSER_ID, challenge_ids, context=context)
# TODO in trunk, remove unused parameter user_id
def discard_challenge(self, cr, uid, challenge_ids, context=None, user_id=None):
"""The user discard the suggested challenge"""
return self._discard_challenge(cr, uid, uid, challenge_ids, context=context)
def _discard_challenge(self, cr, uid, user_id, challenge_ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, user_id, context=context)
message = "%s has refused the challenge" % user.name
self.message_post(cr, SUPERUSER_ID, challenge_ids, body=message, context=context)
        return self.write(cr, SUPERUSER_ID, challenge_ids, {'invited_user_ids': [(3, user_id)]}, context=context)
def reply_challenge_wizard(self, cr, uid, challenge_id, context=None):
result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'challenge_wizard')
id = result and result[1] or False
result = self.pool.get('ir.actions.act_window').read(cr, uid, [id], context=context)[0]
result['res_id'] = challenge_id
return result
def check_challenge_reward(self, cr, uid, ids, force=False, context=None):
"""Actions for the end of a challenge
If a reward was selected, grant it to the correct users.
Rewards granted at:
- the end date for a challenge with no periodicity
- the end of a period for challenge with periodicity
- when a challenge is manually closed
(if no end date, a running challenge is never rewarded)
"""
if isinstance(ids, (int,long)):
ids = [ids]
for challenge in self.browse(cr, uid, ids, context=context):
(start_date, end_date) = start_end_date_for_period(challenge.period, challenge.start_date, challenge.end_date)
yesterday = date.today() - timedelta(days=1)
rewarded_users = []
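            # a challenge counts as ended the day after its end date, or when force=True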
challenge_ended = end_date == yesterday.strftime(DF) or force
if challenge.reward_id and (challenge_ended or challenge.reward_realtime):
                # not using start_date as intemporal goals have a start date but no end_date
reached_goals = self.pool.get('gamification.goal').read_group(cr, uid, [
('challenge_id', '=', challenge.id),
('end_date', '=', end_date),
('state', '=', 'reached')
], fields=['user_id'], groupby=['user_id'], context=context)
for reach_goals_user in reached_goals:
if reach_goals_user['user_id_count'] == len(challenge.line_ids):
# the user has succeeded every assigned goal
user_id = reach_goals_user['user_id'][0]
if challenge.reward_realtime:
badges = self.pool['gamification.badge.user'].search(cr, uid, [
('challenge_id', '=', challenge.id),
('badge_id', '=', challenge.reward_id.id),
('user_id', '=', user_id),
], count=True, context=context)
if badges > 0:
                            # has already received the badge for this challenge
continue
self.reward_user(cr, uid, user_id, challenge.reward_id.id, challenge.id, context=context)
rewarded_users.append(user_id)
if challenge_ended:
# open chatter message
message_body = _("The challenge %s is finished." % challenge.name)
if rewarded_users:
user_names = self.pool['res.users'].name_get(cr, uid, rewarded_users, context=context)
message_body += _("<br/>Reward (badge %s) for every succeeding user was sent to %s." % (challenge.reward_id.name, ", ".join([name for (user_id, name) in user_names])))
else:
message_body += _("<br/>Nobody has succeeded to reach every goal, no badge is rewared for this challenge.")
# reward bests
if challenge.reward_first_id:
(first_user, second_user, third_user) = self.get_top3_users(cr, uid, challenge, context=context)
if first_user:
self.reward_user(cr, uid, first_user.id, challenge.reward_first_id.id, challenge.id, context=context)
message_body += _("<br/>Special rewards were sent to the top competing users. The ranking for this challenge is :")
message_body += "<br/> 1. %s - %s" % (first_user.name, challenge.reward_first_id.name)
else:
message_body += _("Nobody reached the required conditions to receive special badges.")
if second_user and challenge.reward_second_id:
self.reward_user(cr, uid, second_user.id, challenge.reward_second_id.id, challenge.id, context=context)
message_body += "<br/> 2. %s - %s" % (second_user.name, challenge.reward_second_id.name)
if third_user and challenge.reward_third_id:
                        self.reward_user(cr, uid, third_user.id, challenge.reward_third_id.id, challenge.id, context=context)
message_body += "<br/> 3. %s - %s" % (third_user.name, challenge.reward_third_id.name)
self.message_post(cr, uid, challenge.id,
partner_ids=[user.partner_id.id for user in challenge.user_ids],
body=message_body,
context=context)
return True
def get_top3_users(self, cr, uid, challenge, context=None):
"""Get the top 3 users for a defined challenge
        Ranking criteria:
        1. succeed every goal of the challenge
        2. total completeness of each goal (can be over 100)
        Top 3 is computed only for users succeeding every goal of the challenge,
        except if reward_failure is True, in which case every user is
        considered.
        :return: ('first', 'second', 'third'), tuple containing the res.users
        objects of the top 3 users. If no user meets the criteria for a rank,
        it is set to False. Nobody can receive a rank if no one receives the
        higher one (e.g. if 'second' == False, 'third' will be False)
"""
goal_obj = self.pool.get('gamification.goal')
(start_date, end_date) = start_end_date_for_period(challenge.period, challenge.start_date, challenge.end_date)
challengers = []
for user in challenge.user_ids:
all_reached = True
total_completness = 0
# every goal of the user for the running period
goal_ids = goal_obj.search(cr, uid, [
('challenge_id', '=', challenge.id),
('user_id', '=', user.id),
('start_date', '=', start_date),
('end_date', '=', end_date)
], context=context)
for goal in goal_obj.browse(cr, uid, goal_ids, context=context):
if goal.state != 'reached':
all_reached = False
if goal.definition_condition == 'higher':
# can be over 100
total_completness += 100.0 * goal.current / goal.target_goal
elif goal.state == 'reached':
# for lower goals, can not get percentage so 0 or 100
total_completness += 100
challengers.append({'user': user, 'all_reached': all_reached, 'total_completness': total_completness})
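        # rank candidates: users who reached every goal first, then by cumulative completeness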
sorted_challengers = sorted(challengers, key=lambda k: (k['all_reached'], k['total_completness']), reverse=True)
if len(sorted_challengers) == 0 or (not challenge.reward_failure and not sorted_challengers[0]['all_reached']):
# nobody succeeded
return (False, False, False)
if len(sorted_challengers) == 1 or (not challenge.reward_failure and not sorted_challengers[1]['all_reached']):
# only one user succeeded
return (sorted_challengers[0]['user'], False, False)
if len(sorted_challengers) == 2 or (not challenge.reward_failure and not sorted_challengers[2]['all_reached']):
            # only two users succeeded
return (sorted_challengers[0]['user'], sorted_challengers[1]['user'], False)
return (sorted_challengers[0]['user'], sorted_challengers[1]['user'], sorted_challengers[2]['user'])
def reward_user(self, cr, uid, user_id, badge_id, challenge_id=False, context=None):
"""Create a badge user and send the badge to him
:param user_id: the user to reward
:param badge_id: the concerned badge
"""
badge_user_obj = self.pool.get('gamification.badge.user')
user_badge_id = badge_user_obj.create(cr, uid, {'user_id': user_id, 'badge_id': badge_id, 'challenge_id':challenge_id}, context=context)
return badge_user_obj._send_badge(cr, uid, [user_badge_id], context=context)
class gamification_challenge_line(osv.Model):
"""Gamification challenge line
    Predefined goal for 'gamification_challenge'
    These are generic lists of goals with only the target goal defined
Should only be created for the gamification_challenge object
"""
_name = 'gamification.challenge.line'
_description = 'Gamification generic goal for challenge'
_order = "sequence, id"
def on_change_definition_id(self, cr, uid, ids, definition_id=False, context=None):
goal_definition = self.pool.get('gamification.goal.definition')
if not definition_id:
return {'value': {'definition_id': False}}
goal_definition = goal_definition.browse(cr, uid, definition_id, context=context)
ret = {
'value': {
'condition': goal_definition.condition,
'definition_full_suffix': goal_definition.full_suffix
}
}
return ret
_columns = {
'name': fields.related('definition_id', 'name', string="Name", type="char"),
'challenge_id': fields.many2one('gamification.challenge',
string='Challenge',
required=True,
ondelete="cascade"),
'definition_id': fields.many2one('gamification.goal.definition',
string='Goal Definition',
required=True,
ondelete="cascade"),
'target_goal': fields.float('Target Value to Reach',
required=True),
'sequence': fields.integer('Sequence',
help='Sequence number for ordering'),
'condition': fields.related('definition_id', 'condition', type="selection",
readonly=True, string="Condition", selection=[('lower', '<='), ('higher', '>=')]),
'definition_suffix': fields.related('definition_id', 'suffix', type="char", readonly=True, string="Unit"),
'definition_monetary': fields.related('definition_id', 'monetary', type="boolean", readonly=True, string="Monetary"),
'definition_full_suffix': fields.related('definition_id', 'full_suffix', type="char", readonly=True, string="Suffix"),
}
_default = {
'sequence': 1,
}
| agpl-3.0 | -7,284,109,915,812,551,000 | 47.650397 | 187 | 0.569072 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/__init__.py | 1 | 24972 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
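# Prefer the Python 3 model modules; fall back to the Python 2 variants below if they fail to import.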
try:
from .instance_view_status_py3 import InstanceViewStatus
from .sub_resource_py3 import SubResource
from .sku_py3 import Sku
from .availability_set_py3 import AvailabilitySet
from .virtual_machine_size_py3 import VirtualMachineSize
from .virtual_machine_extension_image_py3 import VirtualMachineExtensionImage
from .virtual_machine_image_resource_py3 import VirtualMachineImageResource
from .virtual_machine_extension_instance_view_py3 import VirtualMachineExtensionInstanceView
from .virtual_machine_extension_py3 import VirtualMachineExtension
from .purchase_plan_py3 import PurchasePlan
from .os_disk_image_py3 import OSDiskImage
from .data_disk_image_py3 import DataDiskImage
from .virtual_machine_image_py3 import VirtualMachineImage
from .usage_name_py3 import UsageName
from .usage_py3 import Usage
from .virtual_machine_capture_parameters_py3 import VirtualMachineCaptureParameters
from .virtual_machine_capture_result_py3 import VirtualMachineCaptureResult
from .plan_py3 import Plan
from .hardware_profile_py3 import HardwareProfile
from .image_reference_py3 import ImageReference
from .key_vault_secret_reference_py3 import KeyVaultSecretReference
from .key_vault_key_reference_py3 import KeyVaultKeyReference
from .disk_encryption_settings_py3 import DiskEncryptionSettings
from .virtual_hard_disk_py3 import VirtualHardDisk
from .managed_disk_parameters_py3 import ManagedDiskParameters
from .os_disk_py3 import OSDisk
from .data_disk_py3 import DataDisk
from .storage_profile_py3 import StorageProfile
from .additional_unattend_content_py3 import AdditionalUnattendContent
from .win_rm_listener_py3 import WinRMListener
from .win_rm_configuration_py3 import WinRMConfiguration
from .windows_configuration_py3 import WindowsConfiguration
from .ssh_public_key_py3 import SshPublicKey
from .ssh_configuration_py3 import SshConfiguration
from .linux_configuration_py3 import LinuxConfiguration
from .vault_certificate_py3 import VaultCertificate
from .vault_secret_group_py3 import VaultSecretGroup
from .os_profile_py3 import OSProfile
from .network_interface_reference_py3 import NetworkInterfaceReference
from .network_profile_py3 import NetworkProfile
from .boot_diagnostics_py3 import BootDiagnostics
from .diagnostics_profile_py3 import DiagnosticsProfile
from .virtual_machine_extension_handler_instance_view_py3 import VirtualMachineExtensionHandlerInstanceView
from .virtual_machine_agent_instance_view_py3 import VirtualMachineAgentInstanceView
from .disk_instance_view_py3 import DiskInstanceView
from .boot_diagnostics_instance_view_py3 import BootDiagnosticsInstanceView
from .virtual_machine_identity_py3 import VirtualMachineIdentity
from .maintenance_redeploy_status_py3 import MaintenanceRedeployStatus
from .virtual_machine_instance_view_py3 import VirtualMachineInstanceView
from .virtual_machine_py3 import VirtualMachine
from .rolling_upgrade_policy_py3 import RollingUpgradePolicy
from .upgrade_policy_py3 import UpgradePolicy
from .image_os_disk_py3 import ImageOSDisk
from .image_data_disk_py3 import ImageDataDisk
from .image_storage_profile_py3 import ImageStorageProfile
from .image_py3 import Image
from .virtual_machine_scale_set_identity_py3 import VirtualMachineScaleSetIdentity
from .resource_sku_capacity_py3 import ResourceSkuCapacity
from .resource_sku_costs_py3 import ResourceSkuCosts
from .resource_sku_capabilities_py3 import ResourceSkuCapabilities
from .resource_sku_restrictions_py3 import ResourceSkuRestrictions
from .resource_sku_py3 import ResourceSku
from .virtual_machine_scale_set_os_profile_py3 import VirtualMachineScaleSetOSProfile
from .virtual_machine_scale_set_update_os_profile_py3 import VirtualMachineScaleSetUpdateOSProfile
from .virtual_machine_scale_set_managed_disk_parameters_py3 import VirtualMachineScaleSetManagedDiskParameters
from .virtual_machine_scale_set_os_disk_py3 import VirtualMachineScaleSetOSDisk
from .virtual_machine_scale_set_update_os_disk_py3 import VirtualMachineScaleSetUpdateOSDisk
from .virtual_machine_scale_set_data_disk_py3 import VirtualMachineScaleSetDataDisk
from .virtual_machine_scale_set_storage_profile_py3 import VirtualMachineScaleSetStorageProfile
from .virtual_machine_scale_set_update_storage_profile_py3 import VirtualMachineScaleSetUpdateStorageProfile
from .api_entity_reference_py3 import ApiEntityReference
from .virtual_machine_scale_set_public_ip_address_configuration_dns_settings_py3 import VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
from .virtual_machine_scale_set_public_ip_address_configuration_py3 import VirtualMachineScaleSetPublicIPAddressConfiguration
from .virtual_machine_scale_set_update_public_ip_address_configuration_py3 import VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
from .virtual_machine_scale_set_ip_configuration_py3 import VirtualMachineScaleSetIPConfiguration
from .virtual_machine_scale_set_update_ip_configuration_py3 import VirtualMachineScaleSetUpdateIPConfiguration
from .virtual_machine_scale_set_network_configuration_dns_settings_py3 import VirtualMachineScaleSetNetworkConfigurationDnsSettings
from .virtual_machine_scale_set_network_configuration_py3 import VirtualMachineScaleSetNetworkConfiguration
from .virtual_machine_scale_set_update_network_configuration_py3 import VirtualMachineScaleSetUpdateNetworkConfiguration
from .virtual_machine_scale_set_network_profile_py3 import VirtualMachineScaleSetNetworkProfile
from .virtual_machine_scale_set_update_network_profile_py3 import VirtualMachineScaleSetUpdateNetworkProfile
from .virtual_machine_scale_set_extension_py3 import VirtualMachineScaleSetExtension
from .virtual_machine_scale_set_extension_profile_py3 import VirtualMachineScaleSetExtensionProfile
from .virtual_machine_scale_set_vm_profile_py3 import VirtualMachineScaleSetVMProfile
from .virtual_machine_scale_set_update_vm_profile_py3 import VirtualMachineScaleSetUpdateVMProfile
from .virtual_machine_scale_set_py3 import VirtualMachineScaleSet
from .virtual_machine_scale_set_update_py3 import VirtualMachineScaleSetUpdate
from .virtual_machine_scale_set_vm_instance_ids_py3 import VirtualMachineScaleSetVMInstanceIDs
from .virtual_machine_scale_set_vm_instance_required_ids_py3 import VirtualMachineScaleSetVMInstanceRequiredIDs
from .virtual_machine_status_code_count_py3 import VirtualMachineStatusCodeCount
from .virtual_machine_scale_set_instance_view_statuses_summary_py3 import VirtualMachineScaleSetInstanceViewStatusesSummary
from .virtual_machine_scale_set_vm_extensions_summary_py3 import VirtualMachineScaleSetVMExtensionsSummary
from .virtual_machine_scale_set_instance_view_py3 import VirtualMachineScaleSetInstanceView
from .virtual_machine_scale_set_sku_capacity_py3 import VirtualMachineScaleSetSkuCapacity
from .virtual_machine_scale_set_sku_py3 import VirtualMachineScaleSetSku
from .virtual_machine_scale_set_vm_py3 import VirtualMachineScaleSetVM
from .virtual_machine_health_status_py3 import VirtualMachineHealthStatus
from .virtual_machine_scale_set_vm_instance_view_py3 import VirtualMachineScaleSetVMInstanceView
from .rolling_upgrade_running_status_py3 import RollingUpgradeRunningStatus
from .rolling_upgrade_progress_info_py3 import RollingUpgradeProgressInfo
from .api_error_base_py3 import ApiErrorBase
from .inner_error_py3 import InnerError
from .api_error_py3 import ApiError
from .rolling_upgrade_status_info_py3 import RollingUpgradeStatusInfo
from .compute_long_running_operation_properties_py3 import ComputeLongRunningOperationProperties
from .resource_py3 import Resource
from .update_resource_py3 import UpdateResource
from .sub_resource_read_only_py3 import SubResourceReadOnly
from .operation_status_response_py3 import OperationStatusResponse
from .disk_sku_py3 import DiskSku
from .resource_update_py3 import ResourceUpdate
from .image_disk_reference_py3 import ImageDiskReference
from .creation_data_py3 import CreationData
from .source_vault_py3 import SourceVault
from .key_vault_and_secret_reference_py3 import KeyVaultAndSecretReference
from .key_vault_and_key_reference_py3 import KeyVaultAndKeyReference
from .encryption_settings_py3 import EncryptionSettings
from .disk_py3 import Disk
from .disk_update_py3 import DiskUpdate
from .grant_access_data_py3 import GrantAccessData
from .access_uri_py3 import AccessUri
from .snapshot_py3 import Snapshot
from .snapshot_update_py3 import SnapshotUpdate
from .run_command_input_parameter_py3 import RunCommandInputParameter
from .run_command_input_py3 import RunCommandInput
from .run_command_parameter_definition_py3 import RunCommandParameterDefinition
from .run_command_document_base_py3 import RunCommandDocumentBase
from .run_command_document_py3 import RunCommandDocument
from .run_command_result_py3 import RunCommandResult
except (SyntaxError, ImportError):
from .instance_view_status import InstanceViewStatus
from .sub_resource import SubResource
from .sku import Sku
from .availability_set import AvailabilitySet
from .virtual_machine_size import VirtualMachineSize
from .virtual_machine_extension_image import VirtualMachineExtensionImage
from .virtual_machine_image_resource import VirtualMachineImageResource
from .virtual_machine_extension_instance_view import VirtualMachineExtensionInstanceView
from .virtual_machine_extension import VirtualMachineExtension
from .purchase_plan import PurchasePlan
from .os_disk_image import OSDiskImage
from .data_disk_image import DataDiskImage
from .virtual_machine_image import VirtualMachineImage
from .usage_name import UsageName
from .usage import Usage
from .virtual_machine_capture_parameters import VirtualMachineCaptureParameters
from .virtual_machine_capture_result import VirtualMachineCaptureResult
from .plan import Plan
from .hardware_profile import HardwareProfile
from .image_reference import ImageReference
from .key_vault_secret_reference import KeyVaultSecretReference
from .key_vault_key_reference import KeyVaultKeyReference
from .disk_encryption_settings import DiskEncryptionSettings
from .virtual_hard_disk import VirtualHardDisk
from .managed_disk_parameters import ManagedDiskParameters
from .os_disk import OSDisk
from .data_disk import DataDisk
from .storage_profile import StorageProfile
from .additional_unattend_content import AdditionalUnattendContent
from .win_rm_listener import WinRMListener
from .win_rm_configuration import WinRMConfiguration
from .windows_configuration import WindowsConfiguration
from .ssh_public_key import SshPublicKey
from .ssh_configuration import SshConfiguration
from .linux_configuration import LinuxConfiguration
from .vault_certificate import VaultCertificate
from .vault_secret_group import VaultSecretGroup
from .os_profile import OSProfile
from .network_interface_reference import NetworkInterfaceReference
from .network_profile import NetworkProfile
from .boot_diagnostics import BootDiagnostics
from .diagnostics_profile import DiagnosticsProfile
from .virtual_machine_extension_handler_instance_view import VirtualMachineExtensionHandlerInstanceView
from .virtual_machine_agent_instance_view import VirtualMachineAgentInstanceView
from .disk_instance_view import DiskInstanceView
from .boot_diagnostics_instance_view import BootDiagnosticsInstanceView
from .virtual_machine_identity import VirtualMachineIdentity
from .maintenance_redeploy_status import MaintenanceRedeployStatus
from .virtual_machine_instance_view import VirtualMachineInstanceView
from .virtual_machine import VirtualMachine
from .rolling_upgrade_policy import RollingUpgradePolicy
from .upgrade_policy import UpgradePolicy
from .image_os_disk import ImageOSDisk
from .image_data_disk import ImageDataDisk
from .image_storage_profile import ImageStorageProfile
from .image import Image
from .virtual_machine_scale_set_identity import VirtualMachineScaleSetIdentity
from .resource_sku_capacity import ResourceSkuCapacity
from .resource_sku_costs import ResourceSkuCosts
from .resource_sku_capabilities import ResourceSkuCapabilities
from .resource_sku_restrictions import ResourceSkuRestrictions
from .resource_sku import ResourceSku
from .virtual_machine_scale_set_os_profile import VirtualMachineScaleSetOSProfile
from .virtual_machine_scale_set_update_os_profile import VirtualMachineScaleSetUpdateOSProfile
from .virtual_machine_scale_set_managed_disk_parameters import VirtualMachineScaleSetManagedDiskParameters
from .virtual_machine_scale_set_os_disk import VirtualMachineScaleSetOSDisk
from .virtual_machine_scale_set_update_os_disk import VirtualMachineScaleSetUpdateOSDisk
from .virtual_machine_scale_set_data_disk import VirtualMachineScaleSetDataDisk
from .virtual_machine_scale_set_storage_profile import VirtualMachineScaleSetStorageProfile
from .virtual_machine_scale_set_update_storage_profile import VirtualMachineScaleSetUpdateStorageProfile
from .api_entity_reference import ApiEntityReference
from .virtual_machine_scale_set_public_ip_address_configuration_dns_settings import VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
from .virtual_machine_scale_set_public_ip_address_configuration import VirtualMachineScaleSetPublicIPAddressConfiguration
from .virtual_machine_scale_set_update_public_ip_address_configuration import VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
from .virtual_machine_scale_set_ip_configuration import VirtualMachineScaleSetIPConfiguration
from .virtual_machine_scale_set_update_ip_configuration import VirtualMachineScaleSetUpdateIPConfiguration
from .virtual_machine_scale_set_network_configuration_dns_settings import VirtualMachineScaleSetNetworkConfigurationDnsSettings
from .virtual_machine_scale_set_network_configuration import VirtualMachineScaleSetNetworkConfiguration
from .virtual_machine_scale_set_update_network_configuration import VirtualMachineScaleSetUpdateNetworkConfiguration
from .virtual_machine_scale_set_network_profile import VirtualMachineScaleSetNetworkProfile
from .virtual_machine_scale_set_update_network_profile import VirtualMachineScaleSetUpdateNetworkProfile
from .virtual_machine_scale_set_extension import VirtualMachineScaleSetExtension
from .virtual_machine_scale_set_extension_profile import VirtualMachineScaleSetExtensionProfile
from .virtual_machine_scale_set_vm_profile import VirtualMachineScaleSetVMProfile
from .virtual_machine_scale_set_update_vm_profile import VirtualMachineScaleSetUpdateVMProfile
from .virtual_machine_scale_set import VirtualMachineScaleSet
from .virtual_machine_scale_set_update import VirtualMachineScaleSetUpdate
from .virtual_machine_scale_set_vm_instance_ids import VirtualMachineScaleSetVMInstanceIDs
from .virtual_machine_scale_set_vm_instance_required_ids import VirtualMachineScaleSetVMInstanceRequiredIDs
from .virtual_machine_status_code_count import VirtualMachineStatusCodeCount
from .virtual_machine_scale_set_instance_view_statuses_summary import VirtualMachineScaleSetInstanceViewStatusesSummary
from .virtual_machine_scale_set_vm_extensions_summary import VirtualMachineScaleSetVMExtensionsSummary
from .virtual_machine_scale_set_instance_view import VirtualMachineScaleSetInstanceView
from .virtual_machine_scale_set_sku_capacity import VirtualMachineScaleSetSkuCapacity
from .virtual_machine_scale_set_sku import VirtualMachineScaleSetSku
from .virtual_machine_scale_set_vm import VirtualMachineScaleSetVM
from .virtual_machine_health_status import VirtualMachineHealthStatus
from .virtual_machine_scale_set_vm_instance_view import VirtualMachineScaleSetVMInstanceView
from .rolling_upgrade_running_status import RollingUpgradeRunningStatus
from .rolling_upgrade_progress_info import RollingUpgradeProgressInfo
from .api_error_base import ApiErrorBase
from .inner_error import InnerError
from .api_error import ApiError
from .rolling_upgrade_status_info import RollingUpgradeStatusInfo
from .compute_long_running_operation_properties import ComputeLongRunningOperationProperties
from .resource import Resource
from .update_resource import UpdateResource
from .sub_resource_read_only import SubResourceReadOnly
from .operation_status_response import OperationStatusResponse
from .disk_sku import DiskSku
from .resource_update import ResourceUpdate
from .image_disk_reference import ImageDiskReference
from .creation_data import CreationData
from .source_vault import SourceVault
from .key_vault_and_secret_reference import KeyVaultAndSecretReference
from .key_vault_and_key_reference import KeyVaultAndKeyReference
from .encryption_settings import EncryptionSettings
from .disk import Disk
from .disk_update import DiskUpdate
from .grant_access_data import GrantAccessData
from .access_uri import AccessUri
from .snapshot import Snapshot
from .snapshot_update import SnapshotUpdate
from .run_command_input_parameter import RunCommandInputParameter
from .run_command_input import RunCommandInput
from .run_command_parameter_definition import RunCommandParameterDefinition
from .run_command_document_base import RunCommandDocumentBase
from .run_command_document import RunCommandDocument
from .run_command_result import RunCommandResult
from .availability_set_paged import AvailabilitySetPaged
from .virtual_machine_size_paged import VirtualMachineSizePaged
from .usage_paged import UsagePaged
from .image_paged import ImagePaged
from .resource_sku_paged import ResourceSkuPaged
from .virtual_machine_paged import VirtualMachinePaged
from .virtual_machine_scale_set_paged import VirtualMachineScaleSetPaged
from .virtual_machine_scale_set_sku_paged import VirtualMachineScaleSetSkuPaged
from .virtual_machine_scale_set_extension_paged import VirtualMachineScaleSetExtensionPaged
from .virtual_machine_scale_set_vm_paged import VirtualMachineScaleSetVMPaged
from .disk_paged import DiskPaged
from .snapshot_paged import SnapshotPaged
from .run_command_document_base_paged import RunCommandDocumentBasePaged
from .compute_management_client_enums import (
StatusLevelTypes,
OperatingSystemTypes,
VirtualMachineSizeTypes,
CachingTypes,
DiskCreateOptionTypes,
StorageAccountTypes,
PassNames,
ComponentNames,
SettingNames,
ProtocolTypes,
ResourceIdentityType,
MaintenanceOperationResultCodeTypes,
UpgradeMode,
OperatingSystemStateTypes,
ResourceSkuCapacityScaleType,
ResourceSkuRestrictionsType,
ResourceSkuRestrictionsReasonCode,
IPVersion,
VirtualMachineScaleSetSkuScaleType,
RollingUpgradeStatusCode,
RollingUpgradeActionType,
DiskCreateOption,
AccessLevel,
InstanceViewTypes,
)
__all__ = [
'InstanceViewStatus',
'SubResource',
'Sku',
'AvailabilitySet',
'VirtualMachineSize',
'VirtualMachineExtensionImage',
'VirtualMachineImageResource',
'VirtualMachineExtensionInstanceView',
'VirtualMachineExtension',
'PurchasePlan',
'OSDiskImage',
'DataDiskImage',
'VirtualMachineImage',
'UsageName',
'Usage',
'VirtualMachineCaptureParameters',
'VirtualMachineCaptureResult',
'Plan',
'HardwareProfile',
'ImageReference',
'KeyVaultSecretReference',
'KeyVaultKeyReference',
'DiskEncryptionSettings',
'VirtualHardDisk',
'ManagedDiskParameters',
'OSDisk',
'DataDisk',
'StorageProfile',
'AdditionalUnattendContent',
'WinRMListener',
'WinRMConfiguration',
'WindowsConfiguration',
'SshPublicKey',
'SshConfiguration',
'LinuxConfiguration',
'VaultCertificate',
'VaultSecretGroup',
'OSProfile',
'NetworkInterfaceReference',
'NetworkProfile',
'BootDiagnostics',
'DiagnosticsProfile',
'VirtualMachineExtensionHandlerInstanceView',
'VirtualMachineAgentInstanceView',
'DiskInstanceView',
'BootDiagnosticsInstanceView',
'VirtualMachineIdentity',
'MaintenanceRedeployStatus',
'VirtualMachineInstanceView',
'VirtualMachine',
'RollingUpgradePolicy',
'UpgradePolicy',
'ImageOSDisk',
'ImageDataDisk',
'ImageStorageProfile',
'Image',
'VirtualMachineScaleSetIdentity',
'ResourceSkuCapacity',
'ResourceSkuCosts',
'ResourceSkuCapabilities',
'ResourceSkuRestrictions',
'ResourceSku',
'VirtualMachineScaleSetOSProfile',
'VirtualMachineScaleSetUpdateOSProfile',
'VirtualMachineScaleSetManagedDiskParameters',
'VirtualMachineScaleSetOSDisk',
'VirtualMachineScaleSetUpdateOSDisk',
'VirtualMachineScaleSetDataDisk',
'VirtualMachineScaleSetStorageProfile',
'VirtualMachineScaleSetUpdateStorageProfile',
'ApiEntityReference',
'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings',
'VirtualMachineScaleSetPublicIPAddressConfiguration',
'VirtualMachineScaleSetUpdatePublicIPAddressConfiguration',
'VirtualMachineScaleSetIPConfiguration',
'VirtualMachineScaleSetUpdateIPConfiguration',
'VirtualMachineScaleSetNetworkConfigurationDnsSettings',
'VirtualMachineScaleSetNetworkConfiguration',
'VirtualMachineScaleSetUpdateNetworkConfiguration',
'VirtualMachineScaleSetNetworkProfile',
'VirtualMachineScaleSetUpdateNetworkProfile',
'VirtualMachineScaleSetExtension',
'VirtualMachineScaleSetExtensionProfile',
'VirtualMachineScaleSetVMProfile',
'VirtualMachineScaleSetUpdateVMProfile',
'VirtualMachineScaleSet',
'VirtualMachineScaleSetUpdate',
'VirtualMachineScaleSetVMInstanceIDs',
'VirtualMachineScaleSetVMInstanceRequiredIDs',
'VirtualMachineStatusCodeCount',
'VirtualMachineScaleSetInstanceViewStatusesSummary',
'VirtualMachineScaleSetVMExtensionsSummary',
'VirtualMachineScaleSetInstanceView',
'VirtualMachineScaleSetSkuCapacity',
'VirtualMachineScaleSetSku',
'VirtualMachineScaleSetVM',
'VirtualMachineHealthStatus',
'VirtualMachineScaleSetVMInstanceView',
'RollingUpgradeRunningStatus',
'RollingUpgradeProgressInfo',
'ApiErrorBase',
'InnerError',
'ApiError',
'RollingUpgradeStatusInfo',
'ComputeLongRunningOperationProperties',
'Resource',
'UpdateResource',
'SubResourceReadOnly',
'OperationStatusResponse',
'DiskSku',
'ResourceUpdate',
'ImageDiskReference',
'CreationData',
'SourceVault',
'KeyVaultAndSecretReference',
'KeyVaultAndKeyReference',
'EncryptionSettings',
'Disk',
'DiskUpdate',
'GrantAccessData',
'AccessUri',
'Snapshot',
'SnapshotUpdate',
'RunCommandInputParameter',
'RunCommandInput',
'RunCommandParameterDefinition',
'RunCommandDocumentBase',
'RunCommandDocument',
'RunCommandResult',
'AvailabilitySetPaged',
'VirtualMachineSizePaged',
'UsagePaged',
'ImagePaged',
'ResourceSkuPaged',
'VirtualMachinePaged',
'VirtualMachineScaleSetPaged',
'VirtualMachineScaleSetSkuPaged',
'VirtualMachineScaleSetExtensionPaged',
'VirtualMachineScaleSetVMPaged',
'DiskPaged',
'SnapshotPaged',
'RunCommandDocumentBasePaged',
'StatusLevelTypes',
'OperatingSystemTypes',
'VirtualMachineSizeTypes',
'CachingTypes',
'DiskCreateOptionTypes',
'StorageAccountTypes',
'PassNames',
'ComponentNames',
'SettingNames',
'ProtocolTypes',
'ResourceIdentityType',
'MaintenanceOperationResultCodeTypes',
'UpgradeMode',
'OperatingSystemStateTypes',
'ResourceSkuCapacityScaleType',
'ResourceSkuRestrictionsType',
'ResourceSkuRestrictionsReasonCode',
'IPVersion',
'VirtualMachineScaleSetSkuScaleType',
'RollingUpgradeStatusCode',
'RollingUpgradeActionType',
'DiskCreateOption',
'AccessLevel',
'InstanceViewTypes',
]
| mit | -4,941,298,559,367,331,000 | 51.133612 | 153 | 0.799215 | false |
jules185/IoT_Hackathon | .homeassistant/deps/aiohttp_cors/resource_options.py | 3 | 5931 | # Copyright 2015 Vladimir Rutsky <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource CORS options class definition.
"""
import numbers
import collections
import collections.abc
__all__ = ("ResourceOptions",)
def _is_proper_sequence(seq):
"""Returns is seq is sequence and not string."""
return (isinstance(seq, collections.abc.Sequence) and
not isinstance(seq, str))
class ResourceOptions(collections.namedtuple(
"Base",
("allow_credentials", "expose_headers", "allow_headers", "max_age",
"allow_methods"))):
"""Resource CORS options."""
__slots__ = ()
def __init__(self, *, allow_credentials=False, expose_headers=(),
allow_headers=(), max_age=None, allow_methods=None):
"""Construct resource CORS options.
Options will be normalized.
        :param allow_credentials:
            Whether passing client credentials to the resource from another
            origin is allowed.
            See <http://www.w3.org/TR/cors/#user-credentials> for
            the definition.
        :type allow_credentials: bool
:param expose_headers:
Server headers that are allowed to be exposed to the client.
Simple response headers are excluded from this set, see
<http://www.w3.org/TR/cors/#list-of-exposed-headers>.
:type expose_headers: sequence of strings or ``*`` string.
:param allow_headers:
Client headers that are allowed to be passed to the resource.
See <http://www.w3.org/TR/cors/#list-of-headers>.
:type allow_headers: sequence of strings or ``*`` string.
:param max_age:
How long the results of a preflight request can be cached in a
preflight result cache (in seconds).
See <http://www.w3.org/TR/cors/#http-access-control-max-age>.
:param allow_methods:
List of allowed methods or ``*``string. Can be used in resource or
global defaults, but not in specific route.
It's not required to specify all allowed methods for specific
resource, routes that have explicit CORS configuration will be
treated as if their methods are allowed.
"""
super().__init__()
def __new__(cls, *, allow_credentials=False, expose_headers=(),
allow_headers=(), max_age=None, allow_methods=None):
"""Normalize source parameters and store them in namedtuple."""
if not isinstance(allow_credentials, bool):
raise ValueError(
"'allow_credentials' must be boolean, "
"got '{!r}'".format(allow_credentials))
_allow_credentials = allow_credentials
# `expose_headers` is either "*", or sequence of strings.
if expose_headers == "*":
_expose_headers = expose_headers
elif not _is_proper_sequence(expose_headers):
raise ValueError(
"'expose_headers' must be either '*', or sequence of strings, "
"got '{!r}'".format(expose_headers))
elif expose_headers:
# "Access-Control-Expose-Headers" ":" #field-name
# TODO: Check that headers are valid.
# TODO: Remove headers that in the _SIMPLE_RESPONSE_HEADERS set
# according to
# <http://www.w3.org/TR/cors/#list-of-exposed-headers>.
_expose_headers = frozenset(expose_headers)
else:
# No headers exposed.
_expose_headers = frozenset()
# `allow_headers` is either "*", or set of headers in upper case.
if allow_headers == "*":
_allow_headers = allow_headers
elif not _is_proper_sequence(allow_headers):
raise ValueError(
"'allow_headers' must be either '*', or sequence of strings, "
"got '{!r}'".format(allow_headers))
else:
# TODO: Check that headers are valid.
_allow_headers = frozenset(h.upper() for h in allow_headers)
if max_age is None:
_max_age = None
else:
if not isinstance(max_age, numbers.Integral) or max_age < 0:
raise ValueError(
"'max_age' must be non-negative integer, "
"got '{!r}'".format(max_age))
_max_age = max_age
if allow_methods is None or allow_methods == "*":
_allow_methods = allow_methods
elif not _is_proper_sequence(allow_methods):
raise ValueError(
"'allow_methods' must be either '*', or sequence of strings, "
"got '{!r}'".format(allow_methods))
else:
# TODO: Check that methods are valid.
_allow_methods = frozenset(m.upper() for m in allow_methods)
return super().__new__(
cls,
allow_credentials=_allow_credentials,
expose_headers=_expose_headers,
allow_headers=_allow_headers,
max_age=_max_age,
allow_methods=_allow_methods)
def is_method_allowed(self, method):
if self.allow_methods is None:
return False
if self.allow_methods == '*':
return True
return method.upper() in self.allow_methods
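# Illustrative usage (not part of the original module; assumes the documented
# aiohttp_cors.setup() entry point):
#
#     cors = aiohttp_cors.setup(app, defaults={
#         "https://client.example.org": ResourceOptions(
#             allow_credentials=True,
#             expose_headers="*",
#             allow_headers="*",
#         ),
#     })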
| mit | 6,539,294,440,362,301,000 | 37.764706 | 79 | 0.59619 | false |
wolverineav/horizon | openstack_dashboard/test/integration_tests/tests/test_projects.py | 8 | 3093 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
PROJECT_NAME = helpers.gen_random_resource_name("project")
class TestCreateDeleteProject(helpers.AdminTestCase):
def setUp(self):
super(TestCreateDeleteProject, self).setUp()
self.projects_page = self.home_pg.go_to_identity_projectspage()
def test_create_delete_project(self):
self.projects_page.create_project(PROJECT_NAME)
self.assertTrue(
self.projects_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
self.projects_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(self.projects_page.is_project_present(PROJECT_NAME))
self.projects_page.delete_project(PROJECT_NAME)
self.assertTrue(
self.projects_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
self.projects_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(self.projects_page.is_project_present(PROJECT_NAME))
class TestModifyProject(helpers.AdminTestCase):
def setUp(self):
super(TestModifyProject, self).setUp()
self.projects_page = self.home_pg.go_to_identity_projectspage()
self.projects_page.create_project(PROJECT_NAME)
self.assertTrue(
self.projects_page.find_message_and_dismiss(messages.SUCCESS))
def cleanup():
if not self.projects_page.is_the_current_page():
self.home_pg.go_to_identity_projectspage()
self.projects_page.delete_project(PROJECT_NAME)
self.addCleanup(cleanup)
def test_add_member(self):
admin_name = self.CONFIG.identity.admin_username
regular_role_name = self.CONFIG.identity.default_keystone_role
admin_role_name = self.CONFIG.identity.default_keystone_admin_role
roles2add = {regular_role_name, admin_role_name}
self.projects_page.allocate_user_to_project(
admin_name, roles2add, PROJECT_NAME)
self.assertTrue(
self.projects_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
self.projects_page.find_message_and_dismiss(messages.ERROR))
user_roles = self.projects_page.get_user_roles_at_project(
admin_name, PROJECT_NAME)
self.assertEqual(roles2add, user_roles,
"The requested roles haven't been set for the user!")
| apache-2.0 | 1,711,310,178,869,288,000 | 40.797297 | 78 | 0.690915 | false |
sharad/calibre | src/calibre/ebooks/pdf/render/graphics.py | 1 | 14463 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from math import sqrt
from collections import namedtuple
from PyQt5.Qt import (
QBrush, QPen, Qt, QPointF, QTransform, QPaintEngine, QImage)
from calibre.ebooks.pdf.render.common import (
Name, Array, fmtnum, Stream, Dictionary)
from calibre.ebooks.pdf.render.serialize import Path
from calibre.ebooks.pdf.render.gradients import LinearGradientPattern
def convert_path(path): # {{{
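    # Walk the QPainterPath elements and emit the equivalent PDF path operators.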
p = Path()
i = 0
while i < path.elementCount():
elem = path.elementAt(i)
em = (elem.x, elem.y)
i += 1
if elem.isMoveTo():
p.move_to(*em)
elif elem.isLineTo():
p.line_to(*em)
elif elem.isCurveTo():
added = False
if path.elementCount() > i+1:
c1, c2 = path.elementAt(i), path.elementAt(i+1)
if (c1.type == path.CurveToDataElement and c2.type ==
path.CurveToDataElement):
i += 2
p.curve_to(em[0], em[1], c1.x, c1.y, c2.x, c2.y)
added = True
if not added:
raise ValueError('Invalid curve to operation')
return p
# }}}
Brush = namedtuple('Brush', 'origin brush color')
class TilingPattern(Stream):
def __init__(self, cache_key, matrix, w=8, h=8, paint_type=2, compress=False):
Stream.__init__(self, compress=compress)
self.paint_type = paint_type
self.w, self.h = w, h
self.matrix = (matrix.m11(), matrix.m12(), matrix.m21(), matrix.m22(),
matrix.dx(), matrix.dy())
self.resources = Dictionary()
self.cache_key = (self.__class__.__name__, cache_key, self.matrix)
def add_extra_keys(self, d):
d['Type'] = Name('Pattern')
d['PatternType'] = 1
d['PaintType'] = self.paint_type
d['TilingType'] = 1
d['BBox'] = Array([0, 0, self.w, self.h])
d['XStep'] = self.w
d['YStep'] = self.h
d['Matrix'] = Array(self.matrix)
d['Resources'] = self.resources
class QtPattern(TilingPattern):
qt_patterns = ( # {{{
"0 J\n"
"6 w\n"
"[] 0 d\n"
"4 0 m\n"
"4 8 l\n"
"0 4 m\n"
"8 4 l\n"
"S\n", # Dense1Pattern
"0 J\n"
"2 w\n"
"[6 2] 1 d\n"
"0 0 m\n"
"0 8 l\n"
"8 0 m\n"
"8 8 l\n"
"S\n"
"[] 0 d\n"
"2 0 m\n"
"2 8 l\n"
"6 0 m\n"
"6 8 l\n"
"S\n"
"[6 2] -3 d\n"
"4 0 m\n"
"4 8 l\n"
"S\n", # Dense2Pattern
"0 J\n"
"2 w\n"
"[6 2] 1 d\n"
"0 0 m\n"
"0 8 l\n"
"8 0 m\n"
"8 8 l\n"
"S\n"
"[2 2] -1 d\n"
"2 0 m\n"
"2 8 l\n"
"6 0 m\n"
"6 8 l\n"
"S\n"
"[6 2] -3 d\n"
"4 0 m\n"
"4 8 l\n"
"S\n", # Dense3Pattern
"0 J\n"
"2 w\n"
"[2 2] 1 d\n"
"0 0 m\n"
"0 8 l\n"
"8 0 m\n"
"8 8 l\n"
"S\n"
"[2 2] -1 d\n"
"2 0 m\n"
"2 8 l\n"
"6 0 m\n"
"6 8 l\n"
"S\n"
"[2 2] 1 d\n"
"4 0 m\n"
"4 8 l\n"
"S\n", # Dense4Pattern
"0 J\n"
"2 w\n"
"[2 6] -1 d\n"
"0 0 m\n"
"0 8 l\n"
"8 0 m\n"
"8 8 l\n"
"S\n"
"[2 2] 1 d\n"
"2 0 m\n"
"2 8 l\n"
"6 0 m\n"
"6 8 l\n"
"S\n"
"[2 6] 3 d\n"
"4 0 m\n"
"4 8 l\n"
"S\n", # Dense5Pattern
"0 J\n"
"2 w\n"
"[2 6] -1 d\n"
"0 0 m\n"
"0 8 l\n"
"8 0 m\n"
"8 8 l\n"
"S\n"
"[2 6] 3 d\n"
"4 0 m\n"
"4 8 l\n"
"S\n", # Dense6Pattern
"0 J\n"
"2 w\n"
"[2 6] -1 d\n"
"0 0 m\n"
"0 8 l\n"
"8 0 m\n"
"8 8 l\n"
"S\n", # Dense7Pattern
"1 w\n"
"0 4 m\n"
"8 4 l\n"
"S\n", # HorPattern
"1 w\n"
"4 0 m\n"
"4 8 l\n"
"S\n", # VerPattern
"1 w\n"
"4 0 m\n"
"4 8 l\n"
"0 4 m\n"
"8 4 l\n"
"S\n", # CrossPattern
"1 w\n"
"-1 5 m\n"
"5 -1 l\n"
"3 9 m\n"
"9 3 l\n"
"S\n", # BDiagPattern
"1 w\n"
"-1 3 m\n"
"5 9 l\n"
"3 -1 m\n"
"9 5 l\n"
"S\n", # FDiagPattern
"1 w\n"
"-1 3 m\n"
"5 9 l\n"
"3 -1 m\n"
"9 5 l\n"
"-1 5 m\n"
"5 -1 l\n"
"3 9 m\n"
"9 3 l\n"
"S\n", # DiagCrossPattern
) # }}}
def __init__(self, pattern_num, matrix):
super(QtPattern, self).__init__(pattern_num, matrix)
self.write(self.qt_patterns[pattern_num-2])
class TexturePattern(TilingPattern):
def __init__(self, pixmap, matrix, pdf, clone=None):
if clone is None:
image = pixmap.toImage()
cache_key = pixmap.cacheKey()
imgref = pdf.add_image(image, cache_key)
paint_type = (2 if image.format() in {QImage.Format_MonoLSB,
QImage.Format_Mono} else 1)
super(TexturePattern, self).__init__(
cache_key, matrix, w=image.width(), h=image.height(),
paint_type=paint_type)
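            # image matrix: scale to the pattern size and flip the y-axis (PDF origin is bottom-left)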
m = (self.w, 0, 0, -self.h, 0, self.h)
self.resources['XObject'] = Dictionary({'Texture':imgref})
self.write_line('%s cm /Texture Do'%(' '.join(map(fmtnum, m))))
else:
super(TexturePattern, self).__init__(
clone.cache_key[1], matrix, w=clone.w, h=clone.h,
paint_type=clone.paint_type)
self.resources['XObject'] = Dictionary(clone.resources['XObject'])
self.write(clone.getvalue())
class GraphicsState(object):
FIELDS = ('fill', 'stroke', 'opacity', 'transform', 'brush_origin',
'clip_updated', 'do_fill', 'do_stroke')
def __init__(self):
self.fill = QBrush(Qt.white)
self.stroke = QPen()
self.opacity = 1.0
self.transform = QTransform()
self.brush_origin = QPointF()
self.clip_updated = False
self.do_fill = False
self.do_stroke = True
self.qt_pattern_cache = {}
def __eq__(self, other):
for x in self.FIELDS:
if getattr(other, x) != getattr(self, x):
return False
return True
def copy(self):
ans = GraphicsState()
ans.fill = QBrush(self.fill)
ans.stroke = QPen(self.stroke)
ans.opacity = self.opacity
ans.transform = self.transform * QTransform()
ans.brush_origin = QPointF(self.brush_origin)
ans.clip_updated = self.clip_updated
ans.do_fill, ans.do_stroke = self.do_fill, self.do_stroke
return ans
class Graphics(object):
def __init__(self, page_width_px, page_height_px):
self.base_state = GraphicsState()
self.current_state = GraphicsState()
self.pending_state = None
self.page_width_px, self.page_height_px = (page_width_px, page_height_px)
def begin(self, pdf):
self.pdf = pdf
def update_state(self, state, painter):
flags = state.state()
if self.pending_state is None:
self.pending_state = self.current_state.copy()
s = self.pending_state
if flags & QPaintEngine.DirtyTransform:
s.transform = state.transform()
if flags & QPaintEngine.DirtyBrushOrigin:
s.brush_origin = state.brushOrigin()
if flags & QPaintEngine.DirtyBrush:
s.fill = state.brush()
if flags & QPaintEngine.DirtyPen:
s.stroke = state.pen()
if flags & QPaintEngine.DirtyOpacity:
s.opacity = state.opacity()
if flags & QPaintEngine.DirtyClipPath or flags & QPaintEngine.DirtyClipRegion:
s.clip_updated = True
def reset(self):
self.current_state = GraphicsState()
self.pending_state = None
def __call__(self, pdf_system, painter):
# Apply the currently pending state to the PDF
if self.pending_state is None:
return
pdf_state = self.current_state
ps = self.pending_state
pdf = self.pdf
if ps.transform != pdf_state.transform or ps.clip_updated:
pdf.restore_stack()
pdf.save_stack()
pdf_state = self.base_state
if (pdf_state.transform != ps.transform):
pdf.transform(ps.transform)
if (pdf_state.opacity != ps.opacity or pdf_state.stroke != ps.stroke):
self.apply_stroke(ps, pdf_system, painter)
if (pdf_state.opacity != ps.opacity or pdf_state.fill != ps.fill or
pdf_state.brush_origin != ps.brush_origin):
self.apply_fill(ps, pdf_system, painter)
if ps.clip_updated:
ps.clip_updated = False
path = painter.clipPath()
if not path.isEmpty():
p = convert_path(path)
fill_rule = {Qt.OddEvenFill:'evenodd',
Qt.WindingFill:'winding'}[path.fillRule()]
pdf.add_clip(p, fill_rule=fill_rule)
self.current_state = self.pending_state
self.pending_state = None
def convert_brush(self, brush, brush_origin, global_opacity,
pdf_system, qt_system):
# Convert a QBrush to PDF operators
style = brush.style()
pdf = self.pdf
pattern = color = pat = None
opacity = global_opacity
do_fill = True
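        # pattern matrix: translate to the brush origin, then map from the Qt coordinate system to PDF space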
matrix = (QTransform.fromTranslate(brush_origin.x(), brush_origin.y())
* pdf_system * qt_system.inverted()[0])
vals = list(brush.color().getRgbF())
self.brushobj = None
if style <= Qt.DiagCrossPattern:
opacity *= vals[-1]
color = vals[:3]
if style > Qt.SolidPattern:
pat = QtPattern(style, matrix)
elif style == Qt.TexturePattern:
pat = TexturePattern(brush.texture(), matrix, pdf)
if pat.paint_type == 2:
opacity *= vals[-1]
color = vals[:3]
elif style == Qt.LinearGradientPattern:
pat = LinearGradientPattern(brush, matrix, pdf, self.page_width_px,
self.page_height_px)
opacity *= pat.const_opacity
# TODO: Add support for radial/conical gradient fills
if opacity < 1e-4 or style == Qt.NoBrush:
do_fill = False
self.brushobj = Brush(brush_origin, pat, color)
if pat is not None:
pattern = pdf.add_pattern(pat)
return color, opacity, pattern, do_fill
def apply_stroke(self, state, pdf_system, painter):
# TODO: Support miter limit by using QPainterPathStroker
pen = state.stroke
self.pending_state.do_stroke = True
pdf = self.pdf
# Width
w = pen.widthF()
if pen.isCosmetic():
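            # cosmetic pens ignore the painter transform, so undo its scaling here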
t = painter.transform()
try:
w /= sqrt(t.m11()**2 + t.m22()**2)
except ZeroDivisionError:
pass
pdf.serialize(w)
pdf.current_page.write(' w ')
# Line cap
cap = {Qt.FlatCap:0, Qt.RoundCap:1, Qt.SquareCap:
2}.get(pen.capStyle(), 0)
pdf.current_page.write('%d J '%cap)
# Line join
join = {Qt.MiterJoin:0, Qt.RoundJoin:1,
Qt.BevelJoin:2}.get(pen.joinStyle(), 0)
pdf.current_page.write('%d j '%join)
# Dash pattern
ps = {Qt.DashLine:[3], Qt.DotLine:[1,2], Qt.DashDotLine:[3,2,1,2],
Qt.DashDotDotLine:[3, 2, 1, 2, 1, 2]}.get(pen.style(), [])
if ps:
pdf.serialize(Array(ps))
pdf.current_page.write(' 0 d ')
# Stroke fill
color, opacity, pattern, self.pending_state.do_stroke = self.convert_brush(
pen.brush(), state.brush_origin, state.opacity, pdf_system,
painter.transform())
self.pdf.apply_stroke(color, pattern, opacity)
if pen.style() == Qt.NoPen:
self.pending_state.do_stroke = False
def apply_fill(self, state, pdf_system, painter):
self.pending_state.do_fill = True
color, opacity, pattern, self.pending_state.do_fill = self.convert_brush(
state.fill, state.brush_origin, state.opacity, pdf_system,
painter.transform())
self.pdf.apply_fill(color, pattern, opacity)
self.last_fill = self.brushobj
def __enter__(self):
self.pdf.save_stack()
def __exit__(self, *args):
self.pdf.restore_stack()
def resolve_fill(self, rect, pdf_system, qt_system):
'''
Qt's paint system does not update brushOrigin when using
TexturePatterns and it also uses TexturePatterns to emulate gradients,
leading to brokenness. So this method allows the paint engine to update
the brush origin before painting an object. While not perfect, this is
better than nothing. The problem is that if the rect being filled has a
border, then QtWebKit generates an image of the rect size - border but
fills the full rect, and there's no way for the paint engine to know
that and adjust the brush origin.
'''
if not hasattr(self, 'last_fill') or not self.current_state.do_fill:
return
if isinstance(self.last_fill.brush, TexturePattern):
tl = rect.topLeft()
if tl == self.last_fill.origin:
return
matrix = (QTransform.fromTranslate(tl.x(), tl.y())
* pdf_system * qt_system.inverted()[0])
pat = TexturePattern(None, matrix, self.pdf, clone=self.last_fill.brush)
pattern = self.pdf.add_pattern(pat)
self.pdf.apply_fill(self.last_fill.color, pattern)
| gpl-3.0 | 7,913,336,814,895,095,000 | 29.006224 | 86 | 0.508539 | false |
eayunstack/fuel-web | nailgun/nailgun/extensions/cluster_upgrade/tests/test_objects.py | 3 | 2007 | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.test.base import BaseIntegrationTest
from .. import models
from ..objects import relations as objects
class TestUpgradeRelationObject(BaseIntegrationTest):
def test_get_and_create_relation(self):
objects.UpgradeRelationObject.create_relation(1, 2)
rel0 = objects.UpgradeRelationObject.get_cluster_relation(1)
self.assertEqual(rel0.orig_cluster_id, 1)
self.assertEqual(rel0.seed_cluster_id, 2)
rel1 = objects.UpgradeRelationObject.get_cluster_relation(2)
self.assertEqual(rel1.orig_cluster_id, 1)
self.assertEqual(rel1.seed_cluster_id, 2)
def test_is_cluster_in_upgrade(self):
objects.UpgradeRelationObject.create_relation(1, 2)
in_upgrade = objects.UpgradeRelationObject.is_cluster_in_upgrade
self.assertTrue(in_upgrade(1))
self.assertTrue(in_upgrade(2))
def test_is_cluster_not_in_upgrade(self):
self.assertEqual(self.db.query(models.UpgradeRelation).count(), 0)
in_upgrade = objects.UpgradeRelationObject.is_cluster_in_upgrade
self.assertFalse(in_upgrade(1))
self.assertFalse(in_upgrade(2))
def test_delete_relation(self):
objects.UpgradeRelationObject.create_relation(1, 2)
objects.UpgradeRelationObject.delete_relation(1)
self.assertEqual(self.db.query(models.UpgradeRelation).count(), 0)
| apache-2.0 | -2,190,093,603,358,828,000 | 40.8125 | 78 | 0.714499 | false |
yokose-ks/edx-platform | lms/djangoapps/pgreport/tests/test_views.py | 1 | 30880 | from django.test import TestCase
from mock import MagicMock, patch, ANY
from contextlib import nested
from pgreport.views import (
ProgressReport, UserDoesNotExists, InvalidCommand,
get_pgreport_csv, create_pgreport_csv, delete_pgreport_csv,
get_pgreport_table, update_pgreport_table
)
from pgreport.models import ProgressModules, ProgressModulesHistory
from django.test.utils import override_settings
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
import factory
from factory.django import DjangoModelFactory
from student.tests.factories import UserFactory, UserStandingFactory, CourseEnrollmentFactory
from courseware.tests.factories import (InstructorFactory, StaffFactory)
from django.contrib.auth.models import User
from student.models import UserStanding
from gridfs.errors import GridFSError
from xmodule.exceptions import NotFoundError
from django.db import DatabaseError
from pytz import UTC
import datetime
import StringIO
import gzip
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ProgressReportTestCase(ModuleStoreTestCase):
""" Test Progress Report """
COURSE_NAME = "test_pgreport"
COURSE_NUM = 3
def setUp(self):
self.output = StringIO.StringIO()
self.gzipfile = StringIO.StringIO()
self.course = CourseFactory.create(
display_name=self.COURSE_NAME,
)
self.course.raw_grader = [{
'drop_count': 0,
'min_count': 1,
'short_label': 'Final',
'type': 'Final Exam',
'weight': 1.0
}]
self.course.grade_cutoffs = {'Pass': 0.1}
self.students = [
UserFactory.create(username='student1'),
UserFactory.create(username='student2'),
UserFactory.create(username='student3'),
UserFactory.create(username='student4'),
UserFactory.create(username='student5'),
StaffFactory.create(username='staff1', course=self.course.location),
InstructorFactory.create(username='instructor1', course=self.course.location),
]
UserStandingFactory.create(
user=self.students[4],
account_status=UserStanding.ACCOUNT_DISABLED,
changed_by=self.students[6]
)
for user in self.students:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
self.pgreport = ProgressReport(self.course.id)
self.pgreport2 = ProgressReport(self.course.id, lambda state: state)
self.chapter = ItemFactory.create(
parent_location=self.course.location,
category="chapter",
display_name="Week 1"
)
self.chapter.save()
self.section = ItemFactory.create(
parent_location=self.chapter.location,
category="sequential",
display_name="Lesson 1"
)
self.section.save()
self.vertical = ItemFactory.create(
parent_location=self.section.location,
category="vertical",
display_name="Unit1"
)
self.vertical.save()
self.html = ItemFactory.create(
parent_location=self.vertical.location,
category="html",
data={'data': "<html>foobar</html>"}
)
self.html.save()
"""
course.children = [week1.location.url(), week2.location.url(),
week3.location.url()]
"""
from capa.tests.response_xml_factory import OptionResponseXMLFactory
self.problem_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=2,
weight=2,
options=['Correct', 'Incorrect'],
correct_option='Correct'
)
self.problems = []
for num in xrange(1, 3):
self.problems.append(ItemFactory.create(
parent_location=self.vertical.location,
category='problem',
display_name='problem_' + str(num),
metadata={'graded': True, 'format': 'Final Exam'},
data=self.problem_xml
))
self.problems[num - 1].save()
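        # Attach fake correct_map, student_answers and input_state data to each problem module.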
for problem in self.problems:
problem.correct_map = {
problem.location.url() + "_2_1": {
"hint": "",
"hintmode": "",
"correctness": "correct",
"npoints": "",
"msg": "",
"queuestate": ""
},
problem.location.url() + "_2_2": {
"hint": "",
"hintmode": "",
"correctness": "incorrect",
"npoints": "",
"msg": "",
"queuestate": ""
}
}
problem.student_answers = {
problem.location.url() + "_2_1": "Correct",
problem.location.url() + "_2_2": "Incorrect"
}
problem.input_state = {
problem.location.url() + "_2_1": {},
problem.location.url() + "_2_2": {}
}
self.course.save()
patcher = patch('pgreport.views.logging')
self.log_mock = patcher.start()
self.addCleanup(patcher.stop)
"""
from xmodule.modulestore import Location
import json
for user in self.students:
StudentModuleFactory.create(
grade=1,
max_grade=1,
student=user,
course_id=self.course.id,
#module_state_key=Location(self.problem).url(),
module_state_key=self.problem.location.url(),
#state = json.dumps({'attempts': self.attempts, 'done':True})
state = json.dumps({'done':True})
)
        See load_courses() in
        ./lms/djangoapps/courseware/management/commands/tests/test_dump_course.py:
        cp xmport-course to common/test/data and modify TEST_DATA_MIXED_MODULESTORE.
"""
def tearDown(self):
self.output.close()
self.gzipfile.close()
def test_get_active_students(self):
counts, actives, users = ProgressReport.get_active_students(self.course.id)
self.assertEquals(counts, 7)
self.assertEquals(actives, 6)
self.assertItemsEqual(users, self.students[:4] + self.students[5:])
fake_course = CourseFactory.create(display_name="fake")
with self.assertRaises(UserDoesNotExists):
counts, actives, users = ProgressReport.get_active_students(fake_course.id)
def test_create_request(self):
from django.core.handlers.wsgi import WSGIRequest
request = self.pgreport._create_request()
self.assertIsInstance(request, WSGIRequest)
def test_calc_statistics(self):
self.pgreport.module_statistics = {
self.problems[0].location: [1.0, 5.6, 3.4, 9.8, 20.2],
self.problems[1].location: [5.0, 10.6, 8.4, 2.8, 134.8]}
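        # Expected values are the population mean/median/variance/standard deviation
        # of each sample list, rounded to three decimals.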
calc_statistics = self.pgreport._calc_statistics()
self.assertEquals(calc_statistics[self.problems[0].location]["mean"], 8.0)
self.assertEquals(calc_statistics[self.problems[0].location]["median"], 5.6)
self.assertEquals(calc_statistics[self.problems[0].location]["variance"], 45.6)
self.assertEquals(
calc_statistics[self.problems[0].location]["standard_deviation"], 6.753)
self.assertEquals(calc_statistics[self.problems[1].location]["mean"], 32.32)
self.assertEquals(calc_statistics[self.problems[1].location]["median"], 8.4)
self.assertEquals(calc_statistics[self.problems[1].location]["variance"], 2632.778)
self.assertEquals(
calc_statistics[self.problems[1].location]["standard_deviation"], 51.311)
def test_get_correctmap(self):
corrects = self.pgreport._get_correctmap(self.problems[0])
self.assertEquals(
corrects, {
self.problems[0].location.url() + "_2_1": 1,
self.problems[0].location.url() + "_2_2": 0
}
)
def test_get_student_answers(self):
answers1 = self.pgreport._get_student_answers(self.problems[0])
self.problems[1].student_answers = {
self.problems[1].location.url() + "_2_1": ["answer1", "answer2", 5]
}
answers2 = self.pgreport._get_student_answers(self.problems[1])
self.assertEquals(
answers1, {
self.problems[0].location.url() + "_2_1": {"Correct": 1},
self.problems[0].location.url() + "_2_2": {"Incorrect": 1}
}
)
self.assertEquals(answers2, {
self.problems[1].location.url() + "_2_1": ANY})
self.assertEquals(
answers2[self.problems[1].location.url() + "_2_1"],
{"answer1": 1, 5: 1, "answer2": 1}
)
def test_get_module_data(self):
module_mock = MagicMock()
module_data = self.pgreport._get_module_data(module_mock)
self.assertEquals(module_data, {
'start': module_mock.start,
'display_name': module_mock.display_name,
'student_answers': {},
'weight': module_mock.weight,
'correct_map': {},
'type': module_mock.category,
'due': module_mock.due,
'score/total': module_mock.get_progress()
})
def test_increment_student_answers(self):
name = self.problems[0].location.url()
unit_id1 = name + "_2_1"
unit_id2 = name + "_2_2"
unit_id3 = name + "_2_3"
answer = {"Correct": 1}
self.pgreport.module_summary[name] = {
"student_answers": {
unit_id1: {"Correct": 1}, unit_id2: {"Incorrect": 1}},
}
self.pgreport._increment_student_answers(name, answer, unit_id1)
self.pgreport._increment_student_answers(name, answer, unit_id2)
self.pgreport._increment_student_answers(name, answer, unit_id3)
self.assertEquals(self.pgreport.module_summary[name]["student_answers"], {
unit_id1: {"Correct": 2},
unit_id2: {"Correct": 1, "Incorrect": 1},
unit_id3: {"Correct": 1}})
def test_increment_student_correctmap(self):
name = self.problems[0].location.url()
unit_id1 = name + "_2_1"
unit_id2 = name + "_2_2"
unit_id3 = name + "_2_3"
self.pgreport.module_summary[name] = {
"correct_map": {unit_id1: 1, unit_id2: 2},
}
self.pgreport._increment_student_correctmap(name, 1, unit_id1)
self.pgreport._increment_student_correctmap(name, 1, unit_id2)
self.pgreport._increment_student_correctmap(name, 1, unit_id3)
self.assertEquals(self.pgreport.module_summary[name]["correct_map"], {
unit_id1: 2, unit_id2: 3, unit_id3: 1})
def test_collect_module_summary(self):
module_mock = MagicMock()
progress_mock = MagicMock()
progress_mock.frac.return_value = (2.0, 3.0)
module_mock.get_progress.return_value = progress_mock
module_mock.location = self.problems[0].location
self.pgreport.collect_module_summary(module_mock)
self.assertEquals(self.pgreport.module_summary[module_mock.location], {
'count': 1,
'display_name': module_mock.display_name,
'weight': module_mock.weight,
'type': module_mock.category,
'total_score': 3.0,
'due': module_mock.due,
'score/total': progress_mock,
'submit_count': 1,
'start': module_mock.start,
'student_answers': {},
'max_score': 2.0,
'correct_map': {}
})
module_mock.is_submitted.return_value = False
module_data = {
"student_answers": {
module_mock.location.url() + "_2_1": {"Correct": 1},
module_mock.location.url() + "_2_2": [{"answer1": 1}, {"answer2": 2}]},
"correct_map": {
module_mock.location.url() + "_2_1": 1,
module_mock.location.url() + "_2_2": 2}
}
with patch(
'pgreport.views.ProgressReport._get_module_data',
return_value=module_data
) as pgmock:
self.pgreport.collect_module_summary(module_mock)
self.assertEquals(
self.pgreport.module_summary[module_mock.location], {
'count': 2,
'display_name': module_mock.display_name,
'weight': module_mock.weight,
'type': module_mock.category,
'total_score': 6.0,
'due': module_mock.due,
'score/total': progress_mock,
'submit_count': 1,
'start': module_mock.start,
'student_answers': {
module_mock.location.url() + '_2_1': {'Correct': 1},
module_mock.location.url() + '_2_2': {'answer1': 1, 'answer2': 2}
},
'max_score': 4.0,
'correct_map': module_data["correct_map"]
}
)
def test_yield_student_summary(self):
module_mock = MagicMock()
module_mock.location = self.problems[0].location
csvheader = [
'username', 'location', 'last_login', 'grade', 'percent',
'start', 'display_name', 'student_answers', 'weight', 'correct_map',
'type', 'due', 'score/total'
]
rows = []
mg = MagicMock()
location_list = {
u'chapter': [self.chapter.location],
u'problem': [self.problems[0].location],
u'sequential': [self.section.location],
u'vertical': [self.vertical.location]
}
grade_mock = MagicMock(return_value={'grade': True, 'percent': 1.0})
with nested(
patch('pgreport.views.grades'),
patch('pgreport.views.get_module_for_student',
side_effect=[module_mock, module_mock]),
) as (grmock, gemock):
grmock.grade = grade_mock
#self.pgreport.update_state = lambda state: state
self.pgreport.students = User.objects.filter(id__in=[1, 2])
self.pgreport.location_list = location_list
for row in self.pgreport.yield_students_progress():
rows.append(row)
def create_csvrow(csvrows):
for i in [0, 1]:
csvrows.append([
unicode(self.students[i].username), self.problems[0].location,
self.students[i].last_login.strftime("%Y/%m/%d %H:%M:%S %Z"),
True, 1.0, module_mock.start, module_mock.display_name,
{}, module_mock.weight, {}, module_mock.category,
module_mock.due, module_mock.get_progress(),
])
return csvrows
grmock.grade.assert_called_with(ANY, ANY, ANY)
gemock.assert_called_with(ANY, ANY, ANY)
self.assertEquals(rows, create_csvrow([csvheader]))
"""
def test_yield_student_summary_with_update_state(self):
module_mock = MagicMock()
module_mock.location = self.problems[0].location
csvheader = [
'username', 'location', 'last_login', 'grade', 'percent',
'start', 'display_name', 'student_answers', 'weight', 'correct_map',
'type', 'due', 'score/total'
]
rows = []
mg = MagicMock()
location_list = {
u'chapter': [self.chapter.location],
u'problem': [self.problems[0].location],
u'sequential': [self.section.location],
u'vertical': [self.vertical.location]
}
grade_mock = MagicMock(return_value={'grade': True, 'percent': 1.0})
with nested(
patch('pgreport.views.grades'),
patch(
'pgreport.views.get_module_for_student',
side_effect=[module_mock, module_mock]
),
) as (grmock, gemock):
grmock.grade = grade_mock
self.pgreport.update_state = lambda state: state
self.pgreport.students = User.objects.filter(id__in=[1, 2])
self.pgreport.location_list = location_list
for row in self.pgreport.yield_students_progress():
rows.append(row)
def create_csvrow(csvrows):
for i in [0, 1]:
csvrows.append([
unicode(self.students[i].username), self.problems[0].location,
self.students[i].last_login.strftime("%Y/%m/%d %H:%M:%S %Z"),
True, 1.0, module_mock.start, module_mock.display_name,
{}, module_mock.weight, {}, module_mock.category,
module_mock.due, module_mock.get_progress(),
])
return csvrows
grmock.grade.assert_called_with(ANY, ANY, ANY)
gemock.assert_called_with(ANY, ANY, ANY)
self.assertEquals(rows, create_csvrow([csvheader]))
"""
def test_get_children_rec(self):
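        # Build a mocked course tree (course -> chapter -> sequential -> vertical -> problems)
        # and check the collected locations and parent display names.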
course_mock = MagicMock()
course_mock.location = self.course.location
chapter_mock = MagicMock()
chapter_mock.has_children = True
chapter_mock.category = self.chapter.category
chapter_mock.location = self.chapter.location
chapter_mock.display_name = self.chapter.display_name
sequential_mock = MagicMock()
sequential_mock.has_children = True
sequential_mock.category = self.section.category
sequential_mock.location = self.section.location
sequential_mock.display_name = self.section.display_name
vertical_mock = MagicMock()
vertical_mock.has_children = True
vertical_mock.category = self.vertical.category
vertical_mock.location = self.vertical.location
vertical_mock.display_name = self.vertical.display_name
chapter_mock.get_children.return_value = [sequential_mock]
sequential_mock.get_children.return_value = [vertical_mock]
vertical_mock.get_children.return_value = self.problems
course_mock.get_children.return_value = [chapter_mock]
self.pgreport._get_children_rec(course_mock)
self.assertEquals(self.pgreport.location_list, {
u'chapter': [self.chapter.location],
u'problem': [self.problems[0].location, self.problems[1].location],
u'sequential': [self.section.location],
u'vertical': [self.vertical.location]
})
self.assertEquals(self.pgreport.location_parent, [
{
self.problems[0].location: [
self.chapter.display_name,
self.section.display_name,
self.vertical.display_name
]
},
{
self.problems[1].location: [
self.chapter.display_name,
self.section.display_name,
self.vertical.display_name
]
},
])
@patch('sys.stdout', new_callable=StringIO.StringIO)
@patch('pgreport.views.cache')
@patch('pgreport.views.ProgressReport.collect_module_summary')
def test_get_raw(self, cmmock, camock, symock):
with self.assertRaises(InvalidCommand):
self.pgreport.get_raw(command="fake")
summary = self.pgreport2.get_raw(command="summary")
self.assertEquals(summary, {
'enrollments': 7, 'active_students': 6, 'module_tree': []})
location_list = {
u'chapter': [self.chapter.location],
u'problem': [self.problems[0].location, self.problems[1].location],
u'sequential': [self.section.location],
u'vertical': [self.vertical.location]
}
module_summary = {'module_summary': {'dummy': 'dummy'}}
mg = MagicMock()
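        # get_module_for_student is stubbed to return None on its first call and MagicMocks afterwards.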
with nested(
patch('pgreport.views.grades', return_value={'grade': True, 'percent': 1.0}),
patch('pgreport.views.get_module_for_student', side_effect=[
None, mg, mg, mg, mg, mg, mg, mg, mg, mg, mg, mg, mg, mg]),
) as (grmock, gemock):
self.pgreport.location_list = location_list
self.pgreport.module_summary = module_summary
modules = self.pgreport.get_raw(command="modules")
with nested(
patch('pgreport.views.grades', return_value={'grade': True, 'percent': 1.0}),
patch('pgreport.views.get_module_for_student', side_effect=[
None, mg, mg, mg, mg, mg, mg, mg, mg, mg, mg, mg, mg, mg]),
) as (grmock, gemock):
self.pgreport.location_list = location_list
self.pgreport.module_summary = module_summary
summary, modules = self.pgreport.get_raw()
grmock.grade.assert_called_with(self.students[6], ANY, ANY)
gemock.assert_any_called_with(self.students[6], self.course, self.problems[0].location)
def test_get_pgreport_csv(self):
gzipdata = gzip.GzipFile(fileobj=self.gzipfile, mode='wb')
gzipdata.write("row1\nrow2\nrow3\n")
gzipdata.close()
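        # The stored CSV is gzip-compressed; get_pgreport_csv should decompress it and write the rows to stdout.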
cstore_mock = MagicMock()
content_mock = MagicMock()
content_mock.stream_data.return_value = self.gzipfile.getvalue()
cstore_mock.find.return_value = content_mock
with nested(
patch('pgreport.views.contentstore', return_value=cstore_mock),
patch('sys.stdout', new_callable=StringIO.StringIO)
) as (pmock, smock):
get_pgreport_csv(self.course.id)
pmock.assert_called_once_with()
cstore_mock.find.assert_called_once_with(ANY, throw_on_not_found=True, as_stream=True)
content_mock.stream_data.assert_called_once_with()
self.assertEquals(smock.getvalue(), 'row1\nrow2\nrow3\n')
cstore_mock.find.side_effect = NotFoundError()
with patch('pgreport.views.contentstore', return_value=cstore_mock):
with self.assertRaises(NotFoundError):
get_pgreport_csv(self.course.id)
def test_create_pgreport_csv(self):
rows = [
["username", "loc", "last_login"],
[self.students[0].username, self.problems[0].location.url(), "2014/1/1"],
[self.students[1].username, self.problems[1].location.url(), "2014/1/1"],
]
progress_mock = MagicMock()
progress_mock.get_raw.return_value = rows
scontent_mock = MagicMock()
cstore_mock = MagicMock()
cstore_mock.fs.new_file().__exit__.return_value = False
with nested(
patch('pgreport.views.StaticContent', return_value=scontent_mock),
patch('pgreport.views.contentstore', return_value=cstore_mock),
patch('pgreport.views.ProgressReport', return_value=progress_mock),
) as (smock, cmock, pmock):
create_pgreport_csv(self.course.id)
smock.assert_called_once_with(
ANY, "progress_students.csv.gz",
"application/x-gzip", "dummy-data"
)
cmock.assert_called_once_with()
scontent_mock.get_id.assert_called_once_with()
scontent_mock.get_url_path.assert_called_once_with()
progress_mock.get_raw.return_value = rows
cstore_mock.fs.new_file().__enter__().write.side_effect = GridFSError()
with nested(
patch('pgreport.views.StaticContent', return_value=scontent_mock),
patch('pgreport.views.contentstore', return_value=cstore_mock),
patch('pgreport.views.ProgressReport', return_value=progress_mock),
) as (smock, cmock, pmock):
with self.assertRaises(GridFSError):
create_pgreport_csv(self.course.id)
def test_delete_pgreport_csv(self):
cstore_mock = MagicMock()
content_mock = MagicMock()
with nested(
patch('pgreport.views.StaticContent', return_value=content_mock),
patch('pgreport.views.contentstore', return_value=cstore_mock),
) as (scmock, csmock):
delete_pgreport_csv(self.course.id)
scmock.assert_called_once_with(
ANY, "progress_students.csv.gz",
"application/x-gzip", "dummy-data"
)
csmock.assert_called_once_with()
def test_get_pgreport_table(self):
module_summary = {
'location': self.problems[0].location.url(),
'count': 1,
'display_name': "display_name",
'weight': "weight",
'type': "category",
'total_score': 3.0,
'due': "due",
'score/total': "score-total",
'submit_count': 1,
'start': "start",
'student_answers': {},
'max_score': 2.0,
'correct_map': {}
}
filter_mock = MagicMock()
pgmodule_mock = MagicMock()
filter_mock.values.return_value = [module_summary]
pgmodule_mock.objects.filter.return_value = filter_mock
with patch('pgreport.views.ProgressModules', pgmodule_mock):
summary, modules = get_pgreport_table(self.course.id)
filter_mock.values.assert_called_once_with()
pgmodule_mock.objects.filter.assert_called_with(course_id=self.course.id)
self.assertEquals(summary, {
'enrollments': 7, 'active_students': 6,
'module_tree': [
{self.html.location: [u'Week 1', u'Lesson 1', u'Unit1']},
{self.problems[0].location: [u'Week 1', u'Lesson 1', u'Unit1']},
{self.problems[1].location: [u'Week 1', u'Lesson 1', u'Unit1']}
]
})
self.assertEquals(modules, {self.problems[0].location.url(): module_summary})
def test_update_pgreport_table(self):
with patch('pgreport.views.ProgressModules') as pmock:
update_pgreport_table(self.course.id)
pmock.assert_any_call(
count=6, display_name=self.problems[0].display_name,
weight=None, standard_deviation=0.0, correct_map={}, median=0.0, due=None,
submit_count=0, start=datetime.datetime(2030, 1, 1, 0, 0, tzinfo=UTC),
location=self.problems[0].location, course_id=self.course.id, variance=0.0,
student_answers={}, max_score=0.0, total_score=12.0, mean=0.0
)
with patch('pgreport.views.ProgressModules', side_effect=DatabaseError()):
with self.assertRaises(DatabaseError):
update_pgreport_table(self.course.id)
class ProgressModulesFactory(DjangoModelFactory):
FACTORY_FOR = ProgressModules
location = "i4x://org/cn/problem/unitid"
course_id = "org/cn/run"
created = datetime.datetime.now()
display_name = "problem unit"
module_type = "problem"
count = 2
max_score = 1
total_score = 2
submit_count = 1
weight = None
start = datetime.datetime.now()
due = None
correct_map = {u'i4x-org-cn-problem-unitid_2_1': 1}
student_answers = {u'i4x-org-cn-problem-unitid_2_1': {
u'choice_0': 1, u'choice_2': 1}}
mean = 0.5
median = 0.5
variance = 0.25
standard_deviation = 0.5
class ProgressModulesHistoryFactory(DjangoModelFactory):
FACTORY_FOR = ProgressModulesHistory
progress_module = factory.SubFactory(ProgressModulesFactory)
created = datetime.datetime.now()
count = 2
max_score = 1
total_score = 2
submit_count = 1
weight = None
start = datetime.datetime.now()
due = None
correct_map = {u'i4x-org-cn-problem-unitid_2_1': 1}
student_answers = {u'i4x-org-cn-problem-unitid_2_1': {
u'choice_0': 1, u'choice_2': 1}}
mean = 0.5
median = 0.5
variance = 0.25
standard_deviation = 0.5
class ProgressModulesTestCase(TestCase):
def setUp(self):
self.start = self.created = datetime.datetime.utcnow()
self.pgmodule = ProgressModulesFactory.create(
start=self.start, created=self.created)
patcher = patch('pgreport.views.logging')
self.log_mock = patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
pass
def test_repr(self):
self.assertEquals(
str(self.pgmodule),
"[ProgressModules] i4x://org/cn/problem/unitid"
)
def test_unicode(self):
self.assertEquals(
unicode(self.pgmodule),
"[ProgressModules] i4x://org/cn/problem/unitid"
)
def test_get_by_course_id(self):
loc = "i4x://org/cn/problem/unitid"
modules = ProgressModules.get_by_course_id("org/cn/run")
self.assertEquals(modules[loc]["count"], 2)
self.assertEquals(modules[loc]["display_name"], u'problem unit')
self.assertEquals(modules[loc]["weight"], None)
self.assertEquals(modules[loc]["standard_deviation"], 0.5)
self.assertEquals(modules[loc]["total_score"], 2.0)
self.assertEquals(modules[loc]["median"], 0.5)
self.assertEquals(modules[loc]["due"], None)
self.assertEquals(modules[loc]["submit_count"], 1)
self.assertEquals(modules[loc]["module_type"], u'problem')
self.assertEquals(modules[loc]["course_id"], u'org/cn/run')
self.assertEquals(modules[loc]["variance"], 0.25)
self.assertEquals(modules[loc]["student_answers"], u"{u'i4x-org-cn-problem-unitid_2_1': {u'choice_0': 1, u'choice_2': 1}}")
self.assertEquals(modules[loc]["max_score"], 1.0)
self.assertEquals(modules[loc]["correct_map"], u"{u'i4x-org-cn-problem-unitid_2_1': 1}")
self.assertEquals(modules[loc]["mean"], 0.5)
class ProgressModulesHistoryTestCase(TestCase):
def setUp(self):
self.maxDiff = 10000
self.start = self.created = datetime.datetime.utcnow()
self.phmodule = ProgressModulesHistoryFactory.create(
start=self.start, created=self.created)
patcher = patch('pgreport.views.logging')
self.log_mock = patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
pass
def test_repr(self):
self.assertEquals(
str(self.phmodule),
"[ProgressModules] i4x://org/cn/problem/unitid : created {}".format(self.created)
)
def test_unicode(self):
self.assertEquals(
unicode(self.phmodule),
"[ProgressModules] i4x://org/cn/problem/unitid : created {}".format(self.created)
)
| agpl-3.0 | 3,124,031,571,454,364,000 | 38.539052 | 131 | 0.579275 | false |
paulbellamy/docker-py | tests/test.py | 1 | 95769 | # Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import gzip
import io
import json
import os
import re
import shutil
import signal
import socket
import sys
import tarfile
import tempfile
import threading
import time
import unittest
import warnings
import random
import docker
import requests
import six
import base
import fake_api
try:
from unittest import mock
except ImportError:
import mock
DEFAULT_TIMEOUT_SECONDS = docker.client.constants.DEFAULT_TIMEOUT_SECONDS
warnings.simplefilter('error')
warnings.filterwarnings('error')
create_host_config = docker.utils.create_host_config
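# Build a canned requests.Response so client calls can be tested without a real Docker daemon.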
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
return res
def fake_resolve_authconfig(authconfig, registry=None):
return None
def fake_inspect_container(self, container, tty=False):
return fake_api.get_fake_inspect_container(tty=tty)[1]
def fake_inspect_container_tty(self, container):
return fake_inspect_container(self, container, tty=True)
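# Look up the canned (status_code, content) pair for a URL in fake_api.fake_responses.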
def fake_resp(url, data=None, **kwargs):
status_code, content = fake_api.fake_responses[url]()
return response(status_code=status_code, content=content)
fake_request = mock.Mock(side_effect=fake_resp)
url_prefix = 'http+docker://localunixsocket/v{0}/'.format(
docker.client.constants.DEFAULT_DOCKER_API_VERSION)
class Cleanup(object):
if sys.version_info < (2, 7):
# Provide a basic implementation of addCleanup for Python < 2.7
def __init__(self, *args, **kwargs):
super(Cleanup, self).__init__(*args, **kwargs)
self._cleanups = []
def tearDown(self):
super(Cleanup, self).tearDown()
ok = True
while self._cleanups:
fn, args, kwargs = self._cleanups.pop(-1)
try:
fn(*args, **kwargs)
except KeyboardInterrupt:
raise
except:
ok = False
if not ok:
raise
def addCleanup(self, function, *args, **kwargs):
self._cleanups.append((function, args, kwargs))
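# Patch every HTTP verb on docker.Client with fake_request so no real API calls are made.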
@mock.patch.multiple('docker.Client', get=fake_request, post=fake_request,
put=fake_request, delete=fake_request)
class DockerClientTest(Cleanup, base.BaseTestCase):
def setUp(self):
self.client = docker.Client()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
def assertIn(self, object, collection):
if six.PY2 and sys.version_info[1] <= 6:
return self.assertTrue(object in collection)
return super(DockerClientTest, self).assertIn(object, collection)
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
}
def test_ctor(self):
try:
docker.Client(version=1.12)
except Exception as e:
self.assertTrue(isinstance(e, docker.errors.DockerException))
if not six.PY3:
self.assertEqual(
str(e),
'Version parameter must be a string or None. Found float'
)
#########################
# INFORMATION TESTS #
#########################
def test_version(self):
try:
self.client.version()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = docker.Client(version="auto")
self.assertTrue(isinstance(client._version, six.string_types))
self.assertFalse(client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
try:
version = self.client._retrieve_server_version()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.assertTrue(isinstance(version, six.string_types))
def test_info(self):
try:
self.client.info()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'info',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
try:
self.client.search('busybox')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_viz(self):
try:
self.client.images('busybox', viz=True)
self.fail('Viz output should not be supported!')
except Exception:
pass
def test_events(self):
try:
self.client.events()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.fromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
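        # datetime arguments should be converted to Unix timestamps in the query parameters.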
try:
self.client.events(since=since, until=until)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
try:
self.client.events(filters=filters)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True
)
###################
# LISTING TESTS #
###################
def test_images(self):
try:
self.client.images(all=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_quiet(self):
try:
self.client.images(all=True, quiet=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_ids(self):
try:
self.client.images(quiet=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 0},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_filters(self):
try:
self.client.images(filters={'dangling': True})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 0,
'filters': '{"dangling": ["true"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_list_containers(self):
try:
self.client.containers(all=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/json',
params={
'all': 1,
'since': None,
'size': 0,
'limit': -1,
'trunc_cmd': 0,
'before': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
#####################
# CONTAINER TESTS #
#####################
def test_create_container(self):
try:
self.client.create_container('busybox', 'true')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_binds(self):
mount_dest = '/mnt'
try:
self.client.create_container('busybox', ['ls', mount_dest],
volumes=[mount_dest])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volume_string(self):
mount_dest = '/mnt'
try:
self.client.create_container('busybox', ['ls', mount_dest],
volumes=mount_dest)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_ports(self):
try:
self.client.create_container('busybox', 'ls',
ports=[1111, (2222, 'udp'), (3333,)])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"ExposedPorts": {
"1111/tcp": {},
"2222/udp": {},
"3333/tcp": {}
},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_entrypoint(self):
try:
self.client.create_container('busybox', 'hello',
entrypoint='cowsay')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["hello"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Entrypoint": "cowsay"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpu_shares(self):
try:
self.client.create_container('busybox', 'ls',
cpu_shares=5)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"CpuShares": 5}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpuset(self):
try:
self.client.create_container('busybox', 'ls',
cpuset='0,1')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Cpuset": "0,1",
"CpusetCpus": "0,1"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_working_dir(self):
try:
self.client.create_container('busybox', 'ls',
working_dir='/root')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"WorkingDir": "/root"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_stdin_open(self):
try:
self.client.create_container('busybox', 'true', stdin_open=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": true,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": true,
"OpenStdin": true, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volumes_from(self):
vol_names = ['foo', 'bar']
try:
self.client.create_container('busybox', 'true',
volumes_from=vol_names)
except docker.errors.DockerException as e:
self.assertTrue(
docker.utils.compare_version('1.10', self.client._version) >= 0
)
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['VolumesFrom'],
','.join(vol_names))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_empty_volumes_from(self):
try:
self.client.create_container('busybox', 'true', volumes_from=[])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertTrue('VolumesFrom' not in data)
def test_create_named_container(self):
try:
self.client.create_container('busybox', 'true',
name='marisa-kirisame')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
def test_create_container_with_mem_limit_as_int(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
mem_limit=128.0
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
mem_limit='128'
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0)
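    # String limits with k/m/g suffixes should be converted to bytes.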
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
mem_limit='128k'
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024)
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
mem_limit='128m'
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024 * 1024)
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
mem_limit='128g'
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(
data['HostConfig']['Memory'], 128.0 * 1024 * 1024 * 1024
)
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
self.assertRaises(
docker.errors.DockerException, create_host_config, mem_limit='128p'
)
self.assertRaises(
docker.errors.DockerException, create_host_config, mem_limit='1f28'
)
def test_start_container(self):
try:
self.client.start(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
            self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_none(self):
try:
self.client.start(container=None)
except ValueError as e:
self.assertEqual(str(e), 'image or container param is undefined')
else:
self.fail('Command should raise ValueError')
try:
self.client.start(None)
except ValueError as e:
self.assertEqual(str(e), 'image or container param is undefined')
else:
self.fail('Command should raise ValueError')
def test_start_container_regression_573(self):
try:
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_create_container_with_lxc_conf(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'],
{'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_lxc_conf_compat(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(
json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_ro(self):
try:
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": True
}}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_rw(self):
try:
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": False
}}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_mode(self):
try:
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"mode": "z",
}}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:z"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_mode_and_ro_error(self):
try:
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"mode": "z",
"ro": True,
}}
)
)
except ValueError:
return
self.fail('Command should raise ValueError')
def test_create_container_with_binds_list(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
binds=[
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
],
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Binds'] = [
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_port_binds(self):
self.maxDiff = None
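        # Each shorthand form (no host port, plain port, (host_ip,), (host_ip, port),
        # or a list of bindings) should be normalised into Docker's PortBindings structure.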
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
port_bindings = data['HostConfig']['PortBindings']
self.assertTrue('1111/tcp' in port_bindings)
self.assertTrue('2222/tcp' in port_bindings)
self.assertTrue('3333/udp' in port_bindings)
self.assertTrue('4444/tcp' in port_bindings)
self.assertTrue('5555/tcp' in port_bindings)
self.assertTrue('6666/tcp' in port_bindings)
self.assertEqual(
[{"HostPort": "", "HostIp": ""}],
port_bindings['1111/tcp']
)
self.assertEqual(
[{"HostPort": "2222", "HostIp": ""}],
port_bindings['2222/tcp']
)
self.assertEqual(
[{"HostPort": "3333", "HostIp": ""}],
port_bindings['3333/udp']
)
self.assertEqual(
[{"HostPort": "", "HostIp": "127.0.0.1"}],
port_bindings['4444/tcp']
)
self.assertEqual(
[{"HostPort": "5555", "HostIp": "127.0.0.1"}],
port_bindings['5555/tcp']
)
self.assertEqual(len(port_bindings['6666/tcp']), 2)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_mac_address(self):
try:
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
'busybox', ['sleep', '60'], mac_address=mac_address_expected)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
res = self.client.inspect_container(container['Id'])
self.assertEqual(mac_address_expected,
res['NetworkSettings']['MacAddress'])
def test_create_container_with_links(self):
try:
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
links={link_path: alias}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_multiple_links(self):
try:
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Links'] = [
'path1:alias1', 'path2:alias2'
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_links_as_list_of_tuples(self):
try:
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
links=[(link_path, alias)]
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_privileged(self):
try:
self.client.create_container(
'busybox', 'true',
host_config=create_host_config(privileged=True)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Privileged'] = True
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
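    # The start() tests below pass host-config options directly to start(),
    # which is deprecated and should emit a DeprecationWarning.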
def test_start_container_with_lxc_conf(self):
if six.PY2:
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
def test_start_container_with_lxc_conf_compat(self):
if six.PY2:
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
def test_start_container_with_binds_ro(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
if six.PY2:
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
mount_origin: {
"bind": mount_dest,
"ro": True
}
}
)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
mount_origin: {
"bind": mount_dest,
"ro": True
}
}
)
def test_start_container_with_binds_rw(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
if six.PY2:
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
mount_origin: {"bind": mount_dest, "ro": False}
}
)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
mount_origin: {"bind": mount_dest, "ro": False}
}
)
def test_start_container_with_port_binds(self):
self.maxDiff = None
if six.PY2:
try:
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
def test_start_container_with_links(self):
# one link
link_path = 'path'
alias = 'alias'
if six.PY2:
try:
self.client.start(fake_api.FAKE_CONTAINER_ID,
links={link_path: alias})
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID, links={link_path: alias}
)
def test_start_container_with_multiple_links(self):
link_path = 'path'
alias = 'alias'
if six.PY2:
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
def test_start_container_with_links_as_list_of_tuples(self):
# one link
link_path = 'path'
alias = 'alias'
if six.PY2:
try:
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[(link_path, alias)])
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[(link_path, alias)])
def test_start_container_privileged(self):
if six.PY2:
try:
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
def test_start_container_with_dict_instead_of_id(self):
try:
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_restart_policy(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
restart_policy={
"Name": "always",
"MaximumRetryCount": 0
}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['RestartPolicy'] = {
"MaximumRetryCount": 0, "Name": "always"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_added_capabilities(self):
try:
self.client.create_container(
'busybox', 'true',
host_config=create_host_config(cap_add=['MKNOD'])
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['CapAdd'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_dropped_capabilities(self):
try:
self.client.create_container(
'busybox', 'true',
host_config=create_host_config(cap_drop=['MKNOD'])
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['CapDrop'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_devices(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
devices=['/dev/sda:/dev/xvda:rwm',
'/dev/sdb:/dev/xvdb',
'/dev/sdc']
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Devices'] = [
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvda',
'PathOnHost': '/dev/sda'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvdb',
'PathOnHost': '/dev/sdb'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/sdc',
'PathOnHost': '/dev/sdc'}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_dict(self):
labels_dict = {
six.text_type('foo'): six.text_type('1'),
six.text_type('bar'): six.text_type('2'),
}
try:
self.client.create_container(
'busybox', 'true',
labels=labels_dict,
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_list(self):
labels_list = [
six.text_type('foo'),
six.text_type('bar'),
]
labels_dict = {
six.text_type('foo'): six.text_type(),
six.text_type('bar'): six.text_type(),
}
try:
self.client.create_container(
'busybox', 'true',
labels=labels_list,
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_named_volume(self):
try:
mount_dest = '/mnt'
volume_name = 'name'
self.client.create_container(
'busybox', 'true',
host_config=create_host_config(
binds={volume_name: {
"bind": mount_dest,
"ro": False
}}),
volume_driver='foodriver',
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['VolumeDriver'] = 'foodriver'
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_resize_container(self):
try:
self.client.resize(
{'Id': fake_api.FAKE_CONTAINER_ID},
height=15,
width=120
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/resize',
params={'h': 15, 'w': 120},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_rename_container(self):
try:
self.client.rename(
{'Id': fake_api.FAKE_CONTAINER_ID},
name='foobar'
)
except Exception as e:
            self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/rename',
params={'name': 'foobar'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_wait(self):
try:
self.client.wait(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def test_wait_with_dict_instead_of_id(self):
try:
self.client.wait({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
            self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def _socket_path_for_client_session(self, client):
socket_adapter = client.get_adapter('http+docker://')
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
c = docker.Client(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
c = docker.Client(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
c = docker.Client(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
c = docker.Client(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = docker.Client(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_logs(self):
try:
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_logs_with_dict_instead_of_id(self):
try:
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_log_streaming(self):
try:
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_tail(self):
try:
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
tail=10)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 10},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_tty(self):
try:
m = mock.Mock()
with mock.patch('docker.Client.inspect_container',
fake_inspect_container_tty):
with mock.patch('docker.Client._stream_raw_result',
m):
self.client.logs(fake_api.FAKE_CONTAINER_ID,
stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
self.assertTrue(m.called)
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_diff(self):
try:
self.client.diff(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_diff_with_dict_instead_of_id(self):
try:
self.client.diff({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_port(self):
try:
self.client.port({'Id': fake_api.FAKE_CONTAINER_ID}, 1111)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_stop_container(self):
timeout = 2
try:
self.client.stop(fake_api.FAKE_CONTAINER_ID, timeout=timeout)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_stop_container_with_dict_instead_of_id(self):
timeout = 2
try:
self.client.stop({'Id': fake_api.FAKE_CONTAINER_ID},
timeout=timeout)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_exec_create(self):
try:
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0], url_prefix + 'containers/{0}/exec'.format(
fake_api.FAKE_CONTAINER_ID
)
)
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'AttachStdout': True,
'Container': fake_api.FAKE_CONTAINER_ID,
'Cmd': ['ls', '-1'],
'Privileged': False,
'AttachStdin': False,
'AttachStderr': True,
'User': ''
}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_exec_start(self):
try:
self.client.exec_start(fake_api.FAKE_EXEC_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0], url_prefix + 'exec/{0}/start'.format(
fake_api.FAKE_EXEC_ID
)
)
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'Detach': False,
}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_exec_inspect(self):
try:
self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0], url_prefix + 'exec/{0}/json'.format(
fake_api.FAKE_EXEC_ID
)
)
def test_exec_resize(self):
try:
self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID),
params={'h': 20, 'w': 60},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_pause_container(self):
try:
self.client.pause(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/pause',
timeout=(DEFAULT_TIMEOUT_SECONDS)
)
def test_unpause_container(self):
try:
self.client.unpause(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/unpause',
timeout=(DEFAULT_TIMEOUT_SECONDS)
)
def test_kill_container(self):
try:
self.client.kill(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_dict_instead_of_id(self):
try:
self.client.kill({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_signal(self):
try:
self.client.kill(fake_api.FAKE_CONTAINER_ID, signal=signal.SIGTERM)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/kill',
params={'signal': signal.SIGTERM},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container(self):
try:
self.client.restart(fake_api.FAKE_CONTAINER_ID, timeout=2)
except Exception as e:
            self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container_with_dict_instead_of_id(self):
try:
self.client.restart({'Id': fake_api.FAKE_CONTAINER_ID}, timeout=2)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container(self):
try:
self.client.remove_container(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container_with_dict_instead_of_id(self):
try:
self.client.remove_container({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_link(self):
try:
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export(self):
try:
self.client.export(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export_with_dict_instead_of_id(self):
try:
self.client.export({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container(self):
try:
self.client.inspect_container(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container_undefined_id(self):
for arg in None, '', {True: True}:
try:
self.client.inspect_container(arg)
except docker.errors.NullResource as e:
self.assertEqual(
e.args[0], 'image or container param is undefined'
)
else:
self.fail('Command expected NullResource exception')
def test_container_stats(self):
try:
self.client.stats(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/stats',
timeout=60,
stream=True
)
##################
# IMAGES TESTS #
##################
def test_pull(self):
try:
self.client.pull('joffrey/test001')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertFalse(args[1]['stream'])
def test_pull_stream(self):
try:
self.client.pull('joffrey/test001', stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertTrue(args[1]['stream'])
def test_commit(self):
try:
self.client.commit(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'commit',
data='{}',
headers={'Content-Type': 'application/json'},
params={
'repo': None,
'comment': None,
'tag': None,
'container': '3cc2351ab11b',
'author': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_image(self):
try:
self.client.remove_image(fake_api.FAKE_IMAGE_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128',
params={'force': False, 'noprune': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_history(self):
try:
self.client.history(fake_api.FAKE_IMAGE_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/history',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image(self):
try:
self.client.import_image(
fake_api.FAKE_TARBALL_PATH,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': fake_api.FAKE_TARBALL_PATH
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_bytes(self):
stream = (i for i in range(0, 100))
try:
self.client.import_image(
stream,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': '-',
},
headers={
'Content-Type': 'application/tar',
},
data=stream,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_image(self):
try:
self.client.import_image(
image=fake_api.FAKE_IMAGE_NAME,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromImage': fake_api.FAKE_IMAGE_NAME
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image(self):
try:
self.client.inspect_image(fake_api.FAKE_IMAGE_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image_undefined_id(self):
for arg in None, '', {True: True}:
try:
self.client.inspect_image(arg)
except docker.errors.NullResource as e:
self.assertEqual(
e.args[0], 'image or container param is undefined'
)
else:
self.fail('Command expected NullResource exception')
def test_insert_image(self):
try:
self.client.insert(fake_api.FAKE_IMAGE_NAME,
fake_api.FAKE_URL, fake_api.FAKE_PATH)
except docker.errors.DeprecatedMethod as e:
self.assertTrue(
docker.utils.compare_version('1.12', self.client._version) >= 0
)
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/insert',
params={
'url': fake_api.FAKE_URL,
'path': fake_api.FAKE_PATH
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image(self):
try:
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_tag(self):
try:
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/push',
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_stream(self):
try:
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image(self):
try:
self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_tag(self):
try:
self.client.tag(
fake_api.FAKE_IMAGE_ID,
fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': 'tag',
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_force(self):
try:
self.client.tag(
fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 1
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_get_image(self):
try:
self.client.get_image(fake_api.FAKE_IMAGE_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/get',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_load_image(self):
try:
self.client.load_image('Byte Stream....')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/load',
data='Byte Stream....',
timeout=DEFAULT_TIMEOUT_SECONDS
)
#################
# BUILDER TESTS #
#################
def test_build_container(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
try:
self.client.build(fileobj=script)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_pull(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
try:
self.client.build(fileobj=script, pull=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_stream(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
try:
self.client.build(fileobj=script, stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_custom_context(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
try:
self.client.build(fileobj=context, custom_context=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_custom_context_gzip(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
gz_context = gzip.GzipFile(fileobj=context)
try:
self.client.build(
fileobj=gz_context,
custom_context=True,
encoding="gzip"
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_remote_with_registry_auth(self):
try:
self.client._auth_configs = {
'https://example.com': {
'user': 'example',
'password': 'example',
'email': '[email protected]'
}
}
self.client.build(path='https://github.com/docker-library/mongo')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_with_named_dockerfile(self):
try:
self.client.build('.', dockerfile='nameddockerfile')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_with_container_limits(self):
try:
self.client.build('.', container_limits={
'memory': 1024 * 1024,
'cpusetcpus': 1,
'cpushares': 1000,
'memswap': 1024 * 1024 * 8
})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_invalid_container_limits(self):
self.assertRaises(
docker.errors.DockerException,
lambda: self.client.build('.', container_limits={
'foo': 'bar'
})
)
#######################
# PY SPECIFIC TESTS #
#######################
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
cfg = docker.auth.load_config(folder)
self.assertTrue(cfg is not None)
def test_load_config(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, '.dockercfg')
with open(dockercfg_path, 'w') as f:
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = [email protected]')
cfg = docker.auth.load_config(dockercfg_path)
self.assertTrue(docker.auth.INDEX_NAME in cfg)
self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
cfg = cfg[docker.auth.INDEX_NAME]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_with_random_name(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder,
'.{0}.dockercfg'.format(
random.randrange(100000)))
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': '[email protected]'
}
}
with open(dockercfg_path, 'w') as f:
f.write(json.dumps(config))
cfg = docker.auth.load_config(dockercfg_path)
self.assertTrue(registry in cfg)
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], '[email protected]')
self.assertEqual(cfg.get('auth'), None)
def test_tar_with_excludes(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['test/foo', 'bar']:
os.makedirs(os.path.join(base, d))
for f in ['a.txt', 'b.py', 'other.png']:
with open(os.path.join(base, d, f), 'w') as f:
f.write("content")
for exclude, names in (
(['*.py'], ['bar', 'bar/a.txt', 'bar/other.png',
'test', 'test/foo', 'test/foo/a.txt',
'test/foo/other.png']),
(['*.png', 'bar'], ['test', 'test/foo', 'test/foo/a.txt',
'test/foo/b.py']),
(['test/foo', 'a.txt'], ['bar', 'bar/a.txt', 'bar/b.py',
'bar/other.png', 'test']),
):
with docker.utils.tar(base, exclude=exclude) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), names)
def test_tar_with_empty_directory(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'foo'])
def test_tar_with_file_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
with open(os.path.join(base, 'foo'), 'w') as f:
f.write("content")
os.makedirs(os.path.join(base, 'bar'))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo'])
def test_tar_with_directory_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo'])
#######################
# HOST CONFIG TESTS #
#######################
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = create_host_config(security_opt=security_opt)
self.assertIn('SecurityOpt', result)
self.assertEqual(result['SecurityOpt'], security_opt)
self.assertRaises(
docker.errors.DockerException, create_host_config,
security_opt='wrong'
)
class StreamTest(Cleanup, base.BaseTestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.setDaemon(True)
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with docker.Client(base_url="http+unix://" + self.socket_file) \
as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
stream=True
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
self.assertEqual(list(stream), [
str(i).encode() for i in range(50)])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,702,290,130,594,926,600 | 35.400228 | 79 | 0.519364 | false |
tquilian/exelearningTest | twisted/scripts/tkmktap.py | 16 | 17390 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Implementation module for the graphical version of the `mktap` command.
"""
# System imports
import Tkinter, tkMessageBox, tkFileDialog, StringIO, os
import traceback
# Twisted imports
from twisted.application import service
from twisted.internet import tksupport, reactor
from twisted.scripts import mktap
from twisted.python import usage, reflect
from twisted.copyright import version
class TkMkAppFrame(Tkinter.Frame):
"""
A frame with all the necessary widgets to configure a Twisted Application.
"""
# Plugin currently selected
coil = None
# Options instance currently displayed
options = None
# Frame options are displayed in
optFrame = None
def __init__(self, master, coil):
Tkinter.Frame.__init__(self, master)
self.setupMkTap()
self.reset(coil)
def setupMkTap(self):
# Create all of the "mktap" option widgets
appFrame = Tkinter.Frame(self)
f = Tkinter.Frame(appFrame)
listLabel = Tkinter.Label(f, text='TAp Format')
self.typeList = Tkinter.Listbox(f, background='white')
self.typeList['height'] = 3
for t in ('pickle', 'xml', 'source'):
self.typeList.insert(Tkinter.END, t)
self.typeList.selection_set(0)
listLabel.pack(side=Tkinter.TOP)
self.typeList.pack(side=Tkinter.TOP)
f.pack(side=Tkinter.LEFT, anchor=Tkinter.N)
f = Tkinter.Frame(appFrame)
tapLabel = Tkinter.Label(f, text='TAp Filename')
tapButton = Tkinter.Button(f, text="Choose", command=self.pickTapFile)
self.tapfile = Tkinter.Entry(f, background='white')
tapLabel.pack(side=Tkinter.LEFT)
self.tapfile.pack(side=Tkinter.LEFT)
tapButton.pack(side=Tkinter.LEFT)
f.pack(side=Tkinter.TOP, anchor=Tkinter.E)
f = Tkinter.Frame(appFrame)
nameLabel = Tkinter.Label(f, text='Application Process Name')
self.appname = Tkinter.Entry(f, background='white')
nameLabel.pack(side=Tkinter.LEFT)
self.appname.pack(side=Tkinter.LEFT)
f.pack(side=Tkinter.TOP, anchor=Tkinter.E)
f = Tkinter.Frame(appFrame)
encLabel = Tkinter.Label(f, text='Passphrase')
self.passphrase = Tkinter.Entry(f, background='white')
encLabel.pack(side=Tkinter.LEFT)
self.passphrase.pack(side=Tkinter.LEFT)
f.pack(side=Tkinter.TOP, anchor=Tkinter.E)
f = Tkinter.Frame(appFrame)
self.append = Tkinter.BooleanVar()
appLabel = Tkinter.Label(f, text='Append')
appButton = Tkinter.Checkbutton(f, variable=self.append)
appLabel.pack(side=Tkinter.LEFT)
appButton.pack(side=Tkinter.LEFT)
f.pack(side=Tkinter.LEFT, anchor=Tkinter.E)
f = Tkinter.Frame(appFrame)
s = Tkinter.StringVar()
s.set(not hasattr(os, 'getuid') and '0' or str(os.getuid()))
uidLabel = Tkinter.Label(f, text='UID')
self.uid = Tkinter.Entry(f, text=s, background='white')
uidLabel.pack(side=Tkinter.LEFT)
self.uid.pack(side=Tkinter.LEFT)
f.pack(side=Tkinter.BOTTOM)
f = Tkinter.Frame(appFrame)
s = Tkinter.StringVar()
s.set(not hasattr(os, 'getgid') and '0' or str(os.getgid()))
gidLabel = Tkinter.Label(f, text='GID')
self.gid = Tkinter.Entry(f, text=s, background='white')
gidLabel.pack(side=Tkinter.LEFT)
self.gid.pack(side=Tkinter.LEFT)
f.pack(side=Tkinter.BOTTOM)
appFrame.grid(row=0, column=0, columnspan=3, sticky=Tkinter.N + Tkinter.S)
def pickTapFile(self):
r = tkFileDialog.askopenfilename()
if r:
self.tapfile.delete(0, Tkinter.END)
self.tapfile.insert(Tkinter.END, r)
def reset(self, coil):
"""
Remove the existing coil-specific widgets and then create and add
new ones based on the given plugin object.
"""
if coil is self.coil:
return
try:
opt = coil.load().Options()
except:
f = StringIO.StringIO()
traceback.print_exc(file=f)
# XXX - Why is this so narrow?
tkMessageBox.showerror(title="Options Error", message=f.getvalue(), parent=self)
return
if self.optFrame:
self.optFrame.forget()
self.optFrame.destroy()
self.optFrame = None
self.coil = coil
self.options = opt
self.optFrame = TkConfigFrame(self, self.options)
self.optFrame.grid(row=1, column=0)
# self.tapfile.delete(0, Tkinter.END)
# try:
# self.tapfile.insert(Tkinter.END, self.coil.tapname)
# except AttributeError:
# self.tapfile.insert(Tkinter.END, self.coil.name)
def copyOptions(self):
# Snarf the data out of the widgets and place them into the Options
# instance.
extra = self.optFrame.updateConfig(self.options)
self.options['filename'] = self.tapfile.get()
self.options['appname'] = self.appname.get()
self.options['passphrase'] = self.passphrase.get()
self.options['append'] = self.append.get()
self.options['encrypted'] = len(self.options['passphrase'])
self.options['uid'] = int(self.uid.get())
self.options['gid'] = int(self.gid.get())
try:
self.options['type'] = self.typeList.curselection()[0]
except IndexError:
raise usage.UsageError("Select a TAp Format")
self.options['help'] = 0
if extra:
try:
# XXX - this is wrong. It needs to respect quotes, etc.
self.options.parseArgs(extra.split())
except TypeError:
raise usage.UsageError("Wrong number of extra arguments")
self.options.postOptions()
def createApplication(self):
if not self.options:
tkMessageBox.showerror(message="Select an Application first")
return
try:
self.copyOptions()
except usage.UsageError, e:
tkMessageBox.showerror(message=str(e))
return
exists = os.path.exists(self.options['filename'])
if self.options['append'] and exists:
a = service.loadApplication(
self.options['filename'],
self.options['type'],
self.options['passphrase']
)
else:
if exists:
overwrite = tkMessageBox.askyesno(title='File Exists', message='Overwrite?')
if not overwrite:
return
a = service.Application(self.coil.name, self.options['uid'], self.options['gid'])
try:
s = mktap.makeService(
self.coil.load(),
self.options['appname'],
self.options
)
except usage.UsageError:
f = StringIO.StringIO()
traceback.print_stack(file=f)
tkMessageBox.showerror(title="Usage Error", message=f.getvalue(), parent=self)
else:
try:
mktap.addToApplication(
s, self.coil.name, self.options['append'],
self.options['appname'], self.options['type'],
self.options['encrypted'], self.options['uid'],
self.options['gid'],
)
except:
f = StringIO.StringIO()
traceback.print_exc(file=f)
print f.getvalue()
tkMessageBox.showerror(title="Usage Error", message=f.getvalue(), parent=self)
else:
filename = self.options['filename']
if not filename:
filename = self.coil.name
tkMessageBox.showinfo(message="Wrote " + filename)
def destroy(self):
reactor.crash()
Tkinter.Frame.destroy(self)
#
# This class was written based on code from Drew "drewp" Pertulla
# (<drewp (at) bigasterisk (dot) com>) - without his help, tkmktap
# would be an ugly POS.
#
class ParameterLine(Tkinter.Frame):
def __init__(self, master, lines, label, desc, default, cmd, **kw):
Tkinter.Frame.__init__(self, master, relief='raised', bd=1, **kw)
self.lines = lines
l = Tkinter.Label(
self, text=label, wraplen=200,
width=30, anchor='w', justify='left'
)
s = Tkinter.StringVar()
if default:
s.set(default)
self.entry = Tkinter.Entry(self, text=s, background='white')
self.flag = label
more = Tkinter.Button(
self, text='+',
command=lambda f = cmd, a = label, b = default, c = desc: f(a, b, c)
)
l.pack(side=Tkinter.LEFT, fill='y')
self.entry.pack(side=Tkinter.LEFT)
more.pack(side=Tkinter.LEFT)
l.bind("<Enter>", self.highlight)
l.bind("<Leave>", self.unhighlight)
l.bind("<ButtonPress-1>", self.press)
l.bind("<B1-ButtonRelease>", self.release)
l.bind("<B1-Motion>", self.motion)
def highlight(self, ev, hicolor = 'gray90'):
# make the label light up when you mouseover
ev.widget._oldcolor = self.cget('bg')
ev.widget.config(bg=hicolor)
def unhighlight(self, ev):
# make the label return to its old setting
try:
ev.widget.config(bg=ev.widget._oldcolor)
del ev.widget._oldcolor
except:
pass
# make the frame change order when you drag it (by its label)
def press(self, ev):
# save old attrs
self._oldrelief = self.cget('relief'), self.cget('bd')
# thicken the border
self.config(relief='raised', bd=3)
def motion(self, ev):
this = self.lines.index(self)
framey = ev.y + self.winfo_y() # get mouse y coord in parent frame
replace = this # replace will be the index of the row to swap with
for i, l in zip(range(len(self.lines)), self.lines):
y1 = l.winfo_y()
y2 = y1 + l.winfo_height()
if y1 < framey < y2:
replace = i
if replace != this:
# we moved over another row-- swap them
self.lines[replace], self.lines[this] = self.lines[this], self.lines[replace]
# and re-assign all rows in the new order
for i, l in zip(range(len(self.lines)), self.lines):
l.grid(row=i, column=0)
def release(self, ev):
# restore the old border width
try:
rel, bd = self._oldrelief
self.config(relief=rel, bd=bd)
del self._oldrelief
except:
pass
class TkConfigFrame(Tkinter.Frame):
optFrame = None
paramFrame = None
commandFrame = None
    subOptFrame = None
previousCommand = None
optFlags = None
paramLines = None
def __init__(self, master, options):
Tkinter.Frame.__init__(self, master)
self.options = options
self.setupOptFlags()
self.setupOptParameters()
self.setupSubCommands()
self.setupExtra()
def getOptFlags(self):
return self.optFlags
def getOptParameters(self):
r = []
for p in self.paramLines:
r.append((p.flag, p.entry.get()))
return r
def updateConfig(self, options):
for (opt, var) in self.getOptFlags():
var = var.get()
if not var:
continue # XXX - this is poor - add a '-' button to remove options
f = getattr(options, 'opt_' + opt, None)
if f:
f()
else:
options[opt] = var
for (opt, var) in self.getOptParameters():
if not var:
continue # XXX - this is poor - add a '-' button to remove options
f = getattr(options, 'opt_' + opt, None)
if f:
f(var)
else:
options[opt] = var
return self.extra.get()
def setupOptFlags(self):
self.optFlags = []
flags = []
if hasattr(self.options, 'optFlags'):
flags.extend(self.options.optFlags)
d = {}
soFar = {}
for meth in reflect.prefixedMethodNames(self.options.__class__, 'opt_'):
full = 'opt_' + meth
func = getattr(self.options, full)
if not usage.flagFunction(func) or meth in ('help', 'version'):
continue
if soFar.has_key(func):
continue
soFar[func] = 1
existing = d.setdefault(func, meth)
if existing != meth:
if len(existing) < len(meth):
d[func] = meth
for (func, name) in d.items():
flags.append((name, None, func.__doc__))
if len(flags):
self.optFrame = f = Tkinter.Frame(self)
for (flag, _, desc) in flags:
b = Tkinter.BooleanVar()
c = Tkinter.Checkbutton(f, text=desc, variable=b, wraplen=200)
c.pack(anchor=Tkinter.W)
self.optFlags.append((flag, b))
f.grid(row=1, column=1)
def setupOptParameters(self):
params = []
if hasattr(self.options, 'optParameters'):
params.extend(self.options.optParameters)
d = {}
soFar = {}
for meth in reflect.prefixedMethodNames(self.options.__class__, 'opt_'):
full = 'opt_' + meth
func = getattr(self.options, full)
if usage.flagFunction(func) or soFar.has_key(func):
continue
soFar[func] = 1
existing = d.setdefault(func, meth)
if existing != meth:
if len(existing) < len(meth):
d[func] = meth
for (func, name) in d.items():
params.append((name, None, None, func.__doc__))
if len(params):
self.paramFrame = Tkinter.Frame(self)
self.paramLines = []
for (flag, _, default, desc) in params:
try:
default = self.options[flag]
except KeyError:
pass
self.makeField(flag, default, desc)
self.paramFrame.grid(row=1, column=2)
def makeField(self, flag, default, desc):
line = ParameterLine(
self.paramFrame, self.paramLines, flag, desc, default,
cmd=self.makeField
)
self.paramLines.append(line)
line.grid(row=len(self.paramLines), column=0)
def setupSubCommands(self):
self.optMap = {}
if hasattr(self.options, 'subCommands'):
self.commandFrame = f = Tkinter.Frame(self)
self.cmdList = Tkinter.Listbox(f)
for (cmd, _, opt, desc) in self.options.subCommands:
self.cmdList.insert(Tkinter.END, cmd)
self.optMap[cmd] = opt()
self.cmdList.pack()
self.subCmdPoll = reactor.callLater(0.1, self.pollSubCommands)
f.grid(row=1, column=3)
def setupExtra(self):
f = Tkinter.Frame(self)
l = Tkinter.Label(f, text='Extra Options')
self.extra = Tkinter.Entry(f, background='white')
l.pack()
self.extra.pack(fill='y')
f.grid(row=2, column=1, columnspan=2)
def pollSubCommands(self):
s = self.cmdList.curselection()
if len(s):
s = s[0]
if s != self.previousCommand:
if self.subOptFrame:
self.subOptFrame.forget()
self.subOptFrame.destroy()
self.subOptFrame = TkConfigFrame(self.commandFrame, self.optMap[s])
                self.subOptFrame.pack()
                self.previousCommand = s
self.subCmdPoll = reactor.callLater(0.1, self.pollSubCommands)
class TkAppMenu(Tkinter.Menu):
def __init__(self, master, create, callback, items):
Tkinter.Menu.__init__(self, master)
cmdMenu = Tkinter.Menu(self)
self.add_cascade(label="Actions", menu=cmdMenu)
cmdMenu.add_command(label='Create', command=create)
cmdMenu.add_separator()
cmdMenu.add_command(label='Quit', command=reactor.crash)
tapMenu = Tkinter.Menu(self)
self.add_cascade(label="Applications", menu=tapMenu)
for item in items:
tapMenu.add_command(
label=item, command=lambda i=item, c = callback: c(i)
)
def run():
taps = mktap.loadPlugins()
r = Tkinter.Tk()
r.withdraw()
keyList = taps.keys()
keyList.sort()
config = TkMkAppFrame(r, None)
menu = TkAppMenu(
r,
config.createApplication,
lambda i, d = taps, c = config: c.reset(d[i]),
keyList
)
config.pack()
r['menu'] = menu
r.title('Twisted Application Maker ' + version)
r.deiconify()
tksupport.install(r)
reactor.run()
if __name__ == '__main__':
run()
| gpl-2.0 | 2,626,503,716,499,862,500 | 30.164875 | 94 | 0.555492 | false |
jroltgen/aqua-gesture-framework | samples/SampleGesture/scons-local-1.3.0/SCons/Variables/BoolVariable.py | 5 | 3082 | """engine.SCons.Variables.BoolVariable
This file defines the option type for SCons implementing true/false values.
Usage example:
opts = Variables()
opts.Add(BoolVariable('embedded', 'build for an embedded system', 0))
...
if env['embedded'] == 1:
...
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/BoolVariable.py 4720 2010/03/24 03:14:11 jars"
__all__ = ['BoolVariable',]
import string
import SCons.Errors
__true_strings = ('y', 'yes', 'true', 't', '1', 'on' , 'all' )
__false_strings = ('n', 'no', 'false', 'f', '0', 'off', 'none')
def _text2bool(val):
"""
Converts strings to True/False depending on the 'truth' expressed by
the string. If the string can't be converted, the original value
will be returned.
See '__true_strings' and '__false_strings' for values considered
    'true' or 'false' respectively.
This is usable as 'converter' for SCons' Variables.
"""
lval = string.lower(val)
if lval in __true_strings: return True
if lval in __false_strings: return False
raise ValueError("Invalid value for boolean option: %s" % val)
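# Illustrative conversions (not part of the original SCons module), assuming
# the default __true_strings/__false_strings defined above:
#
#   _text2bool('Yes')   -> True
#   _text2bool('off')   -> False
#   _text2bool('maybe') -> raises ValueError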
def _validator(key, val, env):
"""
    Validates that the given value is either True or False.
This is usable as 'validator' for SCons' Variables.
"""
if not env[key] in (True, False):
raise SCons.Errors.UserError(
'Invalid value for boolean option %s: %s' % (key, env[key]))
def BoolVariable(key, help, default):
"""
    The input parameters describe a boolean option, thus they are
    returned with the correct converter and validator appended. The
    'help' text will be appended by ' (yes|no)' to show the valid
    values. The result is usable for input to opts.Add().
"""
return (key, '%s (yes|no)' % help, default,
_validator, _text2bool)
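# The tuple returned above plugs straight into opts.Add(); conceptually
# (illustrative only, not part of the original module):
#
#   opts.Add(BoolVariable('embedded', 'build for an embedded system', 0))
#   # behaves like
#   opts.Add('embedded', 'build for an embedded system (yes|no)', 0,
#            _validator, _text2bool)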
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 | -2,273,738,489,320,727,300 | 32.868132 | 95 | 0.693381 | false |
LukeC92/iris | lib/iris/tests/unit/analysis/test_WPERCENTILE.py | 7 | 9671 | # (C) British Crown Copyright 2015 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :data:`iris.analysis.PERCENTILE` aggregator."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import numpy.ma as ma
from iris.analysis import WPERCENTILE
class Test_aggregate(tests.IrisTest):
def test_missing_mandatory_kwargs(self):
emsg = "weighted_percentile aggregator requires " \
".* keyword argument 'percent'"
with self.assertRaisesRegexp(ValueError, emsg):
WPERCENTILE.aggregate('dummy', axis=0, weights=None)
emsg = "weighted_percentile aggregator requires " \
".* keyword argument 'weights'"
with self.assertRaisesRegexp(ValueError, emsg):
WPERCENTILE.aggregate('dummy', axis=0, percent=50)
def test_wrong_weights_shape(self):
data = np.arange(11)
weights = np.ones(10)
emsg = "_weighted_percentile: weights wrong shape."
with self.assertRaisesRegexp(ValueError, emsg):
WPERCENTILE.aggregate(data, axis=0, percent=50, weights=weights)
def test_1d_single(self):
data = np.arange(11)
weights = np.ones(data.shape)
actual = WPERCENTILE.aggregate(data, axis=0, percent=50,
weights=weights)
expected = 5
self.assertTupleEqual(actual.shape, ())
self.assertEqual(actual, expected)
def test_1d_single_unequal(self):
data = np.arange(12)
weights = np.ones(data.shape)
weights[0:3] = 3
actual, weight_total = WPERCENTILE.aggregate(
data, axis=0, percent=50, weights=weights, returned=True)
expected = 2.75
self.assertTupleEqual(actual.shape, ())
self.assertEqual(actual, expected)
self.assertEqual(weight_total, 18)
def test_masked_1d_single(self):
data = ma.arange(11)
weights = np.ones(data.shape)
data[3:7] = ma.masked
actual = WPERCENTILE.aggregate(data, axis=0, percent=50,
weights=weights)
expected = 7
self.assertTupleEqual(actual.shape, ())
self.assertEqual(actual, expected)
def test_1d_multi(self):
data = np.arange(11)
weights = np.ones(data.shape)
percent = np.array([20, 50, 90])
actual = WPERCENTILE.aggregate(data, axis=0, percent=percent,
weights=weights)
expected = [1.7, 5, 9.4]
self.assertTupleEqual(actual.shape, percent.shape)
self.assertArrayAlmostEqual(actual, expected)
def test_1d_multi_unequal(self):
data = np.arange(13)
weights = np.ones(data.shape)
weights[1::2] = 3
percent = np.array([20, 50, 96])
actual = WPERCENTILE.aggregate(data, axis=0, percent=percent,
weights=weights)
expected = [2.25, 6, 11.75]
self.assertTupleEqual(actual.shape, percent.shape)
self.assertArrayAlmostEqual(actual, expected)
def test_masked_1d_multi(self):
data = ma.arange(11)
weights = np.ones(data.shape)
data[3:9] = ma.masked
percent = np.array([25, 50, 75])
actual = WPERCENTILE.aggregate(data, axis=0, percent=percent,
weights=weights)
expected = [0.75, 2, 9.25]
self.assertTupleEqual(actual.shape, percent.shape)
self.assertArrayAlmostEqual(actual, expected)
def test_2d_single(self):
shape = (2, 11)
data = np.arange(np.prod(shape)).reshape(shape)
weights = np.ones(shape)
actual = WPERCENTILE.aggregate(data, axis=0, percent=50,
weights=weights)
self.assertTupleEqual(actual.shape, shape[-1:])
expected = np.arange(shape[-1]) + 5.5
self.assertArrayEqual(actual, expected)
def test_masked_2d_single(self):
shape = (2, 11)
data = ma.arange(np.prod(shape)).reshape(shape)
data[0, ::2] = ma.masked
data[1, 1::2] = ma.masked
weights = np.ones(shape)
actual = WPERCENTILE.aggregate(data, axis=0, percent=50,
weights=weights)
self.assertTupleEqual(actual.shape, shape[-1:])
expected = np.empty(shape[-1:])
expected[1::2] = data[0, 1::2]
expected[::2] = data[1, ::2]
self.assertArrayEqual(actual, expected)
def test_2d_multi(self):
shape = (2, 10)
data = np.arange(np.prod(shape)).reshape(shape)
weights = np.ones(shape)
percent = np.array([10, 50, 70, 100])
actual = WPERCENTILE.aggregate(data, axis=0, percent=percent,
weights=weights)
self.assertTupleEqual(actual.shape, (shape[-1], percent.size))
expected = np.tile(np.arange(shape[-1]), percent.size).astype('f8')
expected = expected.reshape(percent.size, shape[-1]).T
expected[:, 1:-1] += (percent[1:-1]-25)*0.2
expected[:, -1] += 10.
self.assertArrayAlmostEqual(actual, expected)
def test_masked_2d_multi(self):
shape = (3, 10)
data = ma.arange(np.prod(shape)).reshape(shape)
weights = np.ones(shape)
data[1] = ma.masked
percent = np.array([10, 50, 70, 80])
actual = WPERCENTILE.aggregate(data, axis=0, percent=percent,
weights=weights)
self.assertTupleEqual(actual.shape, (shape[-1], percent.size))
expected = np.tile(np.arange(shape[-1]), percent.size).astype('f8')
expected = expected.reshape(percent.size, shape[-1]).T
expected[:, 1:-1] += (percent[1:-1]-25)*0.4
expected[:, -1] += 20.
self.assertArrayAlmostEqual(actual, expected)
def test_masked_2d_multi_unequal(self):
shape = (3, 10)
data = ma.arange(np.prod(shape)).reshape(shape)
weights = np.ones(shape)
weights[0] = 3
data[1] = ma.masked
percent = np.array([30, 50, 75, 80])
actual, weight_total = WPERCENTILE.aggregate(
data, axis=0, percent=percent, weights=weights, returned=True)
self.assertTupleEqual(actual.shape, (shape[-1], percent.size))
expected = np.tile(np.arange(shape[-1]), percent.size)
expected = expected.reshape(percent.size, shape[-1]).T
expected[:, 1:] = 2.0 * (
(0.875 - percent[1:]/100.0) * data[0, np.newaxis].T +
(percent[1:]/100.0 - 0.375) * data[-1, np.newaxis].T
)
self.assertArrayAlmostEqual(actual, expected)
self.assertTupleEqual(weight_total.shape, (shape[-1],))
self.assertArrayEqual(weight_total, np.repeat(4, shape[-1]))
class Test_name(tests.IrisTest):
def test(self):
self.assertEqual(WPERCENTILE.name(), 'weighted_percentile')
class Test_aggregate_shape(tests.IrisTest):
def test_missing_mandatory_kwarg(self):
emsg_pc = "weighted_percentile aggregator requires " \
".* keyword argument 'percent'"
emsg_wt = "weighted_percentile aggregator requires " \
".* keyword argument 'weights'"
with self.assertRaisesRegexp(ValueError, emsg_pc):
WPERCENTILE.aggregate_shape(weights=None)
with self.assertRaisesRegexp(ValueError, emsg_pc):
kwargs = dict(weights=None)
WPERCENTILE.aggregate_shape(**kwargs)
with self.assertRaisesRegexp(ValueError, emsg_pc):
kwargs = dict(point=10)
WPERCENTILE.aggregate_shape(**kwargs)
with self.assertRaisesRegexp(ValueError, emsg_wt):
WPERCENTILE.aggregate_shape(percent=50)
with self.assertRaisesRegexp(ValueError, emsg_wt):
kwargs = dict(percent=50)
WPERCENTILE.aggregate_shape(**kwargs)
with self.assertRaisesRegexp(ValueError, emsg_wt):
kwargs = dict(percent=50, weight=None)
WPERCENTILE.aggregate_shape(**kwargs)
def test_mandatory_kwarg_no_shape(self):
kwargs = dict(percent=50, weights=None)
self.assertTupleEqual(WPERCENTILE.aggregate_shape(**kwargs), ())
kwargs = dict(percent=[50], weights=None)
self.assertTupleEqual(WPERCENTILE.aggregate_shape(**kwargs), ())
def test_mandatory_kwarg_shape(self):
kwargs = dict(percent=(10, 20), weights=None)
self.assertTupleEqual(WPERCENTILE.aggregate_shape(**kwargs), (2,))
kwargs = dict(percent=range(13), weights=None)
self.assertTupleEqual(WPERCENTILE.aggregate_shape(**kwargs), (13,))
class Test_cell_method(tests.IrisTest):
def test(self):
self.assertIsNone(WPERCENTILE.cell_method)
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | -8,163,767,714,099,672,000 | 40.506438 | 76 | 0.612553 | false |
prasadtalasila/INET-Vagrant-Demos | Nonce_Demo/impacket-0.9.12/impacket/crypto.py | 3 | 18139 | # Copyright (c) 2003-2012 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id: crypto.py 915 2013-11-09 22:47:13Z bethus $
#
# Author: Alberto Solino ([email protected])
#
# Description:
# RFC 4493 implementation (http://www.ietf.org/rfc/rfc4493.txt)
# RFC 4615 implementation (http://www.ietf.org/rfc/rfc4615.txt)
#
# NIST SP 800-108 Section 5.1, with PRF HMAC-SHA256 implementation
# (http://tools.ietf.org/html/draft-irtf-cfrg-kdf-uses-00#ref-SP800-108)
#
# [MS-LSAD] Section 5.1.2
# [MS-SAMR] Section 2.2.11.1.1
try:
from Crypto.Cipher import DES, AES, ARC4
except Exception:
print "Warning: You don't have any crypto installed. You need PyCrypto"
print "See http://www.pycrypto.org/"
from struct import pack, unpack
from impacket.structure import Structure
from impacket import ntlm
import hmac, hashlib
def Generate_Subkey(K):
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + Algorithm Generate_Subkey +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + +
# + Input : K (128-bit key) +
# + Output : K1 (128-bit first subkey) +
# + K2 (128-bit second subkey) +
# +-------------------------------------------------------------------+
# + +
# + Constants: const_Zero is 0x00000000000000000000000000000000 +
# + const_Rb is 0x00000000000000000000000000000087 +
# + Variables: L for output of AES-128 applied to 0^128 +
# + +
# + Step 1. L := AES-128(K, const_Zero); +
# + Step 2. if MSB(L) is equal to 0 +
# + then K1 := L << 1; +
# + else K1 := (L << 1) XOR const_Rb; +
# + Step 3. if MSB(K1) is equal to 0 +
# + then K2 := K1 << 1; +
# + else K2 := (K1 << 1) XOR const_Rb; +
# + Step 4. return K1, K2; +
# + +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AES_128 = AES.new(K)
L = AES_128.encrypt('\x00'*16)
LHigh = unpack('>Q',L[:8])[0]
LLow = unpack('>Q',L[8:])[0]
K1High = ((LHigh << 1) | ( LLow >> 63 )) & 0xFFFFFFFFFFFFFFFF
K1Low = (LLow << 1) & 0xFFFFFFFFFFFFFFFF
if (LHigh >> 63):
K1Low ^= 0x87
K2High = ((K1High << 1) | (K1Low >> 63)) & 0xFFFFFFFFFFFFFFFF
K2Low = ((K1Low << 1)) & 0xFFFFFFFFFFFFFFFF
if (K1High >> 63):
K2Low ^= 0x87
K1 = pack('>QQ', K1High, K1Low)
K2 = pack('>QQ', K2High, K2Low)
return K1, K2
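# Illustrative usage sketch, reusing the RFC 4493 subkey test vector quoted in
# the self-test block at the bottom of this module:
#   K1, K2 = Generate_Subkey(unhexlify("2b7e151628aed2a6abf7158809cf4f3c"))
#   hexlify(K1) == "fbeed618357133667c85e08f7236a8de"
#   hexlify(K2) == "f7ddac306ae266ccf90bc11ee46d513b"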
def XOR_128(N1,N2):
J = ''
for i in range(len(N1)):
J = J + chr(ord(N1[i]) ^ ord(N2[i]))
return J
def PAD(N):
const_Bsize = 16
padLen = 16-len(N)
return N + '\x80' + '\x00'*(padLen-1)
def AES_CMAC(K, M, length):
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + Algorithm AES-CMAC +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + +
# + Input : K ( 128-bit key ) +
# + : M ( message to be authenticated ) +
# + : len ( length of the message in octets ) +
# + Output : T ( message authentication code ) +
# + +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + Constants: const_Zero is 0x00000000000000000000000000000000 +
# + const_Bsize is 16 +
# + +
# + Variables: K1, K2 for 128-bit subkeys +
# + M_i is the i-th block (i=1..ceil(len/const_Bsize)) +
# + M_last is the last block xor-ed with K1 or K2 +
# + n for number of blocks to be processed +
# + r for number of octets of last block +
# + flag for denoting if last block is complete or not +
# + +
# + Step 1. (K1,K2) := Generate_Subkey(K); +
# + Step 2. n := ceil(len/const_Bsize); +
# + Step 3. if n = 0 +
# + then +
# + n := 1; +
# + flag := false; +
# + else +
# + if len mod const_Bsize is 0 +
# + then flag := true; +
# + else flag := false; +
# + +
# + Step 4. if flag is true +
# + then M_last := M_n XOR K1; +
# + else M_last := padding(M_n) XOR K2; +
# + Step 5. X := const_Zero; +
# + Step 6. for i := 1 to n-1 do +
# + begin +
# + Y := X XOR M_i; +
# + X := AES-128(K,Y); +
# + end +
# + Y := M_last XOR X; +
# + T := AES-128(K,Y); +
# + Step 7. return T; +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
const_Bsize = 16
const_Zero = '\x00'*16
AES_128= AES.new(K)
M = M[:length]
K1, K2 = Generate_Subkey(K)
n = len(M)/const_Bsize
if n == 0:
n = 1
flag = False
else:
if (length % const_Bsize) == 0:
flag = True
else:
n += 1
flag = False
M_n = M[(n-1)*const_Bsize:]
if flag is True:
M_last = XOR_128(M_n,K1)
else:
M_last = XOR_128(PAD(M_n),K2)
X = const_Zero
for i in range(n-1):
M_i = M[(i)*const_Bsize:][:16]
Y = XOR_128(X, M_i)
X = AES_128.encrypt(Y)
Y = XOR_128(M_last, X)
T = AES_128.encrypt(Y)
return T
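# Illustrative usage sketch, matching Example 2 of the RFC 4493 test vectors
# listed in the self-test block below:
#   K = unhexlify("2b7e151628aed2a6abf7158809cf4f3c")
#   M = unhexlify("6bc1bee22e409f96e93d7e117393172a")
#   hexlify(AES_CMAC(K, M, 16)) == "070a16b46b4d4144f79bdd9dd04a287c"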
def AES_CMAC_PRF_128(VK, M, VKlen, Mlen):
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + AES-CMAC-PRF-128 +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + +
# + Input : VK (Variable-length key) +
# + : M (Message, i.e., the input data of the PRF) +
# + : VKlen (length of VK in octets) +
# + : len (length of M in octets) +
# + Output : PRV (128-bit Pseudo-Random Variable) +
# + +
# +-------------------------------------------------------------------+
# + Variable: K (128-bit key for AES-CMAC) +
# + +
# + Step 1. If VKlen is equal to 16 +
# + Step 1a. then +
# + K := VK; +
# + Step 1b. else +
# + K := AES-CMAC(0^128, VK, VKlen); +
# + Step 2. PRV := AES-CMAC(K, M, len); +
# + return PRV; +
# + +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
if VKlen == 16:
K = VK
else:
K = AES_CMAC('\x00'*16, VK, VKlen)
PRV = AES_CMAC(K, M, Mlen)
return PRV
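# Illustrative usage sketch, matching the RFC 4615 test case with a 16-octet
# key quoted in the self-test block below:
#   VK = unhexlify("000102030405060708090a0b0c0d0e0f")
#   M  = unhexlify("000102030405060708090a0b0c0d0e0f10111213")
#   hexlify(AES_CMAC_PRF_128(VK, M, 16, len(M))) == "980ae87b5f4c9c5214f5b6a8455e4c2d"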
def KDF_CounterMode(KI, Label, Context, L):
# Implements NIST SP 800-108 Section 5.1, with PRF HMAC-SHA256
# http://tools.ietf.org/html/draft-irtf-cfrg-kdf-uses-00#ref-SP800-108
# Fixed values:
# 1. h - The length of the output of the PRF in bits, and
# 2. r - The length of the binary representation of the counter i.
# Input: KI, Label, Context, and L.
# Process:
# 1. n := [L/h]
# 2. If n > 2r-1, then indicate an error and stop.
# 3. result(0):= empty .
# 4. For i = 1 to n, do
# a. K(i) := PRF (KI, [i]2 || Label || 0x00 || Context || [L]2)
# b. result(i) := result(i-1) || K(i).
# 5. Return: KO := the leftmost L bits of result(n).
h = 256
r = 32
n = L / h
if n == 0:
n = 1
if n > (pow(2,r)-1):
raise "Error computing KDF_CounterMode"
result = ''
K = ''
for i in range(1,n+1):
input = pack('>L', i) + Label + '\x00' + Context + pack('>L',L)
K = hmac.new(KI, input, hashlib.sha256).digest()
result = result + K
return result[:(L/8)]
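# Minimal usage sketch (the key, label and context values below are
# hypothetical placeholders, not an official test vector):
#   KI = '\x00' * 16
#   ko = KDF_CounterMode(KI, 'label\x00', 'context\x00', 128)
#   len(ko) == 16   # the leftmost L bits (L/8 octets) of the PRF output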
# [MS-LSAD] Section 5.1.2 / 5.1.3
class LSA_SECRET_XP(Structure):
structure = (
('Length','<L=0'),
('Version','<L=0'),
('_Secret','_-Secret', 'self["Length"]'),
('Secret', ':'),
)
def transformKey(InputKey):
# Section 5.1.3
OutputKey = []
OutputKey.append( chr(ord(InputKey[0]) >> 0x01) )
OutputKey.append( chr(((ord(InputKey[0])&0x01)<<6) | (ord(InputKey[1])>>2)) )
OutputKey.append( chr(((ord(InputKey[1])&0x03)<<5) | (ord(InputKey[2])>>3)) )
OutputKey.append( chr(((ord(InputKey[2])&0x07)<<4) | (ord(InputKey[3])>>4)) )
OutputKey.append( chr(((ord(InputKey[3])&0x0F)<<3) | (ord(InputKey[4])>>5)) )
OutputKey.append( chr(((ord(InputKey[4])&0x1F)<<2) | (ord(InputKey[5])>>6)) )
OutputKey.append( chr(((ord(InputKey[5])&0x3F)<<1) | (ord(InputKey[6])>>7)) )
OutputKey.append( chr(ord(InputKey[6]) & 0x7F) )
for i in range(8):
OutputKey[i] = chr((ord(OutputKey[i]) << 1) & 0xfe)
return "".join(OutputKey)
def decryptSecret(key, value):
# [MS-LSAD] Section 5.1.2
plainText = ''
key0 = key
for i in range(0, len(value), 8):
cipherText = value[:8]
tmpStrKey = key0[:7]
tmpKey = transformKey(tmpStrKey)
Crypt1 = DES.new(tmpKey, DES.MODE_ECB)
plainText += Crypt1.decrypt(cipherText)
cipherText = cipherText[8:]
key0 = key0[7:]
value = value[8:]
# AdvanceKey
if len(key0) < 7:
key0 = key[len(key0):]
secret = LSA_SECRET_XP(plainText)
return (secret['Secret'])
def encryptSecret(key, value):
# [MS-LSAD] Section 5.1.2
plainText = ''
cipherText = ''
key0 = key
value0 = pack('<LL', len(value), 1) + value
for i in range(0, len(value0), 8):
if len(value0) < 8:
value0 = value0 + '\x00'*(8-len(value0))
plainText = value0[:8]
tmpStrKey = key0[:7]
tmpKey = transformKey(tmpStrKey)
Crypt1 = DES.new(tmpKey, DES.MODE_ECB)
cipherText += Crypt1.encrypt(plainText)
plainText = plainText[8:]
key0 = key0[7:]
value0 = value0[8:]
# AdvanceKey
if len(key0) < 7:
key0 = key[len(key0):]
return cipherText
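# Round-trip sketch (hypothetical 16-byte session key, for illustration only):
#   key = 'K' * 16
#   blob = encryptSecret(key, 'S3cret!')
#   decryptSecret(key, blob) == 'S3cret!'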
def SamDecryptNTLMHash(encryptedHash, key):
# [MS-SAMR] Section 2.2.11.1.1
Block1 = encryptedHash[:8]
Block2 = encryptedHash[8:]
Key1 = key[:7]
Key1 = transformKey(Key1)
Key2 = key[7:14]
Key2 = transformKey(Key2)
Crypt1 = DES.new(Key1, DES.MODE_ECB)
Crypt2 = DES.new(Key2, DES.MODE_ECB)
plain1 = Crypt1.decrypt(Block1)
plain2 = Crypt2.decrypt(Block2)
return plain1 + plain2
def SamEncryptNTLMHash(encryptedHash, key):
# [MS-SAMR] Section 2.2.11.1.1
Block1 = encryptedHash[:8]
Block2 = encryptedHash[8:]
Key1 = key[:7]
Key1 = transformKey(Key1)
Key2 = key[7:14]
Key2 = transformKey(Key2)
Crypt1 = DES.new(Key1, DES.MODE_ECB)
Crypt2 = DES.new(Key2, DES.MODE_ECB)
plain1 = Crypt1.encrypt(Block1)
plain2 = Crypt2.encrypt(Block2)
return plain1 + plain2
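# Both helpers split their 16-byte input into two 8-byte DES-ECB blocks keyed
# from the first 14 bytes of `key`, so encryption and decryption are inverses.
# Round-trip sketch (hypothetical values, for illustration only):
#   SamDecryptNTLMHash(SamEncryptNTLMHash('H' * 16, 'K' * 16), 'K' * 16) == 'H' * 16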
if __name__ == '__main__':
# Test Vectors
# --------------------------------------------------
# Subkey Generation
# K 2b7e1516 28aed2a6 abf71588 09cf4f3c
# AES-128(key,0) 7df76b0c 1ab899b3 3e42f047 b91b546f
# K1 fbeed618 35713366 7c85e08f 7236a8de
# K2 f7ddac30 6ae266cc f90bc11e e46d513b
# --------------------------------------------------
#
# --------------------------------------------------
# Example 1: len = 0
# M <empty string>
# AES-CMAC bb1d6929 e9593728 7fa37d12 9b756746
# --------------------------------------------------
#
# Example 2: len = 16
# M 6bc1bee2 2e409f96 e93d7e11 7393172a
# AES-CMAC 070a16b4 6b4d4144 f79bdd9d d04a287c
# --------------------------------------------------
#
# Example 3: len = 40
# M 6bc1bee2 2e409f96 e93d7e11 7393172a
# ae2d8a57 1e03ac9c 9eb76fac 45af8e51
# 30c81c46 a35ce411
# AES-CMAC dfa66747 de9ae630 30ca3261 1497c827
# --------------------------------------------------
#
# Example 4: len = 64
# M 6bc1bee2 2e409f96 e93d7e11 7393172a
# ae2d8a57 1e03ac9c 9eb76fac 45af8e51
# 30c81c46 a35ce411 e5fbc119 1a0a52ef
# f69f2445 df4f9b17 ad2b417b e66c3710
# AES-CMAC 51f0bebf 7e3b9d92 fc497417 79363cfe
# --------------------------------------------------
def pp(s):
for i in range((len(s)/8)):
print s[:8] ,
s = s[8:]
return ''
from binascii import hexlify, unhexlify
K = "2b7e151628aed2a6abf7158809cf4f3c"
M = "6bc1bee22e409f96e93d7e117393172aae2d8a571e03ac9c9eb76fac45af8e5130c81c46a35ce411e5fbc1191a0a52eff69f2445df4f9b17ad2b417be66c3710"
K1, K2 = Generate_Subkey(unhexlify(K))
print "Subkey Generation"
print "K ", pp(K)
print "K1 ", pp(hexlify(K1))
print "K2 ", pp(hexlify(K2))
print
print "Example 1: len = 0"
print "M <empty string>"
print "AES-CMAC " , pp(hexlify(AES_CMAC(unhexlify(K),unhexlify(M),0)))
print
print "Example 2: len = 16"
print "M " , pp(M[:16*2])
print "AES-CMAC " , pp(hexlify(AES_CMAC(unhexlify(K),unhexlify(M),16)))
print
print "Example 3: len = 40"
print "M " , pp(M[:40*2])
print "AES-CMAC " , pp(hexlify(AES_CMAC(unhexlify(K),unhexlify(M),40)))
print
print "Example 3: len = 64"
print "M " , pp(M[:64*2])
print "AES-CMAC " , pp(hexlify(AES_CMAC(unhexlify(K),unhexlify(M),64)))
print
M = "eeab9ac8fb19cb012849536168b5d6c7a5e6c5b2fcdc32bc29b0e3654078a5129f6be2562046766f93eebf146b"
K = "6c3473624099e17ff3a39ff6bdf6cc38"
# Mac = dbf63fd93c4296609e2d66bf79251cb5
print "Example 4: len = 45"
print "M " , pp(M[:45*2])
print "AES-CMAC " , pp(hexlify(AES_CMAC(unhexlify(K),unhexlify(M),45)))
# ------------------------------------------------------------
#
# Test Case AES-CMAC-PRF-128 with 20-octet input
# Key : 00010203 04050607 08090a0b 0c0d0e0f edcb
# Key Length : 18
# Message : 00010203 04050607 08090a0b 0c0d0e0f 10111213
# PRF Output : 84a348a4 a45d235b abfffc0d 2b4da09a
#
# Test Case AES-CMAC-PRF-128 with 20-octet input
# Key : 00010203 04050607 08090a0b 0c0d0e0f
# Key Length : 16
# Message : 00010203 04050607 08090a0b 0c0d0e0f 10111213
# PRF Output : 980ae87b 5f4c9c52 14f5b6a8 455e4c2d
#
# Test Case AES-CMAC-PRF-128 with 20-octet input
# Key : 00010203 04050607 0809
# Key Length : 10
# Message : 00010203 04050607 08090a0b 0c0d0e0f 10111213
# PRF Output : 290d9e11 2edb09ee 141fcf64 c0b72f3d
#
# ------------------------------------------------------------
K = "000102030405060708090a0b0c0d0e0fedcb"
M = "000102030405060708090a0b0c0d0e0f10111213"
print "AES-CMAC-PRF-128 Test Vectors"
print
print "Example 1: len = 0"
print "M " , pp(K)
print "Key Length 18 "
print "AES-CMAC " , pp(hexlify(AES_CMAC_PRF_128(unhexlify(K),unhexlify(M),18,len(unhexlify(M)))))
print
print "Example 1: len = 0"
print "M " , pp(K)
print "Key Length 16 "
print "AES-CMAC " , pp(hexlify(AES_CMAC_PRF_128(unhexlify(K)[:16],unhexlify(M),16,len(unhexlify(M)))))
print
print "Example 1: len = 0"
print "M " , pp(K)
print "Key Length 10 "
print "AES-CMAC " , pp(hexlify(AES_CMAC_PRF_128(unhexlify(K)[:10],unhexlify(M),10,len(unhexlify(M)))))
print
| gpl-2.0 | -2,276,810,774,761,027,000 | 37.348837 | 138 | 0.423452 | false |
LFPy/LFPy | LFPy/test/test_imem.py | 1 | 3578 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Copyright (C) 2020 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import unittest
import os
import numpy as np
import LFPy
class testImem(unittest.TestCase):
def test_cell_v_init_00(self):
cell = LFPy.Cell(morphology=os.path.join(LFPy.__path__[0], 'test',
'ball_and_sticks.hoc'),
cm=1.,
Ra=150.,
passive=True,
)
cell.simulate(rec_imem=True)
np.testing.assert_allclose(
cell.imem, np.zeros_like(
cell.imem), atol=1E-10)
np.testing.assert_allclose(cell.somav, cell.v_init)
def test_cell_v_init_01(self):
cell = LFPy.Cell(morphology=os.path.join(LFPy.__path__[0], 'test',
'ball_and_sticks.hoc'),
cm=1.,
Ra=150.,
v_init=-70.,
passive=True,
passive_parameters=dict(e_pas=-70, g_pas=0.001)
)
cell.simulate(rec_imem=True)
np.testing.assert_allclose(
cell.imem, np.zeros_like(
cell.imem), atol=1E-10)
np.testing.assert_allclose(cell.somav, cell.v_init)
def test_cell_v_init_02(self):
cell = LFPy.Cell(morphology=os.path.join(LFPy.__path__[0], 'test',
'ball_and_sticks.hoc'),
cm=1.,
Ra=150.,
v_init=0.,
passive=True,
passive_parameters=dict(e_pas=0., g_pas=0.001)
)
cell.simulate(rec_imem=True)
np.testing.assert_allclose(
cell.imem, np.zeros_like(
cell.imem), atol=1E-10)
np.testing.assert_allclose(cell.somav, cell.v_init)
def test_cell_v_init_03(self):
cell = LFPy.Cell(morphology=os.path.join(LFPy.__path__[0], 'test',
'ball_and_sticks.hoc'),
cm=1.,
Ra=150.,
passive=False,
)
cell.simulate(rec_imem=True)
np.testing.assert_allclose(
cell.imem, np.zeros_like(
cell.imem), atol=1E-10)
np.testing.assert_allclose(cell.somav, cell.v_init)
def test_cell_v_init_04(self):
cell = LFPy.Cell(morphology=os.path.join(LFPy.__path__[0], 'test',
'ball_and_sticks.hoc'),
cm=1.,
Ra=150.,
v_init=0.,
passive=False,
)
cell.simulate(rec_imem=True)
np.testing.assert_allclose(
cell.imem, np.zeros_like(
cell.imem), atol=1E-10)
np.testing.assert_allclose(cell.somav, cell.v_init)
| gpl-3.0 | -1,022,368,455,699,087,500 | 36.663158 | 74 | 0.486864 | false |
esikachev/sahara-backup | sahara/plugins/cdh/v5_3_0/edp_engine.py | 2 | 2917 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import confighints_helper as ch_helper
from sahara.plugins.cdh.v5_3_0 import cloudera_utils as cu
from sahara.plugins import exceptions as ex
from sahara.plugins import utils as u
from sahara.service.edp import hdfs_helper
from sahara.service.edp.oozie import engine as edp_engine
from sahara.utils import edp
CU = cu.ClouderaUtilsV530()
class EdpOozieEngine(edp_engine.OozieJobEngine):
def get_hdfs_user(self):
return 'hdfs'
def create_hdfs_dir(self, remote, dir_name):
hdfs_helper.create_dir_hadoop2(remote, dir_name, self.get_hdfs_user())
def get_oozie_server_uri(self, cluster):
oozie_ip = CU.pu.get_oozie(cluster).management_ip
return 'http://%s:11000/oozie' % oozie_ip
def get_name_node_uri(self, cluster):
namenode_ip = CU.pu.get_namenode(cluster).fqdn()
return 'hdfs://%s:8020' % namenode_ip
def get_resource_manager_uri(self, cluster):
resourcemanager_ip = CU.pu.get_resourcemanager(cluster).fqdn()
return '%s:8032' % resourcemanager_ip
def get_oozie_server(self, cluster):
return CU.pu.get_oozie(cluster)
def validate_job_execution(self, cluster, job, data):
oo_count = u.get_instances_count(cluster, 'OOZIE_SERVER')
if oo_count != 1:
raise ex.InvalidComponentCountException(
'OOZIE_SERVER', '1', oo_count)
super(EdpOozieEngine, self).validate_job_execution(cluster, job, data)
@staticmethod
def get_possible_job_config(job_type):
if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
return {'job_config': ch_helper.get_possible_hive_config_from(
'plugins/cdh/v5_3_0/resources/hive-site.xml')}
if edp.compare_job_type(job_type,
edp.JOB_TYPE_MAPREDUCE,
edp.JOB_TYPE_MAPREDUCE_STREAMING):
return {'job_config': ch_helper.get_possible_mapreduce_config_from(
'plugins/cdh/v5_3_0/resources/mapred-site.xml')}
if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
return {'job_config': ch_helper.get_possible_pig_config_from(
'plugins/cdh/v5_3_0/resources/mapred-site.xml')}
return edp_engine.OozieJobEngine.get_possible_job_config(job_type)
| apache-2.0 | -1,117,714,960,142,602,400 | 40.084507 | 79 | 0.671238 | false |
discos/basie | src/receiver.py | 1 | 4844 | #coding=utf-8
#
#
# Copyright (C) 2013 INAF -IRA Italian institute of radioastronomy, [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from builtins import range
import logging
logger = logging.getLogger(__name__)
from numpy import interp
from astropy import units as u
from persistent import Persistent
from .valid_angles import VAngle
from .errors import *
from .frame import Coord, HOR
class Receiver(Persistent):
"""
represents a receiver and its characteristics
"""
def __init__(self, name, fmin, fmax,
beamsizetable=[[0.0],[0.0]],
nfeed=1,
npols=2, #polarizations per feed
feed_offsets = [Coord(HOR, VAngle(0.0), VAngle(0.0))],
has_derotator = False):
"""
Constructor
@param name: the receiver name, used as an unique ID
@type name: string
@param fmin: receiver minimum usable sky frequency (MHz)
@type fmin: float
@param fmax: receiver maximum usable sky frequency (MHz)
@type fmax: float
@param beamsizetable: a table of (frequency, beamsize) couples
@type beamsizetable: L{utils.KVTable}
@param feed_offsets: a list of offsets [(olon, olat)] of each feed
        in relation to the central feed. The element at index 0 should always be (0.0, 0.0)
@type feed_offsets: [(angles.Angle, angles.Angle) ... ]
@param has_derotator: True if the receiver can derotate
"""
self.name = name
self.fmin = fmin * u.MHz
self.fmax = fmax * u.MHz
self.nfeed = nfeed
self.npols = npols
self.feed_offsets = feed_offsets
self.beamsize_table = beamsizetable
self.has_derotator = has_derotator
self.feed_extent = 0
self.interleave = 0
if len(self.feed_offsets) < self.nfeed:
logger.debug("adding default offset (0.0, 0.0) to receiver %s" %
(self.name,))
for i in range(self.nfeed - len(self.feed_offsets)):
self.feed_offsets.append(Coord(HOR, VAngle(0.0), VAngle(0.0)))
@property
def nifs(self):
"""
How many IFs out of this receiver (nfeed * npol)
"""
return self.nfeed * self.npols
def set_feed_offsets(self, feed_number, offsets, frame=HOR):
"""
Set the feed offset for one feed
@param offsets: (offset_lon, offset_lat)
@raise ReceiverError: if feed_number is too high
"""
if feed_number > self.nfeed:
raise ReceiverError("Receiver %s has no feed %d" % (self.name,
feed_number))
self.feed_offsets[feed_number] = Coord(frame,
VAngle(offsets[0]),
VAngle(offsets[1]))
@property
def beamsize(self):
"""
Get receiver default beamsize (calculated at self.fmin)
"""
return self.get_beamsize()
def get_beamsize(self, freq=None):
"""
Get the beamsize for this receiver at a given frequency.
Read from beamsize_table the nearest frequency value.
        If freq is None, defaults to self.fmin
@param freq: frequency (MHz)
@type freq: Quantity
@return: beamsize at given frequency
"""
if not freq:
logger.warning("RECEIVER %s using default beamsize at min frequency" %
(self.name,))
freq = self.fmin
if((not self.fmin <= freq <= self.fmax) and (freq > 0 * u.MHz)):
logger.warning("RECEIVER %s beamsize at frequency %f out of range" %
(self.name, freq.value,))
logger.debug("Getting beamsize\nfreq: %s\nt0: %s\nt1: %s" % \
(freq.value, self.beamsize_table[0], self.beamsize_table[1]))
return interp(freq.value,
self.beamsize_table[0],
self.beamsize_table[1])
def is_multifeed(self):
"""
True if the receiver has multiple feeds, False otherwise.
"""
return self.nfeed > 1
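    # Minimal usage sketch (hypothetical receiver values, not a real configuration):
    #   rx = Receiver("K-band", 18000.0, 26500.0,
    #                 beamsizetable=[[18000.0, 26500.0], [0.032, 0.021]],
    #                 nfeed=2)
    #   rx.nifs                           # 4 == nfeed * npols
    #   rx.get_beamsize(22000.0 * u.MHz)  # linear interpolation in the table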
| bsd-3-clause | -1,834,140,680,836,344,800 | 36.261538 | 93 | 0.588976 | false |
migueldiascosta/easybuild-framework | easybuild/toolchains/iimpi.py | 3 | 1516 | ##
# Copyright 2012-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for intel compiler toolchain (includes Intel compilers (icc, ifort), Intel MPI).
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.iccifort import IccIfort
from easybuild.toolchains.mpi.intelmpi import IntelMPI
class Iimpi(IccIfort, IntelMPI):
"""
Compiler toolchain with Intel compilers (icc/ifort), Intel MPI.
"""
NAME = 'iimpi'
SUBTOOLCHAIN = IccIfort.NAME
| gpl-2.0 | 4,494,261,878,069,330,000 | 35.97561 | 98 | 0.746702 | false |
anastasia-tarasova/indy-sdk | wrappers/python/tests/ledger/test_build_cred_def_request.py | 2 | 1044 | from indy import ledger
import json
import pytest
@pytest.mark.asyncio
async def test_build_cred_def_request_works_for_correct_data_json(did_trustee):
data = {
"ver": "1.0",
"id": "cred_def_id",
"schemaId": "1",
"type": "CL",
"tag": "TAG_1",
"value": {
"primary": {
"n": "1",
"s": "2",
"r": {"name": "1", "master_secret": "3"},
"rctxt": "1",
"z": "1"
}
}
}
expected_response = {
"identifier": did_trustee,
"operation": {
"ref": 1,
"data": {
"primary": {"n": "1", "s": "2", "r": {"name": "1", "master_secret": "3"}, "rctxt": "1", "z": "1"}
},
"type": "102",
"signature_type": "CL",
"tag": "TAG_1"
}
}
response = json.loads(await ledger.build_cred_def_request(did_trustee, json.dumps(data)))
assert expected_response.items() <= response.items()
| apache-2.0 | 168,730,944,043,497,300 | 25.1 | 113 | 0.41954 | false |
wilvk/ansible | lib/ansible/plugins/action/vyos.py | 10 | 3792 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.vyos.vyos import vyos_provider_spec
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
elif self._play_context.connection == 'local':
provider = load_provider(vyos_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'vyos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
# make sure we are in the right cli context which should be
# enable mode and not config module
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('abort')
out = conn.get_prompt()
result = super(ActionModule, self).run(tmp, task_vars)
return result
| gpl-3.0 | -7,368,075,518,938,175,000 | 42.586207 | 125 | 0.661129 | false |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Tumblr/Post/RetrieveQueuedPosts.py | 5 | 4598 | # -*- coding: utf-8 -*-
###############################################################################
#
# RetrieveQueuedPosts
# Retrieves a list of queued posts for a specified Tumblr blog.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RetrieveQueuedPosts(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the RetrieveQueuedPosts Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(RetrieveQueuedPosts, self).__init__(temboo_session, '/Library/Tumblr/Post/RetrieveQueuedPosts')
def new_input_set(self):
return RetrieveQueuedPostsInputSet()
def _make_result_set(self, result, path):
return RetrieveQueuedPostsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RetrieveQueuedPostsChoreographyExecution(session, exec_id, path)
class RetrieveQueuedPostsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the RetrieveQueuedPosts
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Tumblr (AKA the OAuth Consumer Key).)
"""
super(RetrieveQueuedPostsInputSet, self)._set_input('APIKey', value)
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(RetrieveQueuedPostsInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(RetrieveQueuedPostsInputSet, self)._set_input('AccessToken', value)
def set_BaseHostname(self, value):
"""
Set the value of the BaseHostname input for this Choreo. ((required, string) The standard or custom blog hostname (i.e. temboo.tumblr.com).)
"""
super(RetrieveQueuedPostsInputSet, self)._set_input('BaseHostname', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
super(RetrieveQueuedPostsInputSet, self)._set_input('ResponseFormat', value)
def set_SecretKey(self, value):
"""
Set the value of the SecretKey input for this Choreo. ((required, string) The Secret Key provided by Tumblr (AKA the OAuth Consumer Secret).)
"""
super(RetrieveQueuedPostsInputSet, self)._set_input('SecretKey', value)
class RetrieveQueuedPostsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the RetrieveQueuedPosts Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Tumblr. Default is JSON, can be set to XML by entering 'xml' in ResponseFormat.)
"""
return self._output.get('Response', None)
class RetrieveQueuedPostsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RetrieveQueuedPostsResultSet(response, path)
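# Minimal usage sketch (assumes the standard Temboo SDK session and execute
# helpers; the account, key and blog values below are placeholders):
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = RetrieveQueuedPosts(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('...'); inputs.set_SecretKey('...')
#   inputs.set_AccessToken('...'); inputs.set_AccessTokenSecret('...')
#   inputs.set_BaseHostname('example.tumblr.com')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())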
| gpl-3.0 | 7,872,661,366,429,173,000 | 41.971963 | 180 | 0.684645 | false |
akubicharm/openshift-ansible | roles/lib_openshift/library/oc_storageclass.py | 1 | 57202 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/storageclass -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_storageclass
short_description: Create, modify, and idempotently manage openshift storageclasses.
description:
- Manage openshift storageclass objects programmatically.
options:
state:
description:
- State represents whether to create, modify, delete, or list
required: False
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
provisioner:
description:
    - The provisioner backend for the storage class (e.g. aws-ebs).
required: false
default: 'aws-ebs'
aliases: []
default_storage_class:
description:
- Whether or not this is the default storage class
required: false
default: False
aliases: []
parameters:
description:
    - A dictionary of parameters for the storageclass; the valid keys depend on the chosen provisioner.
required: false
default: None
aliases: []
api_version:
description:
- The api version.
required: false
default: v1
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: get storageclass
run_once: true
oc_storageclass:
name: gp2
state: list
register: registry_sc_out
- name: create the storageclass
oc_storageclass:
run_once: true
name: gp2
parameters:
type: gp2
encrypted: 'true'
kmsKeyId: '<full kms key arn>'
provisioner: aws-ebs
default_storage_class: False
register: sc_out
notify:
- restart openshift master services
'''
# -*- -*- -*- End included fragment: doc/storageclass -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
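    # Illustrative sketch of the key notation handled above (dot separator):
    #   Yedit.get_entry({'a': {'b': {'c': 'd'}}}, 'a.b.c')      -> 'd'
    #   Yedit.get_entry({'a': [{'b': 1}, {'b': 2}]}, 'a[1].b')  -> 2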
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
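    # Illustrative sketch of the `edits` list consumed above (hypothetical keys):
    #   yamlfile = Yedit(content={'spec': {}, 'metadata': {}})
    #   edits = [{'key': 'spec.replicas', 'value': 3},
    #            {'key': 'metadata.labels.app', 'value': 'registry'}]
    #   Yedit.process_edits(edits, yamlfile)  # -> {'changed': True, 'results': [...]}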
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it is has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we enapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
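    # Sketch of the command built here (template and parameter names are hypothetical):
    #
    #   cli._process('router-template', params={'ROUTER_NAME': 'router'})
    #   # runs roughly: oc process router-template -v ROUTER_NAME=router
    #   # and, with create=True, feeds the processed output to `oc create -f <tmpfile>`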
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
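    # Minimal usage sketch (assumes a reachable cluster and a valid kubeconfig path):
    #
    #   cli = OpenShiftCLI('default', kubeconfig='/etc/origin/master/admin.kubeconfig')
    #   rval = cli.openshift_cmd(['get', 'pods', '-o', 'json'], output=True)
    #   # rval looks like {'returncode': 0, 'cmd': 'oc get pods -o json -n default',
    #   #                  'results': {...decoded JSON...}}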
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
        # Horrible hack to get the openshift version in OpenShift 3.2:
        # by default, "oc version" in 3.2 does not return an "openshift" version.
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
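    # Hedged example of the comparison semantics (values are illustrative):
    #
    #   user = {'spec': {'replicas': 1}, 'metadata': {'name': 'x'}}
    #   live = {'spec': {'replicas': 1}, 'metadata': {'name': 'y'}, 'status': {}}
    #   Utils.check_def_equal(user, live)  # True -- 'metadata' and 'status' are skipped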
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
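    # Expected expansion, sketched with hypothetical option values:
    #
    #   options = {'replicas': {'value': 3, 'include': True},
    #              'labels': {'value': {'app': 'web', 'tier': '1'}, 'include': True}}
    #   OpenShiftCLIConfig('r', 'ns', '/path/kubeconfig', options).to_option_list(ascommalist='labels')
    #   # -> ['--labels=app=web,tier=1', '--replicas=3']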
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/storageclass.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class StorageClassConfig(object):
    ''' Handle storageclass options '''
# pylint: disable=too-many-arguments
def __init__(self,
name,
provisioner,
parameters=None,
annotations=None,
default_storage_class="false",
api_version='v1',
kubeconfig='/etc/origin/master/admin.kubeconfig'):
''' constructor for handling storageclass options '''
self.name = name
self.parameters = parameters
self.annotations = annotations
self.provisioner = provisioner
self.api_version = api_version
self.default_storage_class = str(default_storage_class).lower()
self.kubeconfig = kubeconfig
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiates a storageclass dict '''
self.data['apiVersion'] = self.api_version
self.data['kind'] = 'StorageClass'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['annotations'] = {}
if self.annotations is not None:
self.data['metadata']['annotations'] = self.annotations
self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \
self.default_storage_class
self.data['provisioner'] = self.provisioner
self.data['parameters'] = {}
if self.parameters is not None:
self.data['parameters'].update(self.parameters)
# default to aws if no params were passed
else:
self.data['parameters']['type'] = 'gp2'
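    # For reference, StorageClassConfig('slow', 'kubernetes.io/aws-ebs') with the
    # defaults above yields roughly (sketch, not an exhaustive dump):
    #
    #   {'apiVersion': 'v1',
    #    'kind': 'StorageClass',
    #    'metadata': {'name': 'slow',
    #                 'annotations': {'storageclass.beta.kubernetes.io/is-default-class': 'false'}},
    #    'provisioner': 'kubernetes.io/aws-ebs',
    #    'parameters': {'type': 'gp2'}}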
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class StorageClass(Yedit):
''' Class to model the oc storageclass object '''
annotations_path = "metadata.annotations"
provisioner_path = "provisioner"
parameters_path = "parameters"
kind = 'StorageClass'
def __init__(self, content):
'''StorageClass constructor'''
super(StorageClass, self).__init__(content=content)
def get_annotations(self):
        ''' get the storageclass annotations '''
return self.get(StorageClass.annotations_path) or {}
def get_parameters(self):
        ''' get the storageclass parameters '''
return self.get(StorageClass.parameters_path) or {}
# -*- -*- -*- End included fragment: lib/storageclass.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_storageclass.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCStorageClass(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'storageclass'
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
verbose=False):
''' Constructor for OCStorageClass '''
super(OCStorageClass, self).__init__(None, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self.storage_class = None
def exists(self):
''' return whether a storageclass exists'''
if self.storage_class:
return True
return False
def get(self):
'''return storageclass '''
result = self._get(self.kind, self.config.name)
if result['returncode'] == 0:
self.storage_class = StorageClass(content=result['results'][0])
elif '\"%s\" not found' % self.config.name in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.name)
def create(self):
'''create the object'''
return self._create_from_content(self.config.name, self.config.data)
def update(self):
'''update the object'''
# parameters are currently unable to be updated. need to delete and recreate
self.delete()
# pause here and attempt to wait for delete.
# Better option would be to poll
import time
time.sleep(5)
return self.create()
def needs_update(self):
''' verify an update is needed '''
# check if params have updated
if self.storage_class.get_parameters() != self.config.parameters:
return True
for anno_key, anno_value in self.storage_class.get_annotations().items():
if 'is-default-class' in anno_key and anno_value != self.config.default_storage_class:
return True
return False
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
rconfig = StorageClassConfig(params['name'],
provisioner="kubernetes.io/{}".format(params['provisioner']),
parameters=params['parameters'],
annotations=params['annotations'],
api_version="storage.k8s.io/{}".format(params['api_version']),
default_storage_class=params.get('default_storage_class', 'false'),
kubeconfig=params['kubeconfig'],
)
oc_sc = OCStorageClass(rconfig, verbose=params['debug'])
state = params['state']
api_rval = oc_sc.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': 'list'}
########
# Delete
########
if state == 'absent':
if oc_sc.exists():
if check_mode:
return {'changed': True, 'msg': 'Would have performed a delete.'}
api_rval = oc_sc.delete()
return {'changed': True, 'results': api_rval, 'state': 'absent'}
return {'changed': False, 'state': 'absent'}
if state == 'present':
########
# Create
########
if not oc_sc.exists():
if check_mode:
return {'changed': True, 'msg': 'Would have performed a create.'}
# Create it here
api_rval = oc_sc.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sc.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': 'present'}
########
# Update
########
if oc_sc.needs_update():
api_rval = oc_sc.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sc.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': 'present'}
return {'changed': False, 'results': api_rval, 'state': 'present'}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
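    # Illustrative call (parameter values are hypothetical; a live cluster is assumed):
    #
    #   OCStorageClass.run_ansible({'name': 'gp2', 'provisioner': 'aws-ebs',
    #                               'parameters': None, 'annotations': None,
    #                               'api_version': 'v1', 'default_storage_class': 'false',
    #                               'kubeconfig': '/etc/origin/master/admin.kubeconfig',
    #                               'debug': False, 'state': 'present'}, check_mode=False)
    #   # -> {'changed': True, 'results': {...}, 'state': 'present'} on first creation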
# -*- -*- -*- End included fragment: class/oc_storageclass.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_storageclass.py -*- -*- -*-
def main():
'''
ansible oc module for storageclass
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str', choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
annotations=dict(default=None, type='dict'),
parameters=dict(default=None, type='dict'),
provisioner=dict(required=True, type='str'),
api_version=dict(default='v1', type='str'),
default_storage_class=dict(default="false", type='str'),
),
supports_check_mode=True,
)
rval = OCStorageClass.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
return module.fail_json(**rval)
return module.exit_json(**rval)
if __name__ == '__main__':
main()
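# Typical playbook usage, sketched from the argument_spec above (values are examples):
#
#   - oc_storageclass:
#       name: gp2
#       provisioner: aws-ebs
#       default_storage_class: "true"
#       state: present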
# -*- -*- -*- End included fragment: ansible/oc_storageclass.py -*- -*- -*-
| apache-2.0 | 5,901,550,826,576,815,000 | 32.967933 | 118 | 0.534614 | false |
pombredanne/kitchen-1 | kitchen2/tests/test_collections.py | 2 | 5970 | # -*- coding: utf-8 -*-
#
import unittest
from nose import tools
from kitchen.pycompat24.sets import add_builtin_set
add_builtin_set()
from kitchen import collections
def test_strict_dict_get_set():
'''Test getting and setting items in StrictDict'''
d = collections.StrictDict()
d[u'a'] = 1
d['a'] = 2
tools.assert_not_equal(d[u'a'], d['a'])
tools.eq_(len(d), 2)
d[u'\xf1'] = 1
d['\xf1'] = 2
d[u'\xf1'.encode('utf-8')] = 3
tools.eq_(d[u'\xf1'], 1)
tools.eq_(d['\xf1'], 2)
tools.eq_(d[u'\xf1'.encode('utf-8')], 3)
tools.eq_(len(d), 5)
class TestStrictDict(unittest.TestCase):
def setUp(self):
self.d = collections.StrictDict()
self.d[u'a'] = 1
self.d['a'] = 2
self.d[u'\xf1'] = 1
self.d['\xf1'] = 2
self.d[u'\xf1'.encode('utf8')] = 3
self.keys = [u'a', 'a', u'\xf1', '\xf1', u'\xf1'.encode('utf-8')]
def tearDown(self):
del(self.d)
def _compare_lists(self, list1, list2, debug=False):
'''We have a mixture of bytes and unicode so we have to compare these
lists manually and inefficiently
'''
def _compare_lists_helper(compare_to, dupes, idx, length):
if i not in compare_to:
return False
for n in range(1, length + 1):
if i not in dupes[n][idx]:
dupes[n][idx].add(i)
return True
if len(list1) != len(list2):
return False
list1_dupes = dict([(i, (set(), set(), set())) for i in range(1, len(list1)+1)])
list2_dupes = dict([(i, (set(), set(), set())) for i in range(1, len(list1)+1)])
list1_u = [l for l in list1 if isinstance(l, unicode)]
list1_b = [l for l in list1 if isinstance(l, str)]
list1_o = [l for l in list1 if not (isinstance(l, (unicode, bytes)))]
list2_u = [l for l in list2 if isinstance(l, unicode)]
list2_b = [l for l in list2 if isinstance(l, str)]
list2_o = [l for l in list2 if not (isinstance(l, (unicode, bytes)))]
for i in list1:
if isinstance(i, unicode):
if not _compare_lists_helper(list2_u, list1_dupes, 0, len(list1)):
return False
elif isinstance(i, str):
if not _compare_lists_helper(list2_b, list1_dupes, 1, len(list1)):
return False
else:
if not _compare_lists_helper(list2_o, list1_dupes, 2, len(list1)):
return False
if list1_dupes[2][0] or list1_dupes[2][1] or list1_dupes[2][2]:
for i in list2:
if isinstance(i, unicode):
if not _compare_lists_helper(list1_u, list2_dupes, 0, len(list1)):
return False
elif isinstance(i, str):
if not _compare_lists_helper(list1_b, list2_dupes, 1, len(list1)):
return False
else:
if not _compare_lists_helper(list1_o, list2_dupes, 2, len(list1)):
return False
for i in range(2, len(list1)+1):
for n in list1_dupes[i]:
if n not in list2_dupes[i]:
return False
return True
def test__compare_list(self):
'''*sigh* this test support function is so complex we need to test it'''
tools.ok_(self._compare_lists(['a', 'b', 'c'], ['c', 'a', 'b']))
tools.ok_(not self._compare_lists(['b', 'c'], ['c', 'a', 'b']))
tools.ok_(not self._compare_lists([u'a', 'b'], ['a', 'b']))
tools.ok_(not self._compare_lists(['a', u'b'], [u'a', 'b']))
tools.ok_(self._compare_lists(['a', 'b', 1], ['a', 1, 'b']))
tools.ok_(self._compare_lists([u'a', u'b'], [u'a', u'b']))
tools.ok_(self._compare_lists([u'a', 'b'], [u'a', 'b']))
tools.ok_(not self._compare_lists([u'a', 'b'], [u'a', u'b']))
tools.ok_(self._compare_lists([u'a', 'b', 'b', 'c', u'a'], [u'a', u'a', 'b', 'c', 'b']))
tools.ok_(not self._compare_lists([u'a', 'b', 'b', 'c', 'a'], [u'a', u'a', 'b', 'c', 'b']))
tools.ok_(not self._compare_lists([u'a', 'b', 'b', 'c', u'a'], [u'a', 'b', 'b', 'c', 'b']))
def test_strict_dict_len(self):
'''StrictDict len'''
tools.eq_(len(self.d), 5)
def test_strict_dict_del(self):
'''StrictDict del'''
tools.eq_(len(self.d), 5)
del(self.d[u'\xf1'])
tools.assert_raises(KeyError, self.d.__getitem__, u'\xf1')
tools.eq_(len(self.d), 4)
def test_strict_dict_iter(self):
'''StrictDict iteration'''
keys = []
for k in self.d:
keys.append(k)
tools.ok_(self._compare_lists(keys, self.keys),
msg='keys != self.key: %s != %s' % (keys, self.keys))
keys = []
for k in self.d.iterkeys():
keys.append(k)
tools.ok_(self._compare_lists(keys, self.keys),
msg='keys != self.key: %s != %s' % (keys, self.keys))
keys = [k for k in self.d]
tools.ok_(self._compare_lists(keys, self.keys),
msg='keys != self.key: %s != %s' % (keys, self.keys))
keys = []
for k in self.d.keys():
keys.append(k)
tools.ok_(self._compare_lists(keys, self.keys),
msg='keys != self.key: %s != %s' % (keys, self.keys))
def test_strict_dict_contains(self):
'''StrictDict contains function'''
tools.ok_('b' not in self.d)
tools.ok_(u'b' not in self.d)
tools.ok_('\xf1' in self.d)
tools.ok_(u'\xf1' in self.d)
tools.ok_('a' in self.d)
tools.ok_(u'a' in self.d)
del(self.d[u'\xf1'])
tools.ok_(u'\xf1' not in self.d)
tools.ok_('\xf1' in self.d)
del(self.d['a'])
tools.ok_(u'a' in self.d)
tools.ok_('a' not in self.d)
| gpl-2.0 | 4,488,348,237,246,046,700 | 36.54717 | 99 | 0.497822 | false |
vlegoff/tsunami | src/primaires/joueur/editeurs/descedit/__init__.py | 1 | 5115 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 CORTIER Benoît
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant l'éditeur 'descedit'."""
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.description import Description
from .edt_annuler import EdtAnnuler
from .edt_envoyer import EdtEnvoyer
class EdtDescedit(Presentation):
"""Classe définissant l'éditeur de description 'descedit'.
"""
nom = "descedit"
def __init__(self, personnage, joueur):
"""Constructeur de l'éditeur
joueur représente ici le même objet que personnage.
"""
if personnage:
instance_connexion = personnage.instance_connexion
else:
instance_connexion = None
Presentation.__init__(self, instance_connexion, joueur, None, False)
if personnage and joueur:
self.construire(personnage, joueur)
def __getnewargs__(self):
return (None, None)
def accueil(self):
"""Message d'accueil du contexte"""
msg = "| |tit|Edition de sa description|ff|".ljust(87) + "|\n"
msg += self.opts.separateur + "\n\n"
msg += " " \
"Une description qui doit se faire à la troisième personne du " \
"singulier (il a les yeux clairs ou elle est de stature semblant " \
"vigoureuse...). Pas de mention d'équipement, de vêtement ou " \
"ornements. Rien que de l'objectif, des informations que l'on peut " \
"obtenir au premier regard.\n"
        # Iterate over the available choices
for nom, objet in self.choix.items():
raccourci = self.get_raccourci_depuis_nom(nom)
            # Build the final name
            # If the original name is 'description' and the shortcut is 'd',
            # the final name must be '[D]escription'
pos = nom.find(raccourci)
raccourci = ((pos == 0) and raccourci.capitalize()) or raccourci
nom_maj = nom.capitalize()
nom_m = nom_maj[:pos] + "[|cmd|" + raccourci + "|ff|]" + \
nom_maj[pos + len(raccourci):]
msg += "\n " + nom_m
enveloppe = self.choix[nom]
apercu = enveloppe.get_apercu()
if apercu:
msg += " : " + apercu
return msg
def construire(self, personnage, joueur):
"""Construction de l'éditeur"""
# Description
# Si le personnage (l'utilisateur qui édite) est immortel, on travaille
# directement sur l'attribut description, sinon on utilise
# description_a_valider.
if personnage is joueur and personnage.est_immortel():
description = self.ajouter_choix("description", "d", \
Description, joueur, "description")
description.apercu = "{objet.description.paragraphes_indentes}"
else:
description = self.ajouter_choix("description", "d", \
Description, joueur, "description_a_valider")
description.apercu = \
"{objet.description_a_valider.paragraphes_indentes}"
description.parent = self
description.aide_courte = "Modifier sa description."
        # Send
envoyer = self.ajouter_choix(
"envoyer la description en validation", "e",
EdtEnvoyer, joueur)
envoyer.parent = self
        # Cancel
annuler = self.ajouter_choix(
"annuler et revenir à l'ancienne description", "ann",
EdtAnnuler, joueur)
annuler.parent = self
| bsd-3-clause | -1,847,908,519,395,218,700 | 39.8 | 82 | 0.647647 | false |
fvcproductions/dotfiles | bin/alfred/Alfred.alfredpreferences/workflows/user.workflow.DEDF5652-6FEF-4776-80D8-ACEDF577D06A/bs4/builder/_html5lib.py | 22 | 13672 | # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import (
NamespacedAttribute,
whitespace_re,
)
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
try:
# Pre-0.99999999
from html5lib.treebuilders import _base as treebuilder_base
new_html5lib = False
except ImportError, e:
# 0.99999999 and up
from html5lib.treebuilders import base as treebuilder_base
new_html5lib = True
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
NAME = "html5lib"
features = [NAME, PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding,
document_declared_encoding=None, exclude_encodings=None):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
# document_declared_encoding and exclude_encodings aren't used
# ATM because the html5lib TreeBuilder doesn't use
# UnicodeDammit.
if exclude_encodings:
warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.")
yield (markup, None, None, False)
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
extra_kwargs = dict()
if not isinstance(markup, unicode):
if new_html5lib:
extra_kwargs['override_encoding'] = self.user_specified_encoding
else:
extra_kwargs['encoding'] = self.user_specified_encoding
doc = parser.parse(markup, **extra_kwargs)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, unicode):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
original_encoding = parser.tokenizer.stream.charEncoding[0]
if not isinstance(original_encoding, basestring):
# In 0.99999999 and up, the encoding is an html5lib
# Encoding object. We want to use a string for compatibility
# with other tree builders.
original_encoding = original_encoding.name
doc.original_encoding = original_encoding
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
def fragmentClass(self):
self.soup = BeautifulSoup("")
self.soup.name = "[document_fragment]"
return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return treebuilder_base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
def __setitem__(self, name, value):
# If this attribute is a multi-valued attribute for this element,
# turn its value into a list.
list_attr = HTML5TreeBuilder.cdata_list_attributes
if (name in list_attr['*']
or (self.element.name in list_attr
and name in list_attr[self.element.name])):
# A node that is being cloned may have already undergone
# this procedure.
if not isinstance(value, list):
value = whitespace_re.split(value)
self.element[name] = value
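        # Sketch of the effect: for a multi-valued HTML attribute such as 'class',
        #   attrs['class'] = 'foo bar'   is stored as ['foo', 'bar'] on the element,
        # while a single-valued attribute like 'id' keeps its plain string value.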
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
class Element(treebuilder_base.Node):
def __init__(self, element, soup, namespace):
treebuilder_base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
string_child = child = None
if isinstance(node, basestring):
# Some other piece of code decided to pass in a string
# instead of creating a TextElement object to contain the
# string.
string_child = child = node
elif isinstance(node, Tag):
# Some other piece of code decided to pass in a Tag
# instead of creating an Element object to contain the
# Tag.
child = node
elif node.element.__class__ == NavigableString:
string_child = child = node.element
else:
child = node.element
if not isinstance(child, basestring) and child.parent is not None:
node.element.extract()
if (string_child and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# We are appending a string onto another string.
# TODO This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + string_child)
old_element.replace_with(new_element)
self.soup._most_recent_element = new_element
else:
if isinstance(node, basestring):
# Create a brand new NavigableString from this string.
child = self.soup.new_string(node)
# Tell Beautiful Soup to act as if it parsed this element
# immediately after the parent's last descendant. (Or
# immediately after the parent, if it has no children.)
if self.element.contents:
most_recent_element = self.element._last_descendant(False)
elif self.element.next_element is not None:
# Something from further ahead in the parse tree is
# being inserted into this earlier element. This is
# very annoying because it means an expensive search
# for the last element in the tree.
most_recent_element = self.soup._last_descendant()
else:
most_recent_element = self.element
self.soup.object_was_parsed(
child, parent=self.element,
most_recent_element=most_recent_element)
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in attributes.items():
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
if insertBefore:
            text = TextNode(self.soup.new_string(data), self.soup)
            # Pass the wrapped TextNode, not the raw string, to insertBefore().
            self.insertBefore(text, insertBefore)
else:
self.appendChild(data)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, new_parent):
"""Move all of this tag's children into another tag."""
# print "MOVE", self.element.contents
# print "FROM", self.element
# print "TO", new_parent.element
element = self.element
new_parent_element = new_parent.element
# Determine what this tag's next_element will be once all the children
# are removed.
final_next_element = element.next_sibling
new_parents_last_descendant = new_parent_element._last_descendant(False, False)
if len(new_parent_element.contents) > 0:
# The new parent already contains children. We will be
# appending this tag's children to the end.
new_parents_last_child = new_parent_element.contents[-1]
new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
else:
# The new parent contains no children.
new_parents_last_child = None
new_parents_last_descendant_next_element = new_parent_element.next_element
to_append = element.contents
append_after = new_parent_element.contents
if len(to_append) > 0:
# Set the first child's previous_element and previous_sibling
# to elements within the new parent
first_child = to_append[0]
if new_parents_last_descendant:
first_child.previous_element = new_parents_last_descendant
else:
first_child.previous_element = new_parent_element
first_child.previous_sibling = new_parents_last_child
if new_parents_last_descendant:
new_parents_last_descendant.next_element = first_child
else:
new_parent_element.next_element = first_child
if new_parents_last_child:
new_parents_last_child.next_sibling = first_child
# Fix the last child's next_element and next_sibling
last_child = to_append[-1]
last_child.next_element = new_parents_last_descendant_next_element
if new_parents_last_descendant_next_element:
new_parents_last_descendant_next_element.previous_element = last_child
last_child.next_sibling = None
for child in to_append:
child.parent = new_parent_element
new_parent_element.contents.append(child)
# Now that this element has no children, change its .next_element.
element.contents = []
element.next_element = final_next_element
# print "DONE WITH MOVE"
# print "FROM", self.element
# print "TO", new_parent_element
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
treebuilder_base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
| mit | 6,823,206,357,496,521,000 | 37.404494 | 159 | 0.618856 | false |
yufish/youtube-dl | youtube_dl/downloader/http.py | 21 | 9547 | from __future__ import unicode_literals
import errno
import os
import socket
import time
from .common import FileDownloader
from ..compat import (
compat_urllib_request,
compat_urllib_error,
)
from ..utils import (
ContentTooShortError,
encodeFilename,
sanitize_open,
)
class HttpFD(FileDownloader):
def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)
stream = None
# Do not include the Accept-Encoding header
headers = {'Youtubedl-no-compression': 'True'}
add_headers = info_dict.get('http_headers')
if add_headers:
headers.update(add_headers)
basic_request = compat_urllib_request.Request(url, None, headers)
request = compat_urllib_request.Request(url, None, headers)
is_test = self.params.get('test', False)
if is_test:
request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))
# Establish possible resume length
if os.path.isfile(encodeFilename(tmpfilename)):
resume_len = os.path.getsize(encodeFilename(tmpfilename))
else:
resume_len = 0
open_mode = 'wb'
if resume_len != 0:
if self.params.get('continuedl', True):
self.report_resuming_byte(resume_len)
request.add_header('Range', 'bytes=%d-' % resume_len)
open_mode = 'ab'
else:
resume_len = 0
count = 0
retries = self.params.get('retries', 0)
while count <= retries:
# Establish connection
try:
data = self.ydl.urlopen(request)
break
except (compat_urllib_error.HTTPError, ) as err:
if (err.code < 500 or err.code >= 600) and err.code != 416:
# Unexpected HTTP error
raise
elif err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
# Open the connection again without the range header
data = self.ydl.urlopen(basic_request)
content_length = data.info()['Content-Length']
except (compat_urllib_error.HTTPError, ) as err:
if err.code < 500 or err.code >= 600:
raise
else:
# Examine the reported length
if (content_length is not None and
(resume_len - 100 < int(content_length) < resume_len + 100)):
# The file had already been fully downloaded.
# Explanation to the above condition: in issue #175 it was revealed that
# YouTube sometimes adds or removes a few bytes from the end of the file,
# changing the file size slightly and causing problems for some users. So
# I decided to implement a suggested change and consider the file
# completely downloaded if the file size differs less than 100 bytes from
# the one in the hard drive.
self.report_file_already_downloaded(filename)
self.try_rename(tmpfilename, filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
'downloaded_bytes': resume_len,
'total_bytes': resume_len,
})
return True
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
resume_len = 0
open_mode = 'wb'
break
except socket.error as e:
if e.errno != errno.ECONNRESET:
# Connection reset is no problem, just retry
raise
# Retry
count += 1
if count <= retries:
self.report_retry(count, retries)
if count > retries:
self.report_error('giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
# Range HTTP header may be ignored/unsupported by a webserver
# (e.g. extractor/scivee.py, extractor/bambuser.py).
# However, for a test we still would like to download just a piece of a file.
# To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
# block size when downloading a file.
if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
data_len = self._TEST_FILE_SIZE
if data_len is not None:
data_len = int(data_len) + resume_len
min_data_len = self.params.get("min_filesize", None)
max_data_len = self.params.get("max_filesize", None)
if min_data_len is not None and data_len < min_data_len:
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
if max_data_len is not None and data_len > max_data_len:
self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
return False
byte_counter = 0 + resume_len
block_size = self.params.get('buffersize', 1024)
start = time.time()
# measure time over whole while-loop, so slow_down() and best_block_size() work together properly
now = None # needed for slow_down() in the first loop run
before = start # start measuring
while True:
# Download and write
data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
byte_counter += len(data_block)
# exit loop when download is finished
if len(data_block) == 0:
break
# Open destination file just in time
if stream is None:
try:
(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
assert stream is not None
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError) as err:
self.report_error('unable to open for writing: %s' % str(err))
return False
if self.params.get('xattr_set_filesize', False) and data_len is not None:
try:
import xattr
xattr.setxattr(tmpfilename, 'user.ytdl.filesize', str(data_len))
except(OSError, IOError, ImportError) as err:
self.report_error('unable to set filesize xattr: %s' % str(err))
try:
stream.write(data_block)
except (IOError, OSError) as err:
self.to_stderr('\n')
self.report_error('unable to write data: %s' % str(err))
return False
# Apply rate limit
self.slow_down(start, now, byte_counter - resume_len)
# end measuring of one loop run
now = time.time()
after = now
# Adjust block size
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
before = after
# Progress message
speed = self.calc_speed(start, now, byte_counter - resume_len)
if data_len is None:
eta = None
else:
eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
self._hook_progress({
'status': 'downloading',
'downloaded_bytes': byte_counter,
'total_bytes': data_len,
'tmpfilename': tmpfilename,
'filename': filename,
'eta': eta,
'speed': speed,
'elapsed': now - start,
})
if is_test and byte_counter == data_len:
break
if stream is None:
self.to_stderr('\n')
self.report_error('Did not get any data blocks')
return False
if tmpfilename != '-':
stream.close()
if data_len is not None and byte_counter != data_len:
raise ContentTooShortError(byte_counter, int(data_len))
self.try_rename(tmpfilename, filename)
# Update file modification time
if self.params.get('updatetime', True):
info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
'filename': filename,
'status': 'finished',
'elapsed': time.time() - start,
})
return True
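    # Minimal usage sketch (assumes a configured YoutubeDL instance `ydl`):
    #
    #   fd = HttpFD(ydl, ydl.params)
    #   fd.download(filename, {'url': 'https://example.com/video.mp4'})
    #
    # download() lives on the FileDownloader base class and dispatches to
    # real_download() above.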
| unlicense | 1,127,488,728,651,014,700 | 39.799145 | 140 | 0.517754 | false |
parkbyte/electrumparkbyte | lib/tests/test_mnemonic.py | 19 | 1293 | import unittest
from lib import mnemonic
from lib import old_mnemonic
class Test_NewMnemonic(unittest.TestCase):
def test_prepare_seed(self):
seed = 'foo BAR Baz'
self.assertEquals(mnemonic.prepare_seed(seed), 'foo bar baz')
def test_to_seed(self):
seed = mnemonic.Mnemonic.mnemonic_to_seed(mnemonic='foobar', passphrase='none')
self.assertEquals(seed.encode('hex'),
'741b72fd15effece6bfe5a26a52184f66811bd2be363190e07a42cca442b1a5b'
'b22b3ad0eb338197287e6d314866c7fba863ac65d3f156087a5052ebc7157fce')
def test_random_seeds(self):
iters = 10
m = mnemonic.Mnemonic(lang='en')
for _ in range(iters):
seed = m.make_seed()
self.assertTrue(m.check_seed(seed, custom_entropy=1))
i = m.mnemonic_decode(seed)
self.assertEquals(m.mnemonic_encode(i), seed)
class Test_OldMnemonic(unittest.TestCase):
def test(self):
seed = '8edad31a95e7d59f8837667510d75a4d'
result = old_mnemonic.mn_encode(seed)
words = 'hardly point goal hallway patience key stone difference ready caught listen fact'
self.assertEquals(result, words.split())
self.assertEquals(old_mnemonic.mn_decode(result), seed)
| mit | 8,330,715,429,898,601,000 | 37.029412 | 98 | 0.661253 | false |
amueller/scipy-2017-sklearn | notebooks/figures/ML_flow_chart.py | 26 | 4745 | """
Tutorial Diagrams
-----------------
This script plots the flow-charts used in the scikit-learn tutorials.
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Rectangle, Polygon, FancyArrow
def create_base(box_bg='#CCCCCC',
arrow1='#88CCFF',
arrow2='#88FF88',
supervised=True):
plt.figure(figsize=(9, 6), facecolor='w')
ax = plt.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False)
ax.set_xlim(0, 9)
ax.set_ylim(0, 6)
patches = [Rectangle((0.3, 3.6), 1.5, 1.8, zorder=1, fc=box_bg),
Rectangle((0.5, 3.8), 1.5, 1.8, zorder=2, fc=box_bg),
Rectangle((0.7, 4.0), 1.5, 1.8, zorder=3, fc=box_bg),
Rectangle((2.9, 3.6), 0.2, 1.8, fc=box_bg),
Rectangle((3.1, 3.8), 0.2, 1.8, fc=box_bg),
Rectangle((3.3, 4.0), 0.2, 1.8, fc=box_bg),
Rectangle((0.3, 0.2), 1.5, 1.8, fc=box_bg),
Rectangle((2.9, 0.2), 0.2, 1.8, fc=box_bg),
Circle((5.5, 3.5), 1.0, fc=box_bg),
Polygon([[5.5, 1.7],
[6.1, 1.1],
[5.5, 0.5],
[4.9, 1.1]], fc=box_bg),
FancyArrow(2.3, 4.6, 0.35, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.75, 4.2, 0.5, -0.2, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(5.5, 2.4, 0, -0.4, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(2.0, 1.1, 0.5, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.3, 1.1, 1.3, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(6.2, 1.1, 0.8, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2)]
if supervised:
patches += [Rectangle((0.3, 2.4), 1.5, 0.5, zorder=1, fc=box_bg),
Rectangle((0.5, 2.6), 1.5, 0.5, zorder=2, fc=box_bg),
Rectangle((0.7, 2.8), 1.5, 0.5, zorder=3, fc=box_bg),
FancyArrow(2.3, 2.9, 2.0, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
Rectangle((7.3, 0.85), 1.5, 0.5, fc=box_bg)]
else:
patches += [Rectangle((7.3, 0.2), 1.5, 1.8, fc=box_bg)]
for p in patches:
ax.add_patch(p)
plt.text(1.45, 4.9, "Training\nText,\nDocuments,\nImages,\netc.",
ha='center', va='center', fontsize=14)
plt.text(3.6, 4.9, "Feature\nVectors",
ha='left', va='center', fontsize=14)
plt.text(5.5, 3.5, "Machine\nLearning\nAlgorithm",
ha='center', va='center', fontsize=14)
plt.text(1.05, 1.1, "New Text,\nDocument,\nImage,\netc.",
ha='center', va='center', fontsize=14)
plt.text(3.3, 1.7, "Feature\nVector",
ha='left', va='center', fontsize=14)
plt.text(5.5, 1.1, "Predictive\nModel",
ha='center', va='center', fontsize=12)
if supervised:
plt.text(1.45, 3.05, "Labels",
ha='center', va='center', fontsize=14)
plt.text(8.05, 1.1, "Expected\nLabel",
ha='center', va='center', fontsize=14)
plt.text(8.8, 5.8, "Supervised Learning Model",
ha='right', va='top', fontsize=18)
else:
plt.text(8.05, 1.1,
"Likelihood\nor Cluster ID\nor Better\nRepresentation",
ha='center', va='center', fontsize=12)
plt.text(8.8, 5.8, "Unsupervised Learning Model",
ha='right', va='top', fontsize=18)
def plot_supervised_chart(annotate=False):
create_base(supervised=True)
if annotate:
fontdict = dict(color='r', weight='bold', size=14)
plt.text(1.9, 4.55, 'X = vec.fit_transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
plt.text(3.7, 3.2, 'clf.fit(X, y)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
plt.text(1.7, 1.5, 'X_new = vec.transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
plt.text(6.1, 1.5, 'y_new = clf.predict(X_new)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
def plot_unsupervised_chart():
create_base(supervised=False)
if __name__ == '__main__':
plot_supervised_chart(False)
plot_supervised_chart(True)
plot_unsupervised_chart()
plt.show()
| cc0-1.0 | 6,111,477,441,315,748,000 | 34.94697 | 76 | 0.493151 | false |
synergeticsedx/deployment-wipro | openedx/core/djangoapps/programs/tests/test_utils.py | 7 | 36258 | """Tests covering Programs utilities."""
import copy
import datetime
import json
from unittest import skipUnless
import uuid
import ddt
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.text import slugify
import httpretty
import mock
from nose.plugins.attrib import attr
from opaque_keys.edx.keys import CourseKey
from edx_oauth2_provider.tests.factories import ClientFactory
from provider.constants import CONFIDENTIAL
from pytz import utc
from lms.djangoapps.certificates.api import MODES
from lms.djangoapps.commerce.tests.test_utils import update_commerce_config
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.credentials.tests import factories as credentials_factories
from openedx.core.djangoapps.credentials.tests.mixins import CredentialsApiConfigMixin, CredentialsDataMixin
from openedx.core.djangoapps.programs import utils
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.programs.tests import factories
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin, ProgramsDataMixin
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from util.date_utils import strftime_localized
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
UTILS_MODULE = 'openedx.core.djangoapps.programs.utils'
CERTIFICATES_API_MODULE = 'lms.djangoapps.certificates.api'
ECOMMERCE_URL_ROOT = 'https://example-ecommerce.com'
MARKETING_URL = 'https://www.example.com/marketing/path'
@ddt.ddt
@attr(shard=2)
@httpretty.activate
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestProgramRetrieval(ProgramsApiConfigMixin, ProgramsDataMixin, CredentialsDataMixin,
CredentialsApiConfigMixin, CacheIsolationTestCase):
"""Tests covering the retrieval of programs from the Programs service."""
ENABLED_CACHES = ['default']
def setUp(self):
super(TestProgramRetrieval, self).setUp()
ClientFactory(name=ProgramsApiConfig.OAUTH2_CLIENT_NAME, client_type=CONFIDENTIAL)
self.user = UserFactory()
cache.clear()
def _expected_progam_credentials_data(self):
"""
        DRY helper that returns the expected program credentials response data.
"""
return [
credentials_factories.UserCredential(
id=1,
username='test',
credential=credentials_factories.ProgramCredential(
program_id=1
)
),
credentials_factories.UserCredential(
id=2,
username='test',
credential=credentials_factories.ProgramCredential(
program_id=2
)
)
]
def test_get_programs(self):
"""Verify programs data can be retrieved."""
self.create_programs_config()
self.mock_programs_api()
actual = utils.get_programs(self.user)
self.assertEqual(
actual,
self.PROGRAMS_API_RESPONSE['results']
)
# Verify the API was actually hit (not the cache).
self.assertEqual(len(httpretty.httpretty.latest_requests), 1)
def test_get_programs_caching(self):
"""Verify that when enabled, the cache is used for non-staff users."""
self.create_programs_config(cache_ttl=1)
self.mock_programs_api()
# Warm up the cache.
utils.get_programs(self.user)
# Hit the cache.
utils.get_programs(self.user)
# Verify only one request was made.
self.assertEqual(len(httpretty.httpretty.latest_requests), 1)
staff_user = UserFactory(is_staff=True)
# Hit the Programs API twice.
for _ in range(2):
utils.get_programs(staff_user)
# Verify that three requests have been made (one for student, two for staff).
self.assertEqual(len(httpretty.httpretty.latest_requests), 3)
def test_get_programs_programs_disabled(self):
"""Verify behavior when programs is disabled."""
self.create_programs_config(enabled=False)
actual = utils.get_programs(self.user)
self.assertEqual(actual, [])
@mock.patch('edx_rest_api_client.client.EdxRestApiClient.__init__')
def test_get_programs_client_initialization_failure(self, mock_init):
"""Verify behavior when API client fails to initialize."""
self.create_programs_config()
mock_init.side_effect = Exception
actual = utils.get_programs(self.user)
self.assertEqual(actual, [])
self.assertTrue(mock_init.called)
def test_get_programs_data_retrieval_failure(self):
"""Verify behavior when data can't be retrieved from Programs."""
self.create_programs_config()
self.mock_programs_api(status_code=500)
actual = utils.get_programs(self.user)
self.assertEqual(actual, [])
def test_get_program_for_certificates(self):
"""Verify programs data can be retrieved and parsed correctly for certificates."""
self.create_programs_config()
self.mock_programs_api()
program_credentials_data = self._expected_progam_credentials_data()
actual = utils.get_programs_for_credentials(self.user, program_credentials_data)
expected = self.PROGRAMS_API_RESPONSE['results'][:2]
expected[0]['credential_url'] = program_credentials_data[0]['certificate_url']
expected[1]['credential_url'] = program_credentials_data[1]['certificate_url']
self.assertEqual(len(actual), 2)
self.assertEqual(actual, expected)
def test_get_program_for_certificates_no_data(self):
"""Verify behavior when no programs data is found for the user."""
self.create_programs_config()
self.create_credentials_config()
self.mock_programs_api(data={'results': []})
        program_credentials_data = self._expected_program_credentials_data()
actual = utils.get_programs_for_credentials(self.user, program_credentials_data)
self.assertEqual(actual, [])
def test_get_program_for_certificates_id_not_exist(self):
"""Verify behavior when no program with the given program_id in
credentials exists.
"""
self.create_programs_config()
self.create_credentials_config()
self.mock_programs_api()
credential_data = [
{
"id": 1,
"username": "test",
"credential": {
"credential_id": 1,
"program_id": 100
},
"status": "awarded",
"credential_url": "www.example.com"
}
]
actual = utils.get_programs_for_credentials(self.user, credential_data)
self.assertEqual(actual, [])
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class GetProgramsByRunTests(TestCase):
"""Tests verifying that programs are inverted correctly."""
maxDiff = None
@classmethod
def setUpClass(cls):
super(GetProgramsByRunTests, cls).setUpClass()
cls.user = UserFactory()
course_keys = [
CourseKey.from_string('some/course/run'),
CourseKey.from_string('some/other/run'),
]
cls.enrollments = [CourseEnrollmentFactory(user=cls.user, course_id=c) for c in course_keys]
cls.course_ids = [unicode(c) for c in course_keys]
organization = factories.Organization()
joint_programs = sorted([
factories.Program(
organizations=[organization],
course_codes=[
factories.CourseCode(run_modes=[
factories.RunMode(course_key=cls.course_ids[0]),
]),
]
) for __ in range(2)
], key=lambda p: p['name'])
cls.programs = joint_programs + [
factories.Program(
organizations=[organization],
course_codes=[
factories.CourseCode(run_modes=[
factories.RunMode(course_key=cls.course_ids[1]),
]),
]
),
factories.Program(
organizations=[organization],
course_codes=[
factories.CourseCode(run_modes=[
factories.RunMode(course_key='yet/another/run'),
]),
]
),
]
def test_get_programs_by_run(self):
"""Verify that programs are organized by run ID."""
programs_by_run, course_ids = utils.get_programs_by_run(self.programs, self.enrollments)
self.assertEqual(programs_by_run[self.course_ids[0]], self.programs[:2])
self.assertEqual(programs_by_run[self.course_ids[1]], self.programs[2:3])
self.assertEqual(course_ids, self.course_ids)
def test_no_programs(self):
"""Verify that the utility can cope with missing programs data."""
programs_by_run, course_ids = utils.get_programs_by_run([], self.enrollments)
self.assertEqual(programs_by_run, {})
self.assertEqual(course_ids, self.course_ids)
def test_no_enrollments(self):
"""Verify that the utility can cope with missing enrollment data."""
programs_by_run, course_ids = utils.get_programs_by_run(self.programs, [])
self.assertEqual(programs_by_run, {})
self.assertEqual(course_ids, [])
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class GetCompletedCoursesTestCase(TestCase):
"""
Test the get_completed_courses function
"""
def make_cert_result(self, **kwargs):
"""
Helper to create dummy results from the certificates API
"""
result = {
'username': 'dummy-username',
'course_key': 'dummy-course',
'type': 'dummy-type',
'status': 'dummy-status',
'download_url': 'http://www.example.com/cert.pdf',
'grade': '0.98',
'created': '2015-07-31T00:00:00Z',
'modified': '2015-07-31T00:00:00Z',
}
result.update(**kwargs)
return result
@mock.patch(UTILS_MODULE + '.certificate_api.get_certificates_for_user')
def test_get_completed_courses(self, mock_get_certs_for_user):
"""
Ensure the function correctly calls to and handles results from the
certificates API
"""
student = UserFactory(username='test-username')
mock_get_certs_for_user.return_value = [
self.make_cert_result(status='downloadable', type='verified', course_key='downloadable-course'),
self.make_cert_result(status='generating', type='professional', course_key='generating-course'),
self.make_cert_result(status='unknown', type='honor', course_key='unknown-course'),
]
result = utils.get_completed_courses(student)
self.assertEqual(mock_get_certs_for_user.call_args[0], (student.username, ))
self.assertEqual(result, [
{'course_id': 'downloadable-course', 'mode': 'verified'},
{'course_id': 'generating-course', 'mode': 'professional'},
])
@attr(shard=2)
@httpretty.activate
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestProgramProgressMeter(ProgramsApiConfigMixin, TestCase):
"""Tests of the program progress utility class."""
def setUp(self):
super(TestProgramProgressMeter, self).setUp()
self.user = UserFactory()
self.create_programs_config()
ClientFactory(name=ProgramsApiConfig.OAUTH2_CLIENT_NAME, client_type=CONFIDENTIAL)
def _mock_programs_api(self, data):
"""Helper for mocking out Programs API URLs."""
self.assertTrue(httpretty.is_enabled(), msg='httpretty must be enabled to mock Programs API calls.')
url = ProgramsApiConfig.current().internal_api_url.strip('/') + '/programs/'
body = json.dumps({'results': data})
httpretty.register_uri(httpretty.GET, url, body=body, content_type='application/json')
def _create_enrollments(self, *course_ids):
"""Variadic helper used to create course enrollments."""
for course_id in course_ids:
CourseEnrollmentFactory(user=self.user, course_id=course_id)
def _assert_progress(self, meter, *progresses):
"""Variadic helper used to verify progress calculations."""
self.assertEqual(meter.progress, list(progresses))
def _extract_names(self, program, *course_codes):
"""Construct a list containing the display names of the indicated course codes."""
return [program['course_codes'][cc]['display_name'] for cc in course_codes]
def _attach_detail_url(self, programs):
"""Add expected detail URLs to a list of program dicts."""
for program in programs:
base = reverse('program_details_view', kwargs={'program_id': program['id']}).rstrip('/')
slug = slugify(program['name'])
program['detail_url'] = '{base}/{slug}'.format(base=base, slug=slug)
def test_no_enrollments(self):
"""Verify behavior when programs exist, but no relevant enrollments do."""
data = [
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[factories.RunMode()]),
]
),
]
self._mock_programs_api(data)
meter = utils.ProgramProgressMeter(self.user)
self.assertEqual(meter.engaged_programs(), [])
self._assert_progress(meter)
self.assertEqual(meter.completed_programs, [])
def test_no_programs(self):
"""Verify behavior when enrollments exist, but no matching programs do."""
self._mock_programs_api([])
self._create_enrollments('org/course/run')
meter = utils.ProgramProgressMeter(self.user)
self.assertEqual(meter.engaged_programs(), [])
self._assert_progress(meter)
self.assertEqual(meter.completed_programs, [])
def test_single_program_engagement(self):
"""
Verify that correct program is returned when the user has a single enrollment
appearing in one program.
"""
course_id = 'org/course/run'
data = [
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[
factories.RunMode(course_key=course_id),
]),
]
),
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[factories.RunMode()]),
]
),
]
self._mock_programs_api(data)
self._create_enrollments(course_id)
meter = utils.ProgramProgressMeter(self.user)
self._attach_detail_url(data)
program = data[0]
self.assertEqual(meter.engaged_programs(), [program])
self._assert_progress(
meter,
factories.Progress(
id=program['id'],
in_progress=self._extract_names(program, 0)
)
)
self.assertEqual(meter.completed_programs, [])
    def test_multiple_program_engagement(self):
"""
Verify that correct programs are returned in the correct order when the user
has multiple enrollments.
"""
first_course_id, second_course_id = 'org/first-course/run', 'org/second-course/run'
data = [
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[
factories.RunMode(course_key=first_course_id),
]),
]
),
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[
factories.RunMode(course_key=second_course_id),
]),
]
),
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[factories.RunMode()]),
]
),
]
self._mock_programs_api(data)
self._create_enrollments(second_course_id, first_course_id)
meter = utils.ProgramProgressMeter(self.user)
self._attach_detail_url(data)
programs = data[:2]
self.assertEqual(meter.engaged_programs(), programs)
self._assert_progress(
meter,
factories.Progress(id=programs[0]['id'], in_progress=self._extract_names(programs[0], 0)),
factories.Progress(id=programs[1]['id'], in_progress=self._extract_names(programs[1], 0))
)
self.assertEqual(meter.completed_programs, [])
def test_shared_enrollment_engagement(self):
"""
Verify that correct programs are returned when the user has a single enrollment
appearing in multiple programs.
"""
shared_course_id, solo_course_id = 'org/shared-course/run', 'org/solo-course/run'
joint_programs = sorted([
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[
factories.RunMode(course_key=shared_course_id),
]),
]
) for __ in range(2)
], key=lambda p: p['name'])
data = joint_programs + [
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[
factories.RunMode(course_key=solo_course_id),
]),
]
),
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[factories.RunMode()]),
]
),
]
self._mock_programs_api(data)
# Enrollment for the shared course ID created last (most recently).
self._create_enrollments(solo_course_id, shared_course_id)
meter = utils.ProgramProgressMeter(self.user)
self._attach_detail_url(data)
programs = data[:3]
self.assertEqual(meter.engaged_programs(), programs)
self._assert_progress(
meter,
factories.Progress(id=programs[0]['id'], in_progress=self._extract_names(programs[0], 0)),
factories.Progress(id=programs[1]['id'], in_progress=self._extract_names(programs[1], 0)),
factories.Progress(id=programs[2]['id'], in_progress=self._extract_names(programs[2], 0))
)
self.assertEqual(meter.completed_programs, [])
@mock.patch(UTILS_MODULE + '.get_completed_courses')
def test_simulate_progress(self, mock_get_completed_courses):
"""Simulate the entirety of a user's progress through a program."""
first_course_id, second_course_id = 'org/first-course/run', 'org/second-course/run'
data = [
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[
factories.RunMode(course_key=first_course_id),
]),
factories.CourseCode(run_modes=[
factories.RunMode(course_key=second_course_id),
]),
]
),
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[factories.RunMode()]),
]
),
]
self._mock_programs_api(data)
# No enrollments, no program engaged.
meter = utils.ProgramProgressMeter(self.user)
self._assert_progress(meter)
self.assertEqual(meter.completed_programs, [])
# One enrollment, program engaged.
self._create_enrollments(first_course_id)
meter = utils.ProgramProgressMeter(self.user)
program, program_id = data[0], data[0]['id']
self._assert_progress(
meter,
factories.Progress(
id=program_id,
in_progress=self._extract_names(program, 0),
not_started=self._extract_names(program, 1)
)
)
self.assertEqual(meter.completed_programs, [])
# Two enrollments, program in progress.
self._create_enrollments(second_course_id)
meter = utils.ProgramProgressMeter(self.user)
self._assert_progress(
meter,
factories.Progress(
id=program_id,
in_progress=self._extract_names(program, 0, 1)
)
)
self.assertEqual(meter.completed_programs, [])
# One valid certificate earned, one course code complete.
mock_get_completed_courses.return_value = [
{'course_id': first_course_id, 'mode': MODES.verified},
]
meter = utils.ProgramProgressMeter(self.user)
self._assert_progress(
meter,
factories.Progress(
id=program_id,
completed=self._extract_names(program, 0),
in_progress=self._extract_names(program, 1)
)
)
self.assertEqual(meter.completed_programs, [])
# Invalid certificate earned, still one course code to complete.
mock_get_completed_courses.return_value = [
{'course_id': first_course_id, 'mode': MODES.verified},
{'course_id': second_course_id, 'mode': MODES.honor},
]
meter = utils.ProgramProgressMeter(self.user)
self._assert_progress(
meter,
factories.Progress(
id=program_id,
completed=self._extract_names(program, 0),
in_progress=self._extract_names(program, 1)
)
)
self.assertEqual(meter.completed_programs, [])
# Second valid certificate obtained, all course codes complete.
mock_get_completed_courses.return_value = [
{'course_id': first_course_id, 'mode': MODES.verified},
{'course_id': second_course_id, 'mode': MODES.verified},
]
meter = utils.ProgramProgressMeter(self.user)
self._assert_progress(
meter,
factories.Progress(
id=program_id,
completed=self._extract_names(program, 0, 1)
)
)
self.assertEqual(meter.completed_programs, [program_id])
@mock.patch(UTILS_MODULE + '.get_completed_courses')
def test_nonstandard_run_mode_completion(self, mock_get_completed_courses):
"""
A valid run mode isn't necessarily verified. Verify that a program can
still be completed when this is the case.
"""
course_id = 'org/course/run'
data = [
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[
factories.RunMode(
course_key=course_id,
mode_slug=MODES.honor
),
factories.RunMode(),
]),
]
),
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[factories.RunMode()]),
]
),
]
self._mock_programs_api(data)
self._create_enrollments(course_id)
mock_get_completed_courses.return_value = [
{'course_id': course_id, 'mode': MODES.honor},
]
meter = utils.ProgramProgressMeter(self.user)
program, program_id = data[0], data[0]['id']
self._assert_progress(
meter,
factories.Progress(id=program_id, completed=self._extract_names(program, 0))
)
self.assertEqual(meter.completed_programs, [program_id])
@mock.patch(UTILS_MODULE + '.get_completed_courses')
def test_completed_programs(self, mock_get_completed_courses):
"""Verify that completed programs are correctly identified."""
program_count, course_code_count, run_mode_count = 3, 2, 2
data = [
factories.Program(
organizations=[factories.Organization()],
course_codes=[
factories.CourseCode(run_modes=[factories.RunMode() for _ in range(run_mode_count)])
for _ in range(course_code_count)
]
)
for _ in range(program_count)
]
self._mock_programs_api(data)
program_ids = []
course_ids = []
for program in data:
program_ids.append(program['id'])
for course_code in program['course_codes']:
for run_mode in course_code['run_modes']:
course_ids.append(run_mode['course_key'])
# Verify that no programs are complete.
meter = utils.ProgramProgressMeter(self.user)
self.assertEqual(meter.completed_programs, [])
# "Complete" all programs.
self._create_enrollments(*course_ids)
mock_get_completed_courses.return_value = [
{'course_id': course_id, 'mode': MODES.verified} for course_id in course_ids
]
# Verify that all programs are complete.
meter = utils.ProgramProgressMeter(self.user)
self.assertEqual(meter.completed_programs, program_ids)
@ddt.ddt
@override_settings(ECOMMERCE_PUBLIC_URL_ROOT=ECOMMERCE_URL_ROOT)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@mock.patch(UTILS_MODULE + '.get_run_marketing_url', mock.Mock(return_value=MARKETING_URL))
class TestProgramDataExtender(ProgramsApiConfigMixin, ModuleStoreTestCase):
"""Tests of the program data extender utility class."""
maxDiff = None
sku = 'abc123'
password = 'test'
checkout_path = '/basket'
def setUp(self):
super(TestProgramDataExtender, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
ClientFactory(name=ProgramsApiConfig.OAUTH2_CLIENT_NAME, client_type=CONFIDENTIAL)
self.course = CourseFactory()
self.course.start = datetime.datetime.now(utc) - datetime.timedelta(days=1)
self.course.end = datetime.datetime.now(utc) + datetime.timedelta(days=1)
self.course = self.update_course(self.course, self.user.id) # pylint: disable=no-member
self.organization = factories.Organization()
self.run_mode = factories.RunMode(course_key=unicode(self.course.id)) # pylint: disable=no-member
self.course_code = factories.CourseCode(run_modes=[self.run_mode])
self.program = factories.Program(
organizations=[self.organization],
course_codes=[self.course_code]
)
def _assert_supplemented(self, actual, **kwargs):
"""DRY helper used to verify that program data is extended correctly."""
course_overview = CourseOverview.get_from_id(self.course.id) # pylint: disable=no-member
run_mode = dict(
factories.RunMode(
certificate_url=None,
course_image_url=course_overview.course_image_url,
course_key=unicode(self.course.id), # pylint: disable=no-member
course_url=reverse('course_root', args=[self.course.id]), # pylint: disable=no-member
end_date=self.course.end.replace(tzinfo=utc),
enrollment_open_date=strftime_localized(utils.DEFAULT_ENROLLMENT_START_DATE, 'SHORT_DATE'),
is_course_ended=self.course.end < datetime.datetime.now(utc),
is_enrolled=False,
is_enrollment_open=True,
marketing_url=MARKETING_URL,
start_date=self.course.start.replace(tzinfo=utc),
upgrade_url=None,
advertised_start=None
),
**kwargs
)
course_code = factories.CourseCode(display_name=self.course_code['display_name'], run_modes=[run_mode])
expected = copy.deepcopy(self.program)
expected['course_codes'] = [course_code]
self.assertEqual(actual, expected)
@ddt.data(
(False, None, False),
(True, MODES.audit, True),
(True, MODES.verified, False),
)
@ddt.unpack
@mock.patch(UTILS_MODULE + '.CourseMode.mode_for_course')
def test_student_enrollment_status(self, is_enrolled, enrolled_mode, is_upgrade_required, mock_get_mode):
"""Verify that program data is supplemented with the student's enrollment status."""
expected_upgrade_url = '{root}/{path}?sku={sku}'.format(
root=ECOMMERCE_URL_ROOT,
path=self.checkout_path.strip('/'),
sku=self.sku,
)
update_commerce_config(enabled=True, checkout_page=self.checkout_path)
mock_mode = mock.Mock()
mock_mode.sku = self.sku
mock_get_mode.return_value = mock_mode
if is_enrolled:
CourseEnrollmentFactory(user=self.user, course_id=self.course.id, mode=enrolled_mode) # pylint: disable=no-member
data = utils.ProgramDataExtender(self.program, self.user).extend()
self._assert_supplemented(
data,
is_enrolled=is_enrolled,
upgrade_url=expected_upgrade_url if is_upgrade_required else None
)
@ddt.data(MODES.audit, MODES.verified)
def test_inactive_enrollment_no_upgrade(self, enrolled_mode):
"""Verify that a student with an inactive enrollment isn't encouraged to upgrade."""
update_commerce_config(enabled=True, checkout_page=self.checkout_path)
CourseEnrollmentFactory(
user=self.user,
course_id=self.course.id, # pylint: disable=no-member
mode=enrolled_mode,
is_active=False,
)
data = utils.ProgramDataExtender(self.program, self.user).extend()
self._assert_supplemented(data)
@mock.patch(UTILS_MODULE + '.CourseMode.mode_for_course')
def test_ecommerce_disabled(self, mock_get_mode):
"""Verify that the utility can operate when the ecommerce service is disabled."""
update_commerce_config(enabled=False, checkout_page=self.checkout_path)
mock_mode = mock.Mock()
mock_mode.sku = self.sku
mock_get_mode.return_value = mock_mode
CourseEnrollmentFactory(user=self.user, course_id=self.course.id, mode=MODES.audit) # pylint: disable=no-member
data = utils.ProgramDataExtender(self.program, self.user).extend()
self._assert_supplemented(data, is_enrolled=True, upgrade_url=None)
@ddt.data(
(1, 1, False),
(1, -1, True),
)
@ddt.unpack
def test_course_enrollment_status(self, start_offset, end_offset, is_enrollment_open):
"""
Verify that course enrollment status is reflected correctly.
"""
self.course.enrollment_start = datetime.datetime.now(utc) - datetime.timedelta(days=start_offset)
self.course.enrollment_end = datetime.datetime.now(utc) - datetime.timedelta(days=end_offset)
self.course = self.update_course(self.course, self.user.id) # pylint: disable=no-member
data = utils.ProgramDataExtender(self.program, self.user).extend()
self._assert_supplemented(
data,
is_enrollment_open=is_enrollment_open,
enrollment_open_date=strftime_localized(self.course.enrollment_start, 'SHORT_DATE'),
)
def test_no_enrollment_start_date(self):
"""Verify that a closed course with no explicit enrollment start date doesn't cause an error.
Regression test for ECOM-4973.
"""
self.course.enrollment_end = datetime.datetime.now(utc) - datetime.timedelta(days=1)
self.course = self.update_course(self.course, self.user.id) # pylint: disable=no-member
data = utils.ProgramDataExtender(self.program, self.user).extend()
self._assert_supplemented(
data,
is_enrollment_open=False,
)
@ddt.data(True, False)
@mock.patch(UTILS_MODULE + '.certificate_api.certificate_downloadable_status')
@mock.patch(CERTIFICATES_API_MODULE + '.has_html_certificates_enabled')
def test_certificate_url_retrieval(self, is_uuid_available, mock_html_certs_enabled, mock_get_cert_data):
"""Verify that the student's run mode certificate is included, when available."""
test_uuid = uuid.uuid4().hex
mock_get_cert_data.return_value = {'uuid': test_uuid} if is_uuid_available else {}
mock_html_certs_enabled.return_value = True
data = utils.ProgramDataExtender(self.program, self.user).extend()
expected_url = reverse(
'certificates:render_cert_by_uuid',
kwargs={'certificate_uuid': test_uuid}
) if is_uuid_available else None
self._assert_supplemented(data, certificate_url=expected_url)
@ddt.data(-1, 0, 1)
def test_course_course_ended(self, days_offset):
self.course.end = datetime.datetime.now(utc) + datetime.timedelta(days=days_offset)
self.course = self.update_course(self.course, self.user.id) # pylint: disable=no-member
data = utils.ProgramDataExtender(self.program, self.user).extend()
self._assert_supplemented(data)
@mock.patch(UTILS_MODULE + '.get_organization_by_short_name')
def test_organization_logo_exists(self, mock_get_organization_by_short_name):
""" Verify the logo image is set from the organizations api """
mock_logo_url = 'edx/logo.png'
mock_image = mock.Mock()
mock_image.url = mock_logo_url
mock_get_organization_by_short_name.return_value = {
'logo': mock_image
}
data = utils.ProgramDataExtender(self.program, self.user).extend()
self.assertEqual(data['organizations'][0].get('img'), mock_logo_url)
@mock.patch(UTILS_MODULE + '.get_organization_by_short_name')
def test_organization_missing(self, mock_get_organization_by_short_name):
""" Verify the logo image is not set if the organizations api returns None """
mock_get_organization_by_short_name.return_value = None
data = utils.ProgramDataExtender(self.program, self.user).extend()
self.assertEqual(data['organizations'][0].get('img'), None)
@mock.patch(UTILS_MODULE + '.get_organization_by_short_name')
def test_organization_logo_missing(self, mock_get_organization_by_short_name):
"""
Verify the logo image is not set if the organizations api returns organization,
but the logo is not available
"""
mock_get_organization_by_short_name.return_value = {'logo': None}
data = utils.ProgramDataExtender(self.program, self.user).extend()
self.assertEqual(data['organizations'][0].get('img'), None)
| agpl-3.0 | -3,417,082,932,535,076,000 | 38.62623 | 126 | 0.608776 | false |
gewaltig/cython-neuron | pynest/examples/CampbellSiegert.py | 2 | 5435 | # -*- coding: utf-8 -*-
#
# CampbellSiegert.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# CampbellSiegert.py
#
# Example script that applies Campbell's theorem and Siegert's rate approximation.
#
# This script calculates the firing rate of an integrate-and-fire neuron
# in response to a series of Poisson generators, each specified with
# a rate and a synaptic weight.
# The calculated rate is compared with a simulation using the iaf_psc_alpha model
#
#
#
# Sven Schrader, Nov 2008, Siegert implementation by Tom Tetzlaff
from scipy.special import erf
from scipy.optimize import fmin
import numpy
from numpy import sqrt, exp
import pylab
import nest
# example 1
weights = [0.1] # mV psp amplitudes
rates = [8000.] # Hz
# example 2, should have same result as example 1
#weights = [0.1, 0.1]
#rates = [4000., 4000.]
Cm = 250. # pF, capacitance
tau_syn_ex = 0.5 # ms, synaptic time constants
tau_syn_in = 2.0 #
tau_m = 20. # ms, membrane time constant
tref = 2.0 # ms, refractory period
V0 = 0.0 # mV, resting potential
Vth = 20.0 # mV, firing threshold
simtime = 20000 # ms
n_neurons = 10 # number of simulated neurons
pi = numpy.pi
e = exp(1)
pF = 1e-12
ms = 1e-3
pA = 1e-12
mV = 1e-3
mu = 0.0
sigma2 = 0.0
J = []
assert(len(weights) == len(rates))
########################################################################################
# Analytical section
for rate, weight in zip(rates, weights):
if weight >0:
tau_s = tau_syn_ex
else:
tau_s = tau_syn_in
t_psp = numpy.arange(0, 10 * (tau_m*ms + tau_s*ms),0.0001 )
# calculation of a single PSP
psp = lambda x: -(Cm*pF) / (tau_s*ms) * (1/(Cm*pF)) * (e/(tau_s*ms)) * \
(((-x * exp(-x/(tau_s*ms))) / (1/(tau_s*ms )-1 / (tau_m*ms))) +\
(exp(-x/(tau_m*ms)) - exp(-x/(tau_s*ms))) / ((1/(tau_s*ms) - 1/(tau_m*ms))**2) )
min_result = fmin(psp, [0], full_output=1, disp=0)
    fudge = -1. / min_result[1]  # scales the PSC amplitude from the PSP amplitude (reciprocal of the PSP peak found above)
    J.append(Cm * weight / tau_s * fudge)  # resulting PSC amplitude (pA) for this source
# Campbell's Theorem
    # the mean membrane potential mu and the variance sigma2 add up over the Poisson sources
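    # For each source of rate r with PSP kernel f(t) (r and f are used only in this
    # explanatory note), Campbell's theorem gives a mean contribution of r * integral(f)
    # and a variance contribution of r * integral(f**2); the closed-form expressions
    # below are those integrals evaluated for the alpha-shaped PSP.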
mu += ((V0*mV) + rate * \
(J[-1]*pA) * (tau_s*ms) * e * (tau_m*ms) / (Cm*pF))
sigma2 += rate * \
(2* tau_m*ms + tau_s*ms ) * \
(J[-1]*pA * tau_s*ms *e * tau_m*ms/ ( 2 * (Cm*pF) * (tau_m*ms + tau_s*ms) ) ) ** 2
sigma = sqrt(sigma2)
# Siegert's rate approximation
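# The loop below evaluates Siegert's first-passage-time formula for the LIF neuron,
#   1/r = t_ref + tau_m * sqrt(pi) *
#         integral from (V0-mu)/(sigma*sqrt(2)) to (Vth-mu)/(sigma*sqrt(2)) of exp(u**2) * (1 + erf(u)) du
# approximated here by a simple Riemann sum over num_iterations steps.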
num_iterations = 100
ul = (Vth*mV - mu) / (sigma)/sqrt(2)
ll = (V0*mV - mu) / (sigma)/sqrt(2)
interval = (ul-ll)/num_iterations
tmpsum = 0.0
for cu in range(0,num_iterations+1):
u = ll + cu * interval
f = exp(u**2)*(1+erf(u))
tmpsum += interval * sqrt(pi) * f
r = 1. / (tref*ms + tau_m*ms * tmpsum)
########################################################################################
# Simulation section
nest.ResetKernel()
nest.sr('20 setverbosity')
neurondict = {'V_th':Vth, 'tau_m':tau_m, 'tau_syn_ex':tau_syn_ex,'tau_syn_in':tau_syn_in, 'C_m':Cm, 'E_L':V0, 't_ref':tref, 'V_m': V0, 'V_reset': V0}
if (mu*1000) < Vth:
neurondict['V_m'] = mu*1000.
nest.SetDefaults('iaf_psc_alpha', neurondict)
n = nest.Create('iaf_psc_alpha', n_neurons)
n_free = nest.Create('iaf_psc_alpha', 1 ,[{'V_th':999999.}]) # high threshold as we want free membrane potential
pg = nest.Create('poisson_generator', len(rates), [ {'rate':float(rate_i)} for rate_i in rates] )
vm = nest.Create('voltmeter', 1, [{'record_to':['memory'], 'withtime':True, 'withgid':True, 'interval':.1}])
sd = nest.Create('spike_detector',1, [{'record_to':['memory'], 'withtime':True, 'withgid':True}])
for i, currentpg in enumerate(pg):
nest.DivergentConnect([currentpg],n,weight=float(J[i]), delay=0.1)
nest.Connect([currentpg],n_free, {'weight':J[i]})
nest.Connect(vm,n_free)
nest.ConvergentConnect(n,sd)
nest.Simulate(simtime)
# free membrane potential (first 100 steps are omitted)
v_free = nest.GetStatus(vm,'events')[0]['V_m'][100:-1]
print 'mean membrane potential (actual/calculated):', numpy.mean(v_free), mu*1000
print 'variance (actual/calculated): ', numpy.var(v_free), sigma2*1e6
print 'firing rate (actual/calculated): ', nest.GetStatus(sd,'n_events')[0] / (n_neurons*simtime*ms), r
| gpl-2.0 | -3,278,509,550,077,527,000 | 33.18239 | 150 | 0.562833 | false |
sgzsh269/django | tests/forms_tests/field_tests/test_datefield.py | 33 | 8191 | # -*- coding: utf-8 -*-
from datetime import date, datetime
from django.forms import (
DateField, Form, HiddenInput, SelectDateWidget, ValidationError,
)
from django.test import SimpleTestCase, override_settings
from django.utils import translation
class GetDate(Form):
mydate = DateField(widget=SelectDateWidget)
class DateFieldTest(SimpleTestCase):
def test_form_field(self):
a = GetDate({'mydate_month': '4', 'mydate_day': '1', 'mydate_year': '2008'})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data['mydate'], date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict(), we must
# accept the input from the "as_hidden" rendering as well.
self.assertHTMLEqual(
a['mydate'].as_hidden(),
'<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />',
)
b = GetDate({'mydate': '2008-4-1'})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data['mydate'], date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})
# label tag is correctly associated with month dropdown
d = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_month">', d.as_p())
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n_date_changed(self):
"""
DateField.has_changed() with SelectDateWidget works with a localized
date format (#17165).
"""
# With Field.show_hidden_initial=False
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '2',
}, initial={'mydate': date(2008, 4, 1)})
self.assertTrue(b.has_changed())
# With Field.show_hidden_initial=True
class GetDateShowHiddenInitial(Form):
mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 22)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 22)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n_invalid_date_in(self):
# Invalid dates shouldn't be allowed
a = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(a.is_valid())
# 'Geef een geldige datum op.' = 'Enter a valid date.'
self.assertEqual(a.errors, {'mydate': ['Geef een geldige datum op.']})
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_form_label_association(self):
# label tag is correctly associated with first rendered dropdown
a = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_day">', a.as_p())
def test_datefield_1(self):
f = DateField()
self.assertEqual(date(2006, 10, 25), f.clean(date(2006, 10, 25)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(date(2006, 10, 25), f.clean('2006-10-25'))
self.assertEqual(date(2006, 10, 25), f.clean('10/25/2006'))
self.assertEqual(date(2006, 10, 25), f.clean('10/25/06'))
self.assertEqual(date(2006, 10, 25), f.clean('Oct 25 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('October 25 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('October 25, 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('25 October 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('25 October, 2006'))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('2006-4-31')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('200a-10-25')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('25/10/06')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datefield_3(self):
f = DateField(input_formats=['%Y %m %d'])
self.assertEqual(date(2006, 10, 25), f.clean(date(2006, 10, 25)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30)))
self.assertEqual(date(2006, 10, 25), f.clean('2006 10 25'))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('2006-10-25')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('10/25/2006')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('10/25/06')
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(date(2006, 10, 25), f.clean(' 10/25/2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' 10/25/06 '))
self.assertEqual(date(2006, 10, 25), f.clean(' Oct 25 2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' October 25 2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' October 25, 2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' 25 October 2006 '))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean(' ')
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('a\x00b')
def test_datefield_changed(self):
format = '%d/%m/%Y'
f = DateField(input_formats=[format])
d = date(2007, 9, 17)
self.assertFalse(f.has_changed(d, '17/09/2007'))
def test_datefield_strptime(self):
"""field.strptime() doesn't raise a UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime('31 мая 2011', '%d-%b-%y')
except Exception as e:
# assertIsInstance or assertRaises cannot be used because UnicodeEncodeError
# is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
| bsd-3-clause | 3,422,494,213,169,096,700 | 42.094737 | 94 | 0.586957 | false |
marcore/edx-platform | lms/djangoapps/certificates/management/commands/create_fake_cert.py | 44 | 3520 | """Utility for testing certificate display.
This command will create a fake certificate for a user
in a course. The certificate will display on the student's
dashboard, but no PDF will be generated.
Example usage:
$ ./manage.py lms create_fake_cert test_user edX/DemoX/Demo_Course --mode honor --grade 0.89
"""
import logging
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from optparse import make_option
from opaque_keys.edx.keys import CourseKey
from certificates.models import GeneratedCertificate, CertificateStatuses
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""Create a fake certificate for a user in a course. """
USAGE = u'Usage: create_fake_cert <USERNAME> <COURSE_KEY> --mode <MODE> --status <STATUS> --grade <GRADE>'
option_list = BaseCommand.option_list + (
make_option(
'-m', '--mode',
metavar='CERT_MODE',
dest='cert_mode',
default='honor',
help='The course mode of the certificate (e.g. "honor", "verified", or "professional")'
),
make_option(
'-s', '--status',
metavar='CERT_STATUS',
dest='status',
default=CertificateStatuses.downloadable,
help='The status of the certificate'
),
make_option(
'-g', '--grade',
metavar='CERT_GRADE',
dest='grade',
default='',
help='The grade for the course, as a decimal (e.g. "0.89" for 89%)'
),
)
def handle(self, *args, **options):
"""Create a fake certificate for a user.
Arguments:
username (unicode): Identifier for the certificate's user.
course_key (unicode): Identifier for the certificate's course.
Keyword Arguments:
cert_mode (str): The mode of the certificate (e.g "honor")
status (str): The status of the certificate (e.g. "downloadable")
grade (str): The grade of the certificate (e.g "0.89" for 89%)
Raises:
CommandError
"""
if len(args) < 2:
raise CommandError(self.USAGE)
user = User.objects.get(username=args[0])
course_key = CourseKey.from_string(args[1])
cert_mode = options.get('cert_mode', 'honor')
status = options.get('status', CertificateStatuses.downloadable)
grade = options.get('grade', '')
cert, created = GeneratedCertificate.eligible_certificates.get_or_create(
user=user,
course_id=course_key
)
cert.mode = cert_mode
cert.status = status
cert.grade = grade
if status == CertificateStatuses.downloadable:
cert.download_uuid = 'test'
cert.verify_uuid = 'test'
cert.download_url = 'http://www.example.com'
cert.save()
if created:
LOGGER.info(
u"Created certificate for user %s in course %s "
u"with mode %s, status %s, "
u"and grade %s",
user.id, unicode(course_key),
cert_mode, status, grade
)
else:
LOGGER.info(
u"Updated certificate for user %s in course %s "
u"with mode %s, status %s, "
u"and grade %s",
user.id, unicode(course_key),
cert_mode, status, grade
)
| agpl-3.0 | 6,453,031,393,438,975,000 | 31 | 110 | 0.57017 | false |
mkhuthir/learnPython | Book_pythonlearn_com/code3/bs4/element.py | 16 | 65678 | __license__ = "MIT"
from pdb import set_trace
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
    def alias(self, value):
        return setattr(self, attr, value)
return alias
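# Illustrative: `nextSibling = _alias("next_sibling")` (used on PageElement below)
# exposes the old BS3 camelCase name as a read/write alias of the new attribute.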
class NamespacedAttribute(str):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = str.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = str.__new__(cls, name)
else:
obj = str.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(str):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = str.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return str.__new__(str, original_value)
obj = str.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
class HTMLAwareEntitySubstitution(EntitySubstitution):
"""Entity substitution rules that are aware of some HTML quirks.
Specifically, the contents of <script> and <style> tags should not
undergo entity substitution.
Incoming NavigableString objects are checked to see if they're the
direct children of a <script> or <style> tag.
"""
cdata_containing_tags = set(["script", "style"])
preformatted_tags = set(["pre"])
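    # Illustrative: a NavigableString that is a direct child of a <script> or
    # <style> tag passes through substitute_html()/substitute_xml() unchanged,
    # while any other string gets the plain EntitySubstitution treatment
    # (e.g. "&" becomes "&amp;").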
@classmethod
def _substitute_if_appropriate(cls, ns, f):
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in cls.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return f(ns)
@classmethod
def substitute_html(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_html)
@classmethod
def substitute_xml(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
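    # Illustrative (assuming `tag` is a Tag in an HTML tree):
    #   tag.decode(formatter="minimal")  # only bare &, < and > are escaped
    #   tag.decode(formatter="html")     # full HTML entity substitution
    #   tag.decode(formatter=None)       # no substitution at all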
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not isinstance(formatter, collections.Callable):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None, next_element=None,
previous_sibling=None, next_sibling=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = next_element
if self.next_element:
self.next_element.previous_element = self
self.next_sibling = next_sibling
if self.next_sibling:
self.next_sibling.previous_sibling = self
if (not previous_sibling
and self.parent is not None and self.parent.contents):
previous_sibling = self.parent.contents[-1]
self.previous_sibling = previous_sibling
if previous_sibling:
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if not self.parent:
raise ValueError(
"Cannot replace one element with another when the"
"element to be replaced is not part of a tree.")
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
if not self.parent:
raise ValueError(
"Cannot replace an element with its contents when that"
"element is not part of a tree.")
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
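    # Illustrative usage of wrap()/unwrap(), assuming `soup` is a parsed
    # BeautifulSoup document containing "<p>text</p>":
    #   soup.p.string.wrap(soup.new_tag("b"))   # -> <p><b>text</b></p>
    #   soup.p.b.unwrap()                       # -> back to <p>text</p>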
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if (self.previous_element is not None and
self.previous_element is not next_element):
self.previous_element.next_element = next_element
if next_element is not None and next_element is not self.previous_element:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if (self.previous_sibling is not None
and self.previous_sibling is not self.next_sibling):
self.previous_sibling.next_sibling = self.next_sibling
if (self.next_sibling is not None
and self.next_sibling is not self.previous_sibling):
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self, is_initialized=True, accept_self=True):
"Finds the last element beneath this object to be parsed."
if is_initialized and self.next_sibling:
last_child = self.next_sibling.previous_element
else:
last_child = self
while isinstance(last_child, Tag) and last_child.contents:
last_child = last_child.contents[-1]
if not accept_self and last_child is self:
last_child = None
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is None:
raise ValueError("Cannot insert None into a tag.")
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, str)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant(False)
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant(False)
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if text is None and 'string' in kwargs:
text = kwargs['string']
del kwargs['string']
if isinstance(name, SoupStrainer):
strainer = name
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
if text is None and not limit and not attrs and not kwargs:
if name is True or name is None:
# Optimization to find all tags.
result = (element for element in generator
if isinstance(element, Tag))
return ResultSet(strainer, result)
elif isinstance(name, str):
# Optimization to find all tags with a given name.
result = (element for element in generator
if isinstance(element, Tag)
and element.name == name)
return ResultSet(strainer, result)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-zA-Z0-9][-.a-zA-Z0-9:_]*$')
# /^([a-zA-Z0-9][-.a-zA-Z0-9:_]*)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
    #   \---------------------------/  \---/\-------------/    \-------/
    #     |                              |         |               |
    #     |                              |         |           The value
    #     |                              |    ~,|,^,$,* or =
    #     |                            Attribute
    #    Tag
attribselect_re = re.compile(
r'^(?P<tag>[a-zA-Z0-9][-.a-zA-Z0-9:_]*)?\[(?P<attribute>[\w-]+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
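    # Illustrative: for the selector 'a[href^="http"]' the groups captured by
    # attribselect_re are tag='a', attribute='href', operator='^' and value='http'.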
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
        space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
            # string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
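    # Assumed usage examples (added for illustration, not in the original file):
    #   _attribute_checker('~', 'class', 'nav')  -> matcher true for class="top nav"
    #   _attribute_checker('$', 'src', '.png')   -> matcher true for src="logo.png"
    #   _attribute_checker('',  'disabled')      -> matcher true when the attribute exists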
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(str, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, str):
u = str.__new__(cls, value)
else:
u = str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
u.setup()
return u
def __copy__(self):
"""A copy of a NavigableString has the same contents and class
as the original, but it is not connected to the parse tree.
"""
return type(self)(self)
def __getnewargs__(self):
return (str(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
@property
def name(self):
return None
@name.setter
def name(self, name):
raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = '<![CDATA['
SUFFIX = ']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = '<?'
SUFFIX = '>'
class Comment(PreformattedString):
PREFIX = '<!--'
SUFFIX = '-->'
class Declaration(PreformattedString):
PREFIX = '<?'
SUFFIX = '?>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name or ''
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = '<!DOCTYPE '
SUFFIX = '>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif attrs:
if builder is not None and builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
def __copy__(self):
"""A copy of a Tag is a new Tag, unconnected to the parse tree.
Its contents are a copy of the old Tag's contents.
"""
        # Tag does not store the builder or an 'nsprefix' attribute (see
        # __init__ above), so build the copy from the attributes that do exist.
        clone = type(self)(None, None, self.name, self.namespace,
                           self.prefix, self.attrs)
for attr in ('can_be_empty_element', 'hidden'):
setattr(clone, attr, getattr(self, attr))
for child in self.contents:
clone.append(child.__copy__())
return clone
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
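    # Hypothetical illustration (not from the original source): for markup like
    # <b><i>text</i></b>, .string descends the single-child chain and returns
    # "text"; for <b>one<i>two</i></b> it returns None because there are two children.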
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False, types=(NavigableString, CData)):
"""Yield all strings of certain classes, possibly stripping them.
By default, yields only NavigableString and CData objects. So
no comments, processing instructions, etc.
"""
for descendant in self.descendants:
if (
(types is None and not isinstance(descendant, NavigableString))
or
(types is not None and type(descendant) not in types)):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator="", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
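    # Usage sketch (assumed, not part of the original file):
    #   tag.get_text()                 -> all descendant strings concatenated
    #   tag.get_text(" ", strip=True)  -> stripped strings joined with single spaces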
getText = get_text
text = property(get_text)
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i.contents = []
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __bool__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding="unicode-escape"):
"""Renders this tag as a string."""
if PY3K:
# "The return value must be a string object", i.e. Unicode
return self.decode()
else:
# "The return value must be a string object", i.e. a bytestring.
# By convention, the return value of __repr__ should also be
# an ASCII string.
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
if PY3K:
return self.decode()
else:
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?"""
return (
indent_level is not None and
(self.name not in HTMLAwareEntitySubstitution.preformatted_tags
or self._is_xml))
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not isinstance(formatter, collections.Callable):
formatter = self._formatter_for_name(formatter)
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, str):
val = str(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
str(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = self._should_pretty_print(indent_level)
space = ''
indent_space = ''
if indent_level is not None:
indent_space = (' ' * (indent_level - 1))
if pretty_print:
space = indent_space
indent_contents = indent_level + 1
else:
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if indent_level is not None:
# Even if this particular tag is not pretty-printed,
# we should indent up to the start of the tag.
s.append(indent_space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if indent_level is not None and closeTag and self.next_sibling:
# Even if this particular tag is not pretty-printed,
# we're now done with the tag, and we should add a
# newline if appropriate.
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param indent_level: Each line of the rendering will be
indented this many spaces.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
:param formatter: The output formatter responsible for converting
entities to Unicode characters.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not isinstance(formatter, collections.Callable):
formatter = self._formatter_for_name(formatter)
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level and not self.name == 'pre':
text = text.strip()
if text:
if pretty_print and not self.name == 'pre':
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print and not self.name == 'pre':
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring.
:param indent_level: Each line of the rendering will be
indented this many spaces.
        :param encoding: The bytestring will be in this encoding.
:param formatter: The output formatter responsible for converting
entities to Unicode characters.
"""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
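    # Illustrative calls (assumptions added for clarity, not original code):
    #   tag.find_all('a')                      -> every <a> descendant
    #   tag.find_all('a', limit=2)             -> at most two matches
    #   tag.find_all(attrs={'class': 'story'}) -> filter on an attribute value
    #   tag.find_all('div', recursive=False)   -> direct children only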
findAll = find_all # BS3
findChildren = find_all # BS2
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# CSS selector code
_selector_combinators = ['>', '+', '~']
_select_debug = False
def select_one(self, selector):
"""Perform a CSS selection operation on the current element."""
value = self.select(selector, limit=1)
if value:
return value[0]
return None
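    # Example selectors handled by the token loop in select() below (assumed
    # usage, not in the original source): self.select('p.title'),
    # self.select('div > a[href^="http"]'), self.select_one('#main').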
def select(self, selector, _candidate_generator=None, limit=None):
"""Perform a CSS selection operation on the current element."""
# Handle grouping selectors if ',' exists, ie: p,a
if ',' in selector:
context = []
for partial_selector in selector.split(','):
partial_selector = partial_selector.strip()
if partial_selector == '':
raise ValueError('Invalid group selection syntax: %s' % selector)
candidates = self.select(partial_selector, limit=limit)
for candidate in candidates:
if candidate not in context:
context.append(candidate)
if limit and len(context) >= limit:
break
return context
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print('Running CSS selector "%s"' % selector)
for index, token in enumerate(tokens):
new_context = []
new_context_ids = set([])
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print(' Token was consumed by the previous combinator.')
continue
if self._select_debug:
print(' Considering token "%s"' % token)
recursive_candidate_generator = None
tag_name = None
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
                pseudo_attributes = re.match(r'([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is None:
pseudo_type = pseudo
pseudo_value = None
else:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
                    except (TypeError, ValueError):
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print(' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs))
print('-' * 40)
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print('(Recursive select picked up candidate %s %s)' % (i.name, i.attrs))
yield i
if self._select_debug:
print('-' * 40)
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
                    if tag_name:
                        check = tag_name
                    else:
                        check = "[any]"
print(' Default candidate generator, tag name="%s"' % check)
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
count = 0
for tag in current_context:
if self._select_debug:
print(" Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs)))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print(" SUCCESS %s %s" % (candidate.name, repr(candidate.attrs)))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
if limit and len(new_context) >= limit:
break
elif self._select_debug:
print(" FAILURE %s %s" % (candidate.name, repr(candidate.attrs)))
current_context = new_context
if self._select_debug:
print("Final verdict:")
for i in current_context:
print(" %s %s" % (i.name, i.attrs))
return current_context
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
def has_key(self, key):
"""This was kind of misleading because has_key() (attributes)
was different from __in__ (contents). has_key() is gone in
Python 3, anyway."""
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
key))
return self.has_attr(key)
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in list(attrs.items()):
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, str) or isinstance(value, collections.Callable) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, str)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return str(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, str):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, str)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, str):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source, result=()):
super(ResultSet, self).__init__(result)
self.source = source
| mit | 7,376,766,775,267,727,000 | 37.074203 | 114 | 0.557691 | false |
weizhenwei/iTerm2 | tests/esctest/tests/decstr.py | 31 | 5412 | from esc import BS, CR, ESC, LF, NUL
import esccmd
import escio
import esclog
from escutil import AssertEQ, AssertScreenCharsInRectEqual, GetCursorPosition, GetScreenSize, intentionalDeviationFromSpec, knownBug
from esctypes import Point, Rect
class DECSTRTests(object):
"""The following settings are reset:
DECTCEM Cursor enabled.
IRM Replace mode.
DECOM Absolute (cursor origin at upper-left of screen.)
DECAWM No autowrap.
DECNRCM Multinational set.
KAM Unlocked.
DECNKM Numeric characters.
DECCKM Normal (arrow keys).
DECSTBM Top margin = 1; bottom margin = page length.
G0, G1, G2, G3, GL, GR Default settings.
SGR Normal rendition.
DECSCA Normal (erasable by DECSEL and DECSED).
DECSC Home position.
DECAUPSS Set selected in Set-Up.
DECSASD Main display.
DECKPM Character codes.
DECRLM Reset (Left-to-right), regardless of NVR setting.
DECPCTERM Always reset."""
def test_DECSTR_DECSC(self):
# Save cursor position
esccmd.CUP(Point(5, 6))
esccmd.DECSC()
# Perform soft reset
esccmd.DECSTR()
# Ensure saved cursor position is the origin
esccmd.DECRC()
AssertEQ(GetCursorPosition(), Point(1, 1))
def test_DECSTR_IRM(self):
# Turn on insert mode
esccmd.SM(esccmd.IRM)
# Perform soft reset
esccmd.DECSTR()
# Ensure replace mode is on
esccmd.CUP(Point(1, 1))
escio.Write("a")
esccmd.CUP(Point(1, 1))
escio.Write("b")
AssertScreenCharsInRectEqual(Rect(1, 1, 1, 1), [ "b" ])
def test_DECSTR_DECOM(self):
# Define a scroll region
esccmd.DECSTBM(3, 4)
# Turn on origin mode
esccmd.DECSET(esccmd.DECOM)
# Perform soft reset
esccmd.DECSTR()
# Define scroll region again
esccmd.DECSET(esccmd.DECLRMM)
esccmd.DECSLRM(3, 4)
esccmd.DECSTBM(4, 5)
# Move to 1,1 (or 3,4 if origin mode is still on) and write an X
esccmd.CUP(Point(1, 1))
escio.Write("X")
# Turn off origin mode
esccmd.DECRESET(esccmd.DECOM)
# Make sure the X was at 1, 1, implying origin mode was off.
esccmd.DECSTBM()
esccmd.DECRESET(esccmd.DECLRMM)
AssertScreenCharsInRectEqual(Rect(1, 1, 3, 4), [ "X" + NUL * 2,
NUL * 3,
NUL * 3,
NUL * 3 ])
@intentionalDeviationFromSpec(terminal="iTerm2",
reason="For compatibility purposes, iTerm2 mimics xterm's behavior of turning on DECAWM by default.")
    @intentionalDeviationFromSpec(terminal="xterm",
reason="For compatibility purposes, xterm turns on DECAWM by default.")
def test_DECSTR_DECAWM(self):
# Turn on autowrap
esccmd.DECSET(esccmd.DECAWM)
# Perform soft reset
esccmd.DECSTR()
# Make sure autowrap is still on
esccmd.CUP(Point(GetScreenSize().width() - 1, 1))
escio.Write("xxx")
position = GetCursorPosition()
AssertEQ(position.x(), 2)
def test_DECSTR_ReverseWraparound(self):
# Turn on reverse wraparound
esccmd.DECSET(esccmd.ReverseWraparound)
# Perform soft reset
esccmd.DECSTR()
# Verify reverse wrap is off
esccmd.CUP(Point(1, 2))
escio.Write(BS)
AssertEQ(GetCursorPosition().x(), 1)
def test_DECSTR_STBM(self):
# Set top and bottom margins
esccmd.DECSTBM(3, 4)
# Perform soft reset
esccmd.DECSTR()
# Ensure no margins
esccmd.CUP(Point(1, 4))
escio.Write(CR + LF)
AssertEQ(GetCursorPosition().y(), 5)
@knownBug(terminal="iTerm2", reason="DECSCA not implemented")
def test_DECSTR_DECSCA(self):
# Turn on character protection
esccmd.DECSCA(1)
# Perform soft reset
esccmd.DECSTR()
# Ensure character protection is off
esccmd.CUP(Point(1, 1))
escio.Write("X")
esccmd.DECSED(2)
AssertScreenCharsInRectEqual(Rect(1, 1, 1, 1), [ NUL ])
def test_DECSTR_DECSASD(self):
# Direct output to status line
esccmd.DECSASD(1)
# Perform soft reset
esccmd.DECSTR()
# Ensure output goes to screen
escio.Write("X")
AssertScreenCharsInRectEqual(Rect(1, 1, 1, 1), [ "X" ])
def test_DECSTR_DECRLM(self):
# Set right-to-left mode
esccmd.DECSET(esccmd.DECRLM)
# Perform soft reset
esccmd.DECSTR()
# Ensure text goes left to right
esccmd.CUP(Point(2, 1))
escio.Write("a")
escio.Write("b")
AssertScreenCharsInRectEqual(Rect(2, 1, 2, 1), [ "a" ])
AssertScreenCharsInRectEqual(Rect(3, 1, 3, 1), [ "b" ])
def test_DECSTR_DECLRMM(self):
# This isn't in the vt 510 docs but xterm does it and it makes sense to do.
esccmd.DECSET(esccmd.DECLRMM)
esccmd.DECSLRM(5, 6)
# Perform soft reset
esccmd.DECSTR()
# Ensure margins are gone.
esccmd.CUP(Point(5, 5))
escio.Write("ab")
AssertEQ(GetCursorPosition().x(), 7)
def test_DECSTR_CursorStaysPut(self):
esccmd.CUP(Point(5, 6))
esccmd.DECSTR()
position = GetCursorPosition()
AssertEQ(position.x(), 5)
AssertEQ(position.y(), 6)
| gpl-2.0 | -8,559,708,736,953,043,000 | 28.413043 | 133 | 0.604767 | false |
rmaestre/SVM-for-domains-detection | svm-training.py | 1 | 4129 | # -*- coding: utf-8 -*-
import pickle
import re
import string
from sklearn import svm
from sklearn import cross_validation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals import joblib
from numpy import array
def insert_processed_review(corpus_result, labels_result, review, label, vectors_size, corpus_number_perdomain):
"""
    Clean a review, keeping only alphabetic words, and append the cleaned
    text to 'corpus_result' (with its label appended to 'labels_result').
    A variant that splits each review into chunks of 'vectors_size' words,
    e.g. "word1 ... word6" with vectors_size = 3 becoming
    ["word1 word2 word3", "word4 word5 word6"], is kept below commented out.
"""
# exclude = set(string.punctuation).union(['¡', '¿', u'£', '€', '$']) # Spanish
# cleaned_review = ''.join(ch if ch not in exclude else ' ' for ch in review) # list comprehension
# words = cleaned_review.split()
review = re.sub('\x93', ' ', review)
review = re.sub('\x94', ' ', review)
words = re.findall(r'\b[a-zA-ZáéíóúüÁÉÍÓÚÜ]+\b', review)
if len(words) > 0:
corpus_result.append(' '.join(words))
labels_result.append(label)
# Create vectors of length = vectors_size
# for i in range(0, len(words), vectors_size):
# if len(corpus_result) < corpus_number_perdomain:
# corpus_result.append(' '.join(words[i:i+vectors_size]))
# labels_result.append(label)
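# Illustrative call (added for clarity; values are assumptions):
#   corpus, labels = [], []
#   insert_processed_review(corpus, labels, "The hotel was great", 1, 20, 40000)
#   # -> corpus == ["The hotel was great"], labels == [1]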
# Parameters to control the length of the vectors and the Matrix rows
corpus_number_perdomain = 40000
vectors_size = 20
langs = {0: "sp", 1: "en"}
lang = langs[1]
# DS to save corpus and labels
corpus = []
labels = []
domains = {"data/%s/electronics/electronics.txt"%lang: 0, "data/%s/hotels/hotels.txt"%lang: 1}
for file_name in domains:
# Loading i-domain review-corpus
with open(file_name, "r") as file_in:
for line in file_in.readlines(): # 1 review per line!
# Update corpus with new vectors
insert_processed_review(corpus, labels, line, domains[file_name], vectors_size, corpus_number_perdomain)
if len(corpus) == corpus_number_perdomain:
break
corpus_number_perdomain *= 2 # corpus/labels shared for all domains
# We need the same length for both DS
assert(len(corpus) == len(labels))
print("Corpus size: %d " % len(corpus))
# Vectorization: transforming text corpora to TF matrices
# http://scikit-learn.org/stable/modules/feature_extraction.html#common-vectorizer-usage
vectorizer = CountVectorizer(min_df=1)
X = vectorizer.fit_transform(corpus) # vocabulary generated for this input corpus
#X.toarray()
# Transform list of labels to an array
y = array(labels)
print("Corpus matrix shape: %s " % str(X.shape))
print("Labels vector shape: %s " % str(y.shape))
# Training and validation data (k-fold = 30%)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.3, random_state=1)
# With an RBF kernel, gamma=0.01, corpus_number_perdomain = 5000 and
# vectors_size = 20 we reach the maximum score; however, we use a linear
# kernel because it costs only about 0.1 in precision while needing far
# fewer support vectors, which matters at classification time.
clf = svm.SVC(kernel='linear', probability=False)
clf.fit(X_train, y_train) ## classifier generated
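# The RBF variant mentioned above would look roughly like this (assumption,
# left commented out so the linear model above remains the one trained):
#   clf = svm.SVC(kernel='rbf', gamma=0.01, probability=False)
#   clf.fit(X_train, y_train)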
# Save model to disk and also a vectorizer index
joblib.dump(clf, 'models/%s/svm_model.pkl'%lang)
with open('models/%s/vectorizer.pkl'%lang, 'wb') as o_file:
pickle.dump(vectorizer, o_file)
# Dump info about the model
print("\nSupported vectors length: %s" % str(clf.support_vectors_.shape))
print("Dual coef. length: %s" % str(clf.dual_coef_.shape))
score = clf.score(X_test, y_test)
print("\nScore k-fold validation: %.4f%%" % round(score, 4))
# I love the equalizer in my new scotch walkman
# The cell batery works bad
# The hotel is in a great location close to all that downtown Portsmouth has to offer
# We had a shot of scotch whiskey at the hotel bar
sample = vectorizer.transform(['We had a shot of scotch whiskey at the hotel bar']).toarray()
print(sample.shape)
print(clf.predict(sample))
| apache-2.0 | 2,173,177,710,875,253,800 | 38.538462 | 116 | 0.690418 | false |
tensorflow/ngraph-bridge | examples/mnist/mnist_softmax_distributed.py | 1 | 5698 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A very simple MNIST classifier.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
Reference to the original source code:
https://github.com/tensorflow/tensorflow/blob/r1.2/tensorflow/examples/tutorials/mnist/mnist_softmax.py
Add distributed training support with Horovod:
1. hvd.init()
2. Add distributed wrapper from hvd.DistributedOptimizer
3. Broadcast the variables from root rank to the rest processors: hvd.BroadcastGlobalVariablesHook(0)
4. Print the output for root rank only
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
from keras.datasets import mnist
from keras.utils.np_utils import to_categorical
import tensorflow.compat.v1 as tf
import ngraph_bridge
tf.disable_eager_execution()
import numpy as np
import horovod.tensorflow as hvd
FLAGS = None
hvd.init()
def main(_):
run_mnist(_)
def run_mnist(_):
# Create the model
    with tf.name_scope("mnist_placeholder"):
x = tf.compat.v1.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])
# The raw formulation of cross-entropy,
#
# tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
# reduction_indices=[1]))
#
# can be numerically unstable.
#
# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
# outputs of 'y', and then average across the batch.
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    # tf.contrib was removed in TF 2.x; use the tf.compat.v1 equivalent instead.
    global_step = tf.train.get_or_create_global_step()
opt = tf.train.GradientDescentOptimizer(0.5)
# Add MPI Distributed Optimizer
with tf.name_scope("horovod_opt"):
opt = hvd.DistributedOptimizer(opt)
train_step = opt.minimize(cross_entropy, global_step=global_step)
# The StopAtStepHook handles stopping after running given steps.
hooks = [
hvd.BroadcastGlobalVariablesHook(0),
tf.train.StopAtStepHook(last_step=10)
]
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Enable soft placement and tracing as needed
config = tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True,
inter_op_parallelism_threads=1)
config_ngraph_enabled = ngraph_bridge.update_config(config)
#config.graph_options.optimizer_options.global_jit_level = jit_level
run_metadata = tf.compat.v1.RunMetadata()
#init_op = tf.global_variables_initializer()
print("Variables initialized ...")
# The MonitoredTrainingSession takes care of session initialization
with tf.train.MonitoredTrainingSession(
hooks=hooks, config=config_ngraph_enabled) as mon_sess:
start = time.time()
train_writer = tf.compat.v1.summary.FileWriter(FLAGS.log_dir,
mon_sess.graph)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.reshape(x_train, (60000, 784))
x_train = x_train.astype(np.float32) / 255
y_train = to_categorical(y_train, num_classes=10)
while not mon_sess.should_stop():
# Train
index = np.random.choice(60000, 100)
batch_xs = x_train[index]
batch_ys = y_train[index]
mon_sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Test trained model
x_test = np.reshape(x_test, (10000, 784))
x_test = x_test.astype(np.float32) / 255
y_test = to_categorical(y_test, num_classes=10)
if not mon_sess.should_stop():
print("Accuracy: ",
mon_sess.run(accuracy, feed_dict={
x: x_test,
y_: y_test
}))
end = time.time()
if hvd.rank() == 0:
print("Training time: %f seconds" % (end - start))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
parser.add_argument(
'--log_dir',
type=str,
default='/tmp/tensorflow/mnist/logs/mnist_with_summaries',
help='Summaries log directory')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
# run command for this distributed script
# mpirun -np 2 python mnist_softmax_distributed.py --data_dir=/mnt/data/mnist
| apache-2.0 | -2,507,513,872,154,648,600 | 35.525641 | 103 | 0.641102 | false |
jaor/python | bigml/tests/test_26_statistical_test.py | 2 | 3058 | # -*- coding: utf-8 -*-
#
# Copyright 2015-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating test
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_statistical_tst_steps as statistical_tst_create
class TestStatisticalTest(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating an statistical test from a dataset:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create an statistical test from a dataset
And I wait until the statistical test is ready less than <time_3> secs
And I update the statistical test name to "<test_name>"
When I wait until the statistical test is ready less than <time_4> secs
                Then the statistical test name is "<test_name>"
Examples:
| data | time_1 | time_2 | time_3 | time_4 | test_name |
| ../data/iris.csv | 10 | 10 | 20 | 20 | my new statistical test name |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', '10', '10', '20', '20', 'my new statistical test name']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
statistical_tst_create.i_create_a_tst_from_dataset(self)
statistical_tst_create.the_tst_is_finished_in_less_than(self, example[3])
statistical_tst_create.i_update_tst_name(self, example[5])
statistical_tst_create.the_tst_is_finished_in_less_than(self, example[4])
statistical_tst_create.i_check_tst_name(self, example[5])
| apache-2.0 | 636,229,009,120,483,300 | 42.685714 | 100 | 0.61053 | false |
mattaustin/django-thummer | setup.py | 1 | 3784 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2018 Matt Austin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
import codecs
from os import path
from distutils.core import Command
from setuptools import find_packages, setup
from thummer import __license__, __title__, __url__, __version__
BASE_DIR = path.dirname(path.abspath(__file__))
# Get the long description from the README file
with codecs.open(path.join(BASE_DIR, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
class DjangoCommand(Command):
django_command_args = []
django_settings = {
'DATABASES': {'default': {'ENGINE': 'django.db.backends.sqlite3'}},
'INSTALLED_APPS': ['thummer'],
'MEDIA_ROOT': '/tmp/django-thummer/media/',
'MIDDLEWARE_CLASSES': [],
'ROOT_URLCONF': 'thummer.urls',
}
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import django
from django.conf import settings
from django.core.management import call_command
settings.configure(**self.django_settings)
django.setup()
return call_command(*self.django_command_args, verbosity=3)
class CheckCommand(DjangoCommand):
django_command_args = ['check']
class MakeMigrationsCommand(DjangoCommand):
django_command_args = ['makemigrations', 'thummer']
class TestCommand(DjangoCommand):
django_command_args = ['test', 'thummer.tests']
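# The command classes above are wired into setup() below via cmdclass, so they
# can be invoked from the command line, e.g. (assumed invocations):
#   python setup.py test
#   python setup.py makemigrations
#   python setup.py djangocheck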
setup(
name=__title__,
version=__version__,
description='A website screenshot and thumbnailing app for Django.',
long_description=long_description,
url=__url__,
author='Matt Austin',
author_email='[email protected]',
license=__license__,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Multimedia :: Graphics :: Capture',
],
keywords='thummer django website snapshot screenshot thumbnail',
packages=find_packages(),
cmdclass={
'djangocheck': CheckCommand,
'makemigrations': MakeMigrationsCommand,
'test': TestCommand
},
install_requires=[
'django>=1.8,!=1.9.*,!=1.10.*,<=2.1',
'pillow~=5.0',
'python-dateutil~=2.6',
'selenium~=3.9',
'sorl-thumbnail~=12.4',
],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
extras_require={
'tests': [
'coverage~=4.5',
'freezegun~=0.3',
'mock~=2.0',
'pytz',
],
},
)
| apache-2.0 | 4,495,303,831,342,000,000 | 24.567568 | 75 | 0.615751 | false |
otfbot/otfbot | otfbot/services/config.py | 1 | 15720 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# (c) 2005-2008 by Alexander Schier
# (c) 2008 by Robert Weidlich
#
""" configuration service """
import sys
import os
import logging
import glob
from twisted.application import internet, service
from twisted.internet.defer import Deferred
import yaml
from copy import deepcopy
class configService(service.Service):
name = "config"
def __init__(self, filename=None, is_subconfig=False):
"""Initialize the config class and load a config"""
self.logger = logging.getLogger("config")
self.generic_options = {}
self.network_options = {}
self.filename = filename
self.name = "config"
#still the default value?
self.generic_options_default = {}
if not filename:
return
try:
configs = yaml.load_all(open(filename, "r"))
self.generic_options = configs.next()
if not is_subconfig:
self.network_options = configs.next()
if not self.network_options:
self.network_options = {}
for option in self.generic_options.keys():
self.generic_options_default[option] = False
except IOError:
pass #does not exist
except StopIteration:
self.logger.warn("invalid config: "
"config does not contain two sections.")
def _create_preceding(self, network, channel=None):
"""
create preceding dictionary entries for network/channel options
>>> c=configService()
>>> c.network_options
{}
>>> c._create_preceding("samplenetwork", "#samplechannel")
>>> c.network_options
{'samplenetwork': {'#samplechannel': {}}}
>>> c._create_preceding("othernetwork")
>>> c.network_options
{'othernetwork': {}, 'samplenetwork': {'#samplechannel': {}}}
"""
if network:
if network not in self.network_options:
# empty network option/channel-list
self.network_options[network] = {}
if channel:
if channel not in self.network_options[network]:
# emtpy option-list for the given channel
self.network_options[network][channel] = {}
def get(self, option, default, module=None, network=None,
channel=None, set_default=True):
"""
get an option and set the default value, if the option is unset.
>>> c=configService()
>>> c.get("option", "default")
'default'
>>> c.get("option", "unset?")
'default'
@param set_default: if True, the default will be set in the
config, if its used. If False, the default
will be returned, but the config will not
be changed.
"""
if module:
option = module + "." + option
#do NOT create channel config for queries!
#see rfc2811 section 2.1, these four are the only valid channel signs
if channel and not channel[0] in '#+!&':
            channel = None
#This part tries to get the config value for an option only
if network in self.network_options:
if channel in self.network_options[network]:
if option in self.network_options[network][channel]:
#1) choice: channel specific
return deepcopy(self.network_options[network][channel][option])
if option in self.network_options[network]:
#2) choice: network specific
return deepcopy(self.network_options[network][option])
if option in self.generic_options:
#3) choice: general key
return deepcopy(self.generic_options[option])
# if we did not return above, we need to check if the
# default should be written to config and return the default
if network:
if channel:
self._create_preceding(network, channel)
# set the default
self.network_options[network][channel][option] = default
else:
self._create_preceding(network)
# set the default
self.network_options[network][option] = default
else:
# config.writeDefaultValues is a global setting,
# which decides if the get default-values are written to config,
# if they are in no defaultconfig-snippets present
# set_default is a local setting, which decides the same,
# so modules can decide, if they want to write the default
# value to the config. If the global setting is false, its
# never written to config.
# write this config.writeDefaultValues option as default value,
# even if the default is not to write default values.
if option == "config.writeDefaultValues" or (
self.has("config.writeDefaultValues") and
self.getBool("config.writeDefaultValues", False) and
set_default):
# this will write the default value to the config
self.set(option, default, still_default=False)
else:
# this will avoid a config with a lot of default options.
self.set(option, default, still_default=True)
return default
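    # Illustrative lookup order (example added for clarity, not original code):
    #   c.set("nickname", "otfbot")
    #   c.set("nickname", "otfbot-net", network="freenode")
    #   c.set("nickname", "otfbot-chan", network="freenode", channel="#otfbot")
    # c.get("nickname", "fallback", network="freenode", channel="#otfbot")
    # returns "otfbot-chan"; without the channel entry it would fall back to
    # the network-wide value and then to the generic one.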
def has(self, option, module=None):
"""
Test, in which networks/channels a option is set.
Returns a tuple: (general_bool, network_list, (network, channel) list)
>>> c=configService()
>>> c.has("testkey")
(False, [], [])
>>> c.set("testkey", "testvalue")
>>> c.has("testkey")
(True, [], [])
>>> c.set("testkey", "othervalue", network="samplenetwork")
>>> c.has("testkey")
(True, ['samplenetwork'], [])
"""
general = False
networks = []
channels = []
if module:
option = module + "." + option
for item in self.generic_options.keys():
if item == option:
general = True
for network in self.network_options.keys():
if option in self.network_options[network].keys():
networks.append(network)
for network in self.network_options.keys():
for channel in self.network_options[network].keys():
if type(self.network_options[network][channel]) == dict:
if option in self.network_options[network][channel].keys():
channels.append((network, channel))
return (general, networks, channels)
def set(self, option, value, module=None, network=None,
channel=None, still_default=False):
if module:
option = module + "." + option
#do NOT create channel config for queries!
#see rfc2811 section 2.1, these four are the only valid channel signs
if channel and not channel[0] in '#+!&':
channel=None
if network:
if channel:
self._create_preceding(network, channel)
self.network_options[network][channel][option] = value
else:
self._create_preceding(network)
self.network_options[network][option] = value
else:
self.generic_options[option] = value
self.generic_options_default[option] = still_default
self.writeConfig()
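    # Illustrative use of set() (option and network names are made up):
    #   c.set("nickname", "otfbot", module="main", network="samplenetwork")
    # stores the value under network_options["samplenetwork"]["main.nickname"]
    # and persists the whole config immediately via writeConfig().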
def delete(self, option, module=None, network=None, channel=None):
"""
>>> c=configService()
>>> c.set("key", "value")
>>> c.get("key", "unset")
'value'
>>> c.delete("key")
>>> c.get("key", "unset")
'unset'
"""
if module:
option = module + "." + option
if network:
if channel:
try:
del self.network_options[network][channel][option]
                except KeyError:
pass #does not exist anyway
else:
try:
#this can be used to delete a channel definition
del self.network_options[network][option]
                except KeyError:
pass #does not exist anyway
else:
try:
del self.generic_options[option]
            except KeyError:
pass #does not exist anyway
def getNetworks(self):
ret = []
for network in self.network_options.keys():
ret.append(network)
return ret
def getChannels(self, network):
if network in self.network_options.keys():
try:
options = self.network_options[network].keys()
ret = []
for option in options:
if type(self.network_options[network][option]) == dict:
ret.append(option)
return ret
except AttributeError:
                return []
        return []
##########################################################################
#some highlevel functions
##########################################################################
def setConfig(self, opt, value, module=None, network=None, channel=None):
self.logger.debug("deprecated call to setConfig for opt %s" % opt)
self.set(opt, value, module, network, channel)
def delConfig(self, opt, module=None, network=None, channel=None):
self.logger.debug("deprecated call to delConfig for opt %s" % opt)
        self.delete(opt, module, network, channel)
def hasConfig(self, option, module=None):
self.logger.debug("deprecated call to hasConfig for opt %s" % option)
return self.has(option, module)
def getConfig(self, option, defaultvalue="", module=None,
network=None, channel=None, set_default=True):
self.logger.debug("deprecated call to getConfig for opt %s" % option)
return self.get(option, defaultvalue, module,
network, channel, set_default)
def getPath(self, option, datadir, defaultvalue="",
module=None, network=None, channel=None):
value = self.get(option, defaultvalue, module, network, channel)
        if value.startswith("/"):
return value
else:
return datadir + "/" + value
def getBool(self, option, defaultvalue="", module=None,
network=None, channel=None):
"""
>>> c=configService()
>>> c.set("key", "1")
>>> c.set("key2", "on")
>>> c.set("key3", "True")
>>> c.getBool("key") and c.getBool("key2") and c.getBool("key3")
True
>>> c.set("key", "False")
>>> c.set("key2", "any string which is not in [True, true, on, On, 1]")
>>> c.getBool("key") or c.getBool("key2")
False
"""
return self.get(option, defaultvalue, module,
network, channel) in ["True", "true", "On", "on", "1", True, 1]
def writeConfig(self):
if not self.filename:
return False
file = open(self.filename, "w")
        # drop options that are still at their default value, unless the
        # user asked for defaults to be persisted via config.writeDefaultValues
generic_options=deepcopy(self.generic_options)
if not self.getBool("writeDefaultValues", False, "config"):
for option in self.generic_options_default.keys():
if option in generic_options \
and self.generic_options_default[option]:
del(generic_options[option])
file.write(yaml.dump_all([generic_options, self.network_options],
default_flow_style=False))
file.close()
return True
def startService(self):
service.Service.startService(self)
def stopService(self):
self.writeConfig()
service.Service.stopService(self)
def loadConfig(myconfigfile, modulesconfigdirglob):
if os.path.exists(myconfigfile):
myconfig = configService(myconfigfile)
#something like plugins/*/*.yaml
for file in glob.glob(modulesconfigdirglob):
tmp = configService(file, is_subconfig=True)
for option in tmp.generic_options.keys():
if not myconfig.has(option)[0]:
myconfig.set(option, tmp.get(option, ""), still_default=True)
del(tmp)
return myconfig
else:
return None
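# Illustrative bootstrap (file and glob names are made up):
#   config = loadConfig("otfbot.yaml", "plugins/*/*.yaml")
# loadConfig returns None if the main config file does not exist yet;
# options found only in the plugin snippets are merged in as still-default
# values, so they are not persisted unless config.writeDefaultValues is set.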
if __name__ == '__main__':
import doctest, unittest
doctest.testmod()
class configTest(unittest.TestCase):
def setUp(self):
os.mkdir("test_configsnippets")
os.mkdir("test_configsnippets2") #empty
file = open("test_configsnippets/foomod.yaml", "w")
file.write("""fooMod.setting1: 'blub'
fooMod.setting2: true
fooMod.setting3: false""")
file.close()
c = configService("testconfig.yaml")
#c.setConfig("writeDefaultValues", True, "config")
c.writeConfig()
self.config = loadConfig("testconfig.yaml", "test_configsnippets/*.yaml")
def tearDown(self):
os.remove("test_configsnippets/foomod.yaml")
os.rmdir("test_configsnippets")
os.rmdir("test_configsnippets2")
os.remove("testconfig.yaml")
def testDefaults(self):
blub = self.config.get("setting1", "unset", "fooMod")
self.assertTrue(blub == "blub", "fooMod.setting1 is '%s' instead of 'blub'" % blub)
blub2 = self.config.get("setting4", "new_setting", "fooMod")
self.assertTrue(blub2 == "new_setting", "blub2 is '%s' instead of 'new_setting'" % blub2)
self.config.writeConfig()
config2 = loadConfig("testconfig.yaml", "test_configsnippets2/*.yaml")
self.assertTrue(config2.hasConfig("setting1", "fooMod")[0] == False)
self.assertTrue(config2.hasConfig("setting4", "fooMod")[0] == False)
def testWriteDefaults(self):
self.config.set("writeDefaultValues", True, "config")
blub = self.config.get("setting1", "unset", "fooMod")
self.assertTrue(blub == "blub", "fooMod.setting1 is '%s' instead of 'blub'" % blub)
blub2 = self.config.get("setting4", "new_setting", "fooMod")
self.assertTrue(blub2 == "new_setting", "blub2 is '%s' instead of 'new_setting'" % blub2)
self.config.writeConfig()
config2 = loadConfig("testconfig.yaml", "test_configsnippets2/*.yaml")
self.assertTrue(config2.hasConfig("setting1", "fooMod")[0] == True)
self.assertTrue(config2.hasConfig("setting4", "fooMod")[0] == True)
unittest.main()
| gpl-2.0 | 3,308,782,397,604,642,000 | 38.898477 | 101 | 0.560941 | false |
sjones4/eutester | testcases/cloud_user/cloudwatch/monitor_command.py | 5 | 3619 | #!/usr/bin/python
import subprocess
from eucaops import Eucaops
from eucaops import EC2ops
from eutester.eutestcase import EutesterTestCase
from eutester.sshconnection import CommandExitCodeException
class CloudWatchCustom(EutesterTestCase):
def __init__(self, extra_args= None):
self.setuptestcase()
self.setup_parser()
self.parser.add_argument('-c', '--command', help='Command to monitor')
self.parser.add_argument('-n', '--namespace', help='Namespace to put data under')
self.parser.add_argument('-m', '--metric-name', help='Metric name to put data under')
self.parser.add_argument('-u', '--unit', default=None, help="Unit that the value be returned with")
        self.parser.add_argument('-i', '--interval', default=10, type=int, help='Time between executions of the monitoring task')
if extra_args:
for arg in extra_args:
self.parser.add_argument(arg)
self.get_args()
# Setup basic eutester object
if self.args.region:
self.tester = EC2ops( credpath=self.args.credpath, region=self.args.region)
else:
self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)
def clean_method(self):
pass
def MonitorLocal(self):
while True:
try:
output = self.tester.local(self.args.command)
self.tester.debug(output)
value = int(output)
self.tester.put_metric_data(self.args.namespace, self.args.metric_name, value=value, unit=self.args.unit)
except subprocess.CalledProcessError:
self.tester.critical("Command exited Non-zero not putting data")
except ValueError:
self.tester.critical("Command returned non-integer")
self.tester.sleep(self.args.interval)
def MonitorRemotes(self):
while True:
for machine in self.tester.get_component_machines():
try:
output = "".join(machine.sys(self.args.command, code=0))
self.tester.debug(output)
value = int(output)
### Push to Hostname dimension
self.tester.put_metric_data(self.args.namespace, self.args.metric_name, unit=self.args.unit,
dimensions={"Hostname": machine.hostname}, value=value)
### Push to aggregate metric as well
self.tester.put_metric_data(self.args.namespace, self.args.metric_name, unit=self.args.unit, value=value)
except CommandExitCodeException:
self.tester.critical("Command exited Non-zero not putting data")
except ValueError:
self.tester.critical("Command returned non-integer")
except Exception, e:
self.tester.critical("Unknown failure: " + str(e))
self.tester.sleep(self.args.interval)
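# Example invocation (all values are illustrative; the usual eutester
# arguments such as the credentials path are also needed). If no test list
# is passed, only MonitorLocal is run:
#   ./monitor_command.py --command "ls /tmp | wc -l" --namespace Custom \
#       --metric-name TmpFileCount --unit Count --interval 60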
if __name__ == "__main__":
testcase = CloudWatchCustom()
### Use the list of tests passed from config/command line to determine what subset of tests to run
### or use a predefined list
list = testcase.args.tests or [ "MonitorLocal"]
### Convert test suite methods to EutesterUnitTest objects
unit_list = [ ]
for test in list:
unit_list.append( testcase.create_testunit_by_name(test) )
### Run the EutesterUnitTest objects
result = testcase.run_test_case_list(unit_list,clean_on_exit=True)
exit(result) | bsd-2-clause | -4,465,803,839,468,287,500 | 45.410256 | 130 | 0.617021 | false |
davidbstein/moderator | src/model/org.py | 1 | 1449 | import os
import json
from model.helpers import (
r2d,
DB,
PermissionError,
)
from model.user import User
_DOMAIN_MAPS = json.loads(os.environ.get("DOMAIN_MAPS", "{}"))
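# DOMAIN_MAPS is expected to hold a JSON object mapping a user's email domain
# onto the canonical org domain, e.g. (illustrative):
#   DOMAIN_MAPS='{"alumni.example.edu": "example.edu"}'
# Unlisted domains fall through unchanged in Org.get() below.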
class Org:
def __init__(self):
raise Exception("This class is a db wrapper and should not be instantiated.")
@classmethod
def get(cls, domain, user_email, **__):
user = User.get(user_email)
user_domain = _DOMAIN_MAPS.get(user['domain'], user['domain'])
if user_domain != domain:
raise PermissionError("%s does not have a %s email" % (user_email, domain))
org = DB.ex(
DB.orgs.select(DB.orgs.columns.domain == domain)
).fetchone()
assert org, "There is no org for the @" + user['domain'] + " domain yet! Are you signed in with your work account?"
return r2d(org)
@classmethod
def create(cls, domain, user_email, **__):
command = DB.orgs.insert({
"domain": domain,
"moderators": [user_email],
"title": domain,
})
DB.ex(command)
return Org.get(domain, user_email)
@classmethod
def update(cls, domain, user_email, moderators=None, title=None, **__):
values = {}
Org.get(domain, user_email)
if moderators:
values["moderators"] = moderators
if title:
values["title"] = title
command = DB.orgs.update(
).where(
DB.orgs.columns.domain == domain
).values(
**values)
DB.ex(command)
return Org.get(domain, user_email)
| mit | 3,625,527,651,113,419,300 | 25.833333 | 119 | 0.622498 | false |
ptkool/spark | python/pyspark/sql/session.py | 2 | 29686 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# To disallow implicit relative import. Remove this once we drop Python 2.
from __future__ import absolute_import
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.conversion import SparkConversionMixin
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
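# Note: _monkey_patch_RDD is invoked when a SparkSession is constructed, so
# rdd.toDF() is only available once a session exists, and it always builds the
# DataFrame through that session's createDataFrame().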
class SparkSession(SparkConversionMixin):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
_sc = None
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive SerDes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
def _sparkContext(self, sc):
with self._lock:
self._sc = sc
return self
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
if self._sc is not None:
sc = self._sc
else:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
# This SparkContext may be an existing one.
sc = SparkContext.getOrCreate(sparkConf)
# Do not update `SparkConf` for existing `SparkContext`, as it's shared
# by all sessions.
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances."""
_instantiatedSession = None
_activeSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
if self._jvm.SparkSession.getDefaultSession().isDefined() \
and not self._jvm.SparkSession.getDefaultSession().get() \
.sparkContext().isStopped():
jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
else:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
SparkSession._activeSession = self
self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@classmethod
@since(3.0)
def getActiveSession(cls):
"""
Returns the active SparkSession for the current thread, returned by the builder.
>>> s = SparkSession.getActiveSession()
>>> l = [('Alice', 1)]
>>> rdd = s.sparkContext.parallelize(l)
>>> df = s.createDataFrame(rdd, ['name', 'age'])
>>> df.select("age").collect()
[Row(age=1)]
"""
from pyspark import SparkContext
sc = SparkContext._active_spark_context
if sc is None:
return None
else:
if sc._jvm.SparkSession.getActiveSession().isDefined():
SparkSession(sc, sc._jvm.SparkSession.getActiveSession().get())
return SparkSession._activeSession
else:
return None
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions, etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@staticmethod
def _create_shell_session():
"""
Initialize a SparkSession for a pyspark shell session. This is called from shell.py
to make error handling simpler without needing to declare local variables in that
script, which would expose those to users.
"""
import py4j
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
try:
# Try to access HiveConf, it will raise exception if Hive is not added
conf = SparkConf()
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
return SparkSession.builder\
.enableHiveSupport()\
.getOrCreate()
else:
return SparkSession.builder.getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
return SparkSession.builder.getOrCreate()
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of either :class:`Row`,
:class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
Each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
etc.), :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
.. note:: Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.
.. note:: When Arrow optimization is enabled, strings inside Pandas DataFrame in Python
2 are converted into bytes as they are bytes in Python 2 whereas regular strings are
left as strings. When using strings in Python 2, use unicode `u""` as Python standard
practice.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
SparkSession._activeSession = self
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
# Create a DataFrame from pandas DataFrame.
return super(SparkSession, self).createDataFrame(
data, schema, samplingRatio, verifySchema)
return self._create_dataframe(data, schema, samplingRatio, verifySchema)
def _create_dataframe(self, data, schema, samplingRatio, verifySchema):
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
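    # _create_dataframe above follows one pipeline for both RDD and local
    # input: verify/prepare each row against the schema, convert rows to
    # Spark's internal representation, then hand the serialized RDD to the
    # JVM via applySchemaToPythonRDD to build the DataFrame.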
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` instances active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
# We should clean the default session up. See SPARK-23228.
self._jvm.SparkSession.clearDefaultSession()
self._jvm.SparkSession.clearActiveSession()
SparkSession._instantiatedSession = None
SparkSession._activeSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | 7,578,671,752,249,879,000 | 38.476064 | 100 | 0.594422 | false |
SchoolIdolTomodachi/SchoolIdolAPI | api/migrations/0010_auto_20150123_1809.py | 4 | 1174 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0009_account_ownedcard'),
]
operations = [
migrations.AlterField(
model_name='account',
name='center',
field=models.ForeignKey(blank=True, to='api.Card', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='account',
name='friend_id',
field=models.PositiveIntegerField(null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='account',
name='language',
field=models.CharField(default=b'JP', max_length=10, choices=[(b'JP', b'Japanese'), (b'EN', b'English'), (b'KR', b'Korean'), (b'CH', b'Chinese')]),
preserve_default=True,
),
migrations.AlterField(
model_name='account',
name='rank',
field=models.PositiveIntegerField(null=True, blank=True),
preserve_default=True,
),
]
| apache-2.0 | -3,213,157,145,857,444,400 | 29.894737 | 159 | 0.553663 | false |
amnona/heatsequer | oldver/amnonutils.py | 1 | 6500 | #!/usr/bin/env python
"""
amnonscript
amnonutils.py
various utility functions
"""
import sys
import numpy as np
from sys import getsizeof, stderr
from itertools import chain
from collections import deque
try:
from reprlib import repr
except ImportError:
pass
__version__ = "0.2"
def Debug(dlevel,*args):
if dlevel>=DebugLevel:
print (args)
def reverse(seq):
oseq=''
for a in seq:
oseq=a+oseq
return oseq
def complement(seq):
seq=seq.upper()
oseq=''
for a in seq:
if a=='A':
oseq+='T'
elif a=='C':
oseq+='G'
elif a=='G':
oseq+='C'
elif a=='T':
oseq+='A'
else:
oseq+='N'
return oseq
def revcomp(seq):
return reverse(complement(seq))
def readfastaseqs(filename):
"""
read a fasta file and return a list of sequences
input:
filename - the fasta file name
output:
seqs - a list of sequences
headers - a list of the headers
"""
fl=open(filename,"rU")
cseq=''
seqs=[]
headers=[]
for cline in fl:
if cline[0]=='>':
headers.append(cline[1:].rstrip())
if cseq:
seqs.append(cseq)
cseq=''
else:
cseq+=cline.strip()
if cseq:
seqs.append(cseq)
return seqs,headers
def isort(clist,reverse=False):
"""
matlab style sort
returns both sorted list and the indices of the sort
input:
clist: a list to sort
reverse - true to reverse the sort direction
output:
(svals,sidx)
svals - the sorted values
sidx - the sorted indices
"""
res=sorted(enumerate(clist), key=lambda x:x[1],reverse=reverse)
svals=[i[1] for i in res]
sidx=[i[0] for i in res]
return svals,sidx
def tofloat(clist):
"""
convert a list of strings to a list of floats
input:
clist - list of strings
output:
res - list of floats
"""
res=[]
for s in clist:
try:
res.append(float(s))
except:
res.append(0)
return res
def reorder(clist,idx):
""""
reorder a list according to idx
"""
return [clist[i] for i in idx]
def delete(clist,idx):
"""
delete elements from list
"""
for i in sorted(idx, reverse=True):
del clist[i]
return clist
def clipstrings(clist,maxlen,reverse=False):
"""
clip all strings in a list to maxlen
input:
clist - list of strings
maxlen - maximal length for each string
reverse - if true - clip from end (otherwise from beginning)
"""
retlist=[]
for cstr in clist:
clen=min(maxlen,len(cstr))
if reverse:
retlist.append(cstr[-clen:])
else:
retlist.append(cstr[0:clen])
return retlist
def mlhash(cstr,emod=0):
"""
do a hash function on the string cstr
based on the matlab hash function string2hash
input:
cstr - the string to hash
emod - if 0, don't do modulu, otherwise do modulo
"""
chash = 5381
pnum=pow(2,32)-1
for cc in cstr:
chash=np.mod(chash*33+ord(cc),pnum)
if emod>0:
chash=np.mod(chash,emod)
return(chash)
def nicenum(num):
"""
	get a nice string representation of the number
(turn to K/M if big, m/u if small, trim numbers after decimal point)
input:
num - the number
output:
numstr - the nice string of the number
"""
if num==0:
numstr="0"
elif abs(num)>1000000:
numstr="%.1fM" % (float(num)/1000000)
elif abs(num)>1000:
numstr="%.1fK" % (float(num)/1000)
elif abs(num)<0.000001:
numstr="%.1fu" % (num*1000000)
elif abs(num)<0.001:
numstr="%.1fm" % (num*1000)
else:
		numstr="%d" % num
return numstr
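# Examples of nicenum() output (illustrative): nicenum(1500000) -> "1.5M",
# nicenum(2500) -> "2.5K", nicenum(0.0005) -> "0.5m"; values between 0.001
# and 1000 are shown with the decimals trimmed, e.g. nicenum(42.7) -> 42.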
def SeqToArray(seq):
""" convert a string sequence to a numpy array"""
seqa=np.zeros(len(seq),dtype=np.int8)
for ind,base in enumerate(seq):
if base=='A':
seqa[ind]=0
elif base=='a':
seqa[ind]=0
elif base=='C':
seqa[ind]=1
elif base=='c':
seqa[ind]=1
elif base=='G':
seqa[ind]=2
elif base=='g':
seqa[ind]=2
elif base=='T':
seqa[ind]=3
elif base=='t':
seqa[ind]=3
elif base=='-':
seqa[ind]=4
else:
seqa[ind]=5
return(seqa)
def ArrayToSeq(seqa):
""" convert a numpy array to sequence (upper case)"""
seq=''
for cnuc in seqa:
if cnuc==0:
seq+='A'
elif cnuc==1:
seq+='C'
elif cnuc==2:
seq+='G'
elif cnuc==3:
seq+='T'
else:
seq+='N'
return(seq)
def fdr(pvalues, correction_type = "Benjamini-Hochberg"):
"""
consistent with R - print correct_pvalues_for_multiple_testing([0.0, 0.01, 0.029, 0.03, 0.031, 0.05, 0.069, 0.07, 0.071, 0.09, 0.1])
"""
pvalues = np.array(pvalues)
n = float(pvalues.shape[0])
new_pvalues = np.empty(n)
if correction_type == "Bonferroni":
new_pvalues = n * pvalues
elif correction_type == "Bonferroni-Holm":
values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]
values.sort()
for rank, vals in enumerate(values):
pvalue, i = vals
new_pvalues[i] = (n-rank) * pvalue
elif correction_type == "Benjamini-Hochberg":
values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]
values.sort()
values.reverse()
new_values = []
for i, vals in enumerate(values):
rank = n - i
pvalue, index = vals
new_values.append((n/rank) * pvalue)
for i in xrange(0, int(n)-1):
if new_values[i] < new_values[i+1]:
new_values[i+1] = new_values[i]
for i, vals in enumerate(values):
pvalue, index = vals
new_pvalues[index] = new_values[i]
return new_pvalues
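# Worked example for fdr() (numbers are illustrative): with the default
# Benjamini-Hochberg correction each p-value is scaled by n/rank (rank 1 for
# the smallest p-value) and a running minimum, taken from the largest
# p-value downwards, keeps the adjusted values monotone.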
def common_start(sa,sb):
"""
returns the longest common substring from the beginning of sa and sb
from http://stackoverflow.com/questions/18715688/find-common-substring-between-two-strings
"""
def _iter():
for a, b in zip(sa, sb):
if a == b:
yield a
else:
return
return ''.join(_iter())
DebugLevel=5
def listdel(dat,todel):
"""
delete elements with indices from list todel in the list dat
input:
dat - the list to remove elements from
todel - indices of the items to remove
output:
dat - the new deleted list
"""
for cind in sorted(todel, reverse=True):
del dat[cind]
return dat
def listtodict(dat):
"""
convert a list into a dict with keys as elements, values the position in the list
input:
dat - the list
output:
thedict
"""
thedict={}
for idx,cdat in enumerate(dat):
if cdat in thedict:
thedict[cdat].append(idx)
else:
thedict[cdat]=[idx]
return thedict
def savelisttofile(dat,filename,delimiter='\t'):
"""
save a list to a (tab delimited) file
	input:
dat - the list to save
filename - the filename to save to
delimiter - the delimiter to use
"""
with open(filename,'w') as fl:
fl.write(delimiter.join(dat))
def dictupper(dat):
"""
turn dict keys to upper case
input:
dat - a dict with string keys
output:
newdat - a dict with the upper case keys
"""
newdat = {k.upper(): v for k,v in dat.iteritems()}
return newdat
| bsd-3-clause | -8,276,015,355,563,108,000 | 17.84058 | 133 | 0.656769 | false |
jmwatte/beets | test/helper.py | 24 | 18275 | # This file is part of beets.
# Copyright 2015, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module includes various helpers that provide fixtures, capture
information or mock the environment.
- The `control_stdin` and `capture_stdout` context managers allow one to
interact with the user interface.
- `has_program` checks the presence of a command on the system.
- The `generate_album_info` and `generate_track_info` functions return
fixtures to be used when mocking the autotagger.
- The `TestImportSession` allows one to run importer code while
controlling the interactions through code.
- The `TestHelper` class encapsulates various fixtures that can be set up.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import sys
import os
import os.path
import shutil
import subprocess
from tempfile import mkdtemp, mkstemp
from contextlib import contextmanager
from StringIO import StringIO
from enum import Enum
import beets
from beets import logging
from beets import config
import beets.plugins
from beets.library import Library, Item, Album
from beets import importer
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.mediafile import MediaFile, Image
from beets.ui import _arg_encoding
# TODO Move AutotagMock here
from test import _common
class LogCapture(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.messages = []
def emit(self, record):
self.messages.append(unicode(record.msg))
@contextmanager
def capture_log(logger='beets'):
capture = LogCapture()
log = logging.getLogger(logger)
log.addHandler(capture)
try:
yield capture.messages
finally:
log.removeHandler(capture)
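# Illustrative usage of capture_log (the logged message is made up):
#
#     with capture_log() as logs:
#         ...  # run code that logs through the 'beets' logger
#     assert any('import' in message for message in logs)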
@contextmanager
def control_stdin(input=None):
"""Sends ``input`` to stdin.
>>> with control_stdin('yes'):
... input()
'yes'
"""
org = sys.stdin
sys.stdin = StringIO(input)
sys.stdin.encoding = 'utf8'
try:
yield sys.stdin
finally:
sys.stdin = org
@contextmanager
def capture_stdout():
"""Save stdout in a StringIO.
>>> with capture_stdout() as output:
... print('spam')
...
>>> output.getvalue()
'spam'
"""
org = sys.stdout
sys.stdout = capture = StringIO()
sys.stdout.encoding = 'utf8'
try:
yield sys.stdout
finally:
sys.stdout = org
print(capture.getvalue())
def has_program(cmd, args=['--version']):
"""Returns `True` if `cmd` can be executed.
"""
full_cmd = [cmd] + args
for i, elem in enumerate(full_cmd):
if isinstance(elem, unicode):
full_cmd[i] = elem.encode(_arg_encoding())
try:
with open(os.devnull, 'wb') as devnull:
subprocess.check_call(full_cmd, stderr=devnull,
stdout=devnull, stdin=devnull)
except OSError:
return False
except subprocess.CalledProcessError:
return False
else:
return True
class TestHelper(object):
"""Helper mixin for high-level cli and plugin tests.
This mixin provides methods to isolate beets' global state provide
fixtures.
"""
# TODO automate teardown through hook registration
def setup_beets(self, disk=False):
"""Setup pristine global configuration and library for testing.
Sets ``beets.config`` so we can safely use any functionality
that uses the global configuration. All paths used are
contained in a temporary directory
Sets the following properties on itself.
- ``temp_dir`` Path to a temporary directory containing all
files specific to beets
- ``libdir`` Path to a subfolder of ``temp_dir``, containing the
library's media files. Same as ``config['directory']``.
- ``config`` The global configuration used by beets.
- ``lib`` Library instance created with the settings from
``config``.
Make sure you call ``teardown_beets()`` afterwards.
"""
self.create_temp_dir()
os.environ['BEETSDIR'] = self.temp_dir
self.config = beets.config
self.config.clear()
self.config.read()
self.config['plugins'] = []
self.config['verbose'] = 1
self.config['ui']['color'] = False
self.config['threaded'] = False
self.libdir = os.path.join(self.temp_dir, 'libdir')
os.mkdir(self.libdir)
self.config['directory'] = self.libdir
if disk:
dbpath = self.config['library'].as_filename()
else:
dbpath = ':memory:'
self.lib = Library(dbpath, self.libdir)
def teardown_beets(self):
del self.lib._connections
if 'BEETSDIR' in os.environ:
del os.environ['BEETSDIR']
self.remove_temp_dir()
self.config.clear()
beets.config.read(user=False, defaults=True)
def load_plugins(self, *plugins):
"""Load and initialize plugins by names.
        Similar to setting a list of plugins in the configuration. Make
sure you call ``unload_plugins()`` afterwards.
"""
# FIXME this should eventually be handled by a plugin manager
beets.config['plugins'] = plugins
beets.plugins.load_plugins(plugins)
beets.plugins.find_plugins()
# Take a backup of the original _types to restore when unloading
Item._original_types = dict(Item._types)
Album._original_types = dict(Album._types)
Item._types.update(beets.plugins.types(Item))
Album._types.update(beets.plugins.types(Album))
def unload_plugins(self):
"""Unload all plugins and remove the from the configuration.
"""
# FIXME this should eventually be handled by a plugin manager
beets.config['plugins'] = []
beets.plugins._classes = set()
beets.plugins._instances = {}
Item._types = Item._original_types
Album._types = Album._original_types
def create_importer(self, item_count=1, album_count=1):
"""Create files to import and return corresponding session.
Copies the specified number of files to a subdirectory of
`self.temp_dir` and creates a `TestImportSession` for this path.
"""
import_dir = os.path.join(self.temp_dir, 'import')
if not os.path.isdir(import_dir):
os.mkdir(import_dir)
album_no = 0
while album_count:
album = u'album {0}'.format(album_no)
album_dir = os.path.join(import_dir, album)
if os.path.exists(album_dir):
album_no += 1
continue
os.mkdir(album_dir)
album_count -= 1
track_no = 0
album_item_count = item_count
while album_item_count:
title = 'track {0}'.format(track_no)
src = os.path.join(_common.RSRC, 'full.mp3')
dest = os.path.join(album_dir, '{0}.mp3'.format(title))
if os.path.exists(dest):
track_no += 1
continue
album_item_count -= 1
shutil.copy(src, dest)
mediafile = MediaFile(dest)
mediafile.update({
'artist': 'artist',
'albumartist': 'album artist',
'title': title,
'album': album,
'mb_albumid': None,
'mb_trackid': None,
})
mediafile.save()
config['import']['quiet'] = True
config['import']['autotag'] = False
config['import']['resume'] = False
return TestImportSession(self.lib, loghandler=None, query=None,
paths=[import_dir])
# Library fixtures methods
def create_item(self, **values):
"""Return an `Item` instance with sensible default values.
        The item receives its attributes from the `**values` parameter. The
`title`, `artist`, `album`, `track`, `format` and `path`
attributes have defaults if they are not given as parameters.
        The `title` attribute is formatted with a running item count to
prevent duplicates. The default for the `path` attribute
respects the `format` value.
The item is attached to the database from `self.lib`.
"""
item_count = self._get_item_count()
values_ = {
'title': u't\u00eftle {0}',
'artist': u'the \u00e4rtist',
'album': u'the \u00e4lbum',
'track': item_count,
'format': 'MP3',
}
values_.update(values)
values_['title'] = values_['title'].format(item_count)
values_['db'] = self.lib
item = Item(**values_)
if 'path' not in values:
item['path'] = 'audio.' + item['format'].lower()
return item
def add_item(self, **values):
"""Add an item to the library and return it.
Creates the item by passing the parameters to `create_item()`.
If `path` is not set in `values` it is set to `item.destination()`.
"""
item = self.create_item(**values)
item.add(self.lib)
if 'path' not in values:
item['path'] = item.destination()
item.store()
return item
def add_item_fixture(self, **values):
"""Add an item with an actual audio file to the library.
"""
item = self.create_item(**values)
extension = item['format'].lower()
item['path'] = os.path.join(_common.RSRC, 'min.' + extension)
item.add(self.lib)
item.move(copy=True)
item.store()
return item
def add_album(self, **values):
item = self.add_item(**values)
return self.lib.add_album([item])
def add_item_fixtures(self, ext='mp3', count=1):
"""Add a number of items with files to the database.
"""
# TODO base this on `add_item()`
items = []
path = os.path.join(_common.RSRC, 'full.' + ext)
for i in range(count):
item = Item.from_path(bytes(path))
item.album = u'\u00e4lbum {0}'.format(i) # Check unicode paths
item.title = u't\u00eftle {0}'.format(i)
item.add(self.lib)
item.move(copy=True)
item.store()
items.append(item)
return items
def add_album_fixture(self, track_count=1, ext='mp3'):
"""Add an album with files to the database.
"""
items = []
path = os.path.join(_common.RSRC, 'full.' + ext)
for i in range(track_count):
item = Item.from_path(bytes(path))
item.album = u'\u00e4lbum' # Check unicode paths
item.title = u't\u00eftle {0}'.format(i)
item.add(self.lib)
item.move(copy=True)
item.store()
items.append(item)
return self.lib.add_album(items)
def create_mediafile_fixture(self, ext='mp3', images=[]):
"""Copies a fixture mediafile with the extension to a temporary
location and returns the path.
        It keeps track of the created locations and will delete them with
`remove_mediafile_fixtures()`
`images` is a subset of 'png', 'jpg', and 'tiff'. For each
specified extension a cover art image is added to the media
file.
"""
src = os.path.join(_common.RSRC, 'full.' + ext)
handle, path = mkstemp()
os.close(handle)
shutil.copyfile(src, path)
if images:
mediafile = MediaFile(path)
imgs = []
for img_ext in images:
img_path = os.path.join(_common.RSRC,
'image-2x3.{0}'.format(img_ext))
with open(img_path, 'rb') as f:
imgs.append(Image(f.read()))
mediafile.images = imgs
mediafile.save()
if not hasattr(self, '_mediafile_fixtures'):
self._mediafile_fixtures = []
self._mediafile_fixtures.append(path)
return path
def remove_mediafile_fixtures(self):
if hasattr(self, '_mediafile_fixtures'):
for path in self._mediafile_fixtures:
os.remove(path)
    def _get_item_count(self):
        if not hasattr(self, '_item_count'):
            self._item_count = 0
        count = self._item_count
        self._item_count = count + 1
        return count
# Running beets commands
def run_command(self, *args):
if hasattr(self, 'lib'):
lib = self.lib
else:
lib = Library(':memory:')
beets.ui._raw_main(list(args), lib)
def run_with_output(self, *args):
with capture_stdout() as out:
self.run_command(*args)
return out.getvalue().decode('utf-8')
# Safe file operations
def create_temp_dir(self):
"""Create a temporary directory and assign it into
`self.temp_dir`. Call `remove_temp_dir` later to delete it.
"""
self.temp_dir = mkdtemp()
def remove_temp_dir(self):
"""Delete the temporary directory created by `create_temp_dir`.
"""
shutil.rmtree(self.temp_dir)
def touch(self, path, dir=None, content=''):
"""Create a file at `path` with given content.
If `dir` is given, it is prepended to `path`. After that, if the
path is relative, it is resolved with respect to
`self.temp_dir`.
"""
if dir:
path = os.path.join(dir, path)
if not os.path.isabs(path):
path = os.path.join(self.temp_dir, path)
parent = os.path.dirname(path)
if not os.path.isdir(parent):
os.makedirs(parent)
with open(path, 'a+') as f:
f.write(content)
return path
class TestImportSession(importer.ImportSession):
"""ImportSession that can be controlled programaticaly.
>>> lib = Library(':memory:')
>>> importer = TestImportSession(lib, paths=['/path/to/import'])
>>> importer.add_choice(importer.action.SKIP)
>>> importer.add_choice(importer.action.ASIS)
>>> importer.default_choice = importer.action.APPLY
>>> importer.run()
This imports ``/path/to/import`` into `lib`. It skips the first
    album and imports the second one with metadata from the tags. For the
remaining albums, the metadata from the autotagger will be applied.
"""
def __init__(self, *args, **kwargs):
super(TestImportSession, self).__init__(*args, **kwargs)
self._choices = []
self._resolutions = []
default_choice = importer.action.APPLY
def add_choice(self, choice):
self._choices.append(choice)
def clear_choices(self):
self._choices = []
def choose_match(self, task):
try:
choice = self._choices.pop(0)
except IndexError:
choice = self.default_choice
if choice == importer.action.APPLY:
return task.candidates[0]
elif isinstance(choice, int):
return task.candidates[choice - 1]
else:
return choice
choose_item = choose_match
Resolution = Enum('Resolution', 'REMOVE SKIP KEEPBOTH')
default_resolution = 'REMOVE'
def add_resolution(self, resolution):
assert isinstance(resolution, self.Resolution)
self._resolutions.append(resolution)
def resolve_duplicate(self, task, found_duplicates):
try:
res = self._resolutions.pop(0)
except IndexError:
res = self.default_resolution
if res == self.Resolution.SKIP:
task.set_choice(importer.action.SKIP)
elif res == self.Resolution.REMOVE:
task.should_remove_duplicates = True
def generate_album_info(album_id, track_ids):
"""Return `AlbumInfo` populated with mock data.
Sets the album info's `album_id` field is set to the corresponding
argument. For each value in `track_ids` the `TrackInfo` from
`generate_track_info` is added to the album info's `tracks` field.
Most other fields of the album and track info are set to "album
info" and "track info", respectively.
"""
tracks = [generate_track_info(id) for id in track_ids]
album = AlbumInfo(
album_id='album info',
album='album info',
artist='album info',
artist_id='album info',
tracks=tracks,
)
for field in ALBUM_INFO_FIELDS:
setattr(album, field, 'album info')
return album
ALBUM_INFO_FIELDS = ['album', 'album_id', 'artist', 'artist_id',
'asin', 'albumtype', 'va', 'label',
'artist_sort', 'releasegroup_id', 'catalognum',
'language', 'country', 'albumstatus', 'media',
'albumdisambig', 'artist_credit',
'data_source', 'data_url']
def generate_track_info(track_id='track info', values={}):
"""Return `TrackInfo` populated with mock data.
The `track_id` field is set to the corresponding argument. All other
string fields are set to "track info".
"""
track = TrackInfo(
title='track info',
track_id=track_id,
)
for field in TRACK_INFO_FIELDS:
setattr(track, field, 'track info')
for field, value in values.items():
setattr(track, field, value)
return track
TRACK_INFO_FIELDS = ['artist', 'artist_id', 'artist_sort',
'disctitle', 'artist_credit', 'data_source',
'data_url']
| mit | -4,736,847,655,937,431,000 | 31.287986 | 75 | 0.591683 | false |
wujuguang/django-admin-honeypot | admin_honeypot/admin.py | 4 | 1519 | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from admin_honeypot.models import LoginAttempt
class LoginAttemptAdmin(admin.ModelAdmin):
list_display = ('username', 'get_ip_address', 'get_session_key', 'timestamp', 'get_path')
list_filter = ('timestamp',)
readonly_fields = ('path', 'username', 'ip_address', 'session_key', 'user_agent')
search_fields = ('username', 'ip_address', 'user_agent', 'path')
def get_actions(self, request):
actions = super(LoginAttemptAdmin, self).get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
def get_session_key(self, instance):
return '<a href="?session_key=%(key)s">%(key)s</a>' % {'key': instance.session_key}
get_session_key.short_description = _('Session')
get_session_key.allow_tags = True
def get_ip_address(self, instance):
return '<a href="?ip_address=%(ip)s">%(ip)s</a>' % {'ip': instance.ip_address}
get_ip_address.short_description = _('IP Address')
get_ip_address.allow_tags = True
def get_path(self, instance):
return '<a href="?path=%(path)s">%(path)s</a>' % {'path': instance.path}
get_path.short_description = _('URL')
get_path.allow_tags = True
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
admin.site.register(LoginAttempt, LoginAttemptAdmin)
| mit | -668,208,277,629,130,600 | 36.975 | 93 | 0.654378 | false |
ssut/choco | choco/endpoint.py | 1 | 3452 | #-*- coding: utf-8 -*-
"""
Python endpoint implementation with regular expressions
https://gist.github.com/ssut/6ecf93fac9457dd623b0
"""
import inspect
import re
from .kakao.request import KakaoRequest
from .kakao.room import KakaoRoom
from .kakao.session import KakaoSession
def endpoint_from_func(func):
assert func is not None, 'expected func if endpoint is not provided.'
return func.__name__
class Endpoint(object):
def __init__(self):
self.rules = []
self.functions = {}
self.prefix = ''
def __call__(self, *args):
return self.dispatch(*args)
def set_prefix(self, prefix):
self.prefix = re.escape(prefix)
@property
def routes(self):
routes = []
routes.append(' %-30s| %-20s| %-16s' % ('Rule', 'Endpoint Function', 'Arguments'))
for regex, endpoint in self.rules:
args = tuple(inspect.getargspec(self.functions[endpoint]).args)
            route = u' {:30s}| {:20s}| {:16s}'.format(regex.pattern, endpoint, args)
routes.append(route)
return '\n'.join(routes)
def route(self, rule, **options):
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_rule(rule, endpoint, f, **options)
return f
return decorator
def add_rule(self, rule, endpoint=None, func=None, **options):
"""
Basically this example:
@app.route('f')
def foo():
pass
Is equivalent to the following:
def foo():
pass
app.add_rule('f', 'foo', foo)
"""
if endpoint is None:
endpoint = endpoint_from_func(func)
options['endpoint'] = endpoint
if func is not None:
old_func = self.functions.get(endpoint)
if old_func is not None and old_func != func:
raise AssertionError('function mapping is overwriting an '
'existing endpoint function: %s', endpoint)
self.functions[endpoint] = func
if not options.has_key('re'):
if not options.has_key('prefix'):
rule = re.compile('^' + self.prefix + re.escape(rule) + '$')
else:
rule = re.compile('^' + re.escape(rule) + '$')
else:
if not options.has_key('prefix'):
if rule.startswith('^'):
rule = rule[1:]
rule = '^' + self.prefix + rule
else:
rule = self.prefix + rule
rule = re.compile(rule)
rule = (rule, endpoint)
self.rules.append(rule)
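    # Illustrative usage (a sketch, not part of the original module): a rule
    # registered with ``re=True`` may contain capture groups, and each captured
    # string is passed to the handler as an extra positional argument, e.g.
    #
    #   ep = Endpoint()
    #   ep.set_prefix(u'!')
    #
    #   @ep.route(r'^weather (.+)$', re=True)
    #   def weather(request, city):
    #       pass
    #
    #   # ep.dispatch(u'!weather Seoul', message, room, session) calls
    #   # weather(request, u'Seoul').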
def dispatch(self, rule, message, room, session):
matches = (
(regex.match(rule), ep) for regex, ep in self.rules
)
# print matches
matches = (
(match.groups(), ep) for match, ep in matches if match is not None
)
for args, endpoint in matches:
session.update(message)
attachment = message.attachment
request = KakaoRequest(room=room, session=session, message=message,
attachment=attachment)
return self.functions[endpoint](request, *args)
        return None
| mit | 3,752,773,583,168,019,000 | 32.201923 | 90 | 0.529838 | false |
gooofy/nlp | examples/voice_assistant_de/toplevel.py | 6 | 1729 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Guenter Bartsch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# this is the toplevel module which just depends on every module needed
# for our chatbot
DEPENDS = [ 'astro',
'base',
'bio',
'config',
'culture',
'data',
'dialog',
'dt',
'economy',
'food',
'games',
'geo',
'health',
'history',
'home',
'humans',
'humor',
'language',
'legal',
'literature',
'mathematics',
'media',
'miscellaneous',
'movies',
'music',
'mythology',
'news',
'personal',
'philosophy',
'physics',
'politics',
'psychology',
'social',
'sports',
'tech',
'television',
'transport',
'weather' ]
| lgpl-3.0 | 2,363,411,443,272,559,600 | 27.816667 | 74 | 0.45749 | false |
Jobava/zamboni | mkt/websites/tests/test_models.py | 1 | 4322 | import mock
from nose.tools import eq_, ok_
from django.core.files.storage import default_storage as storage
from lib.utils import static_url
from mkt.constants.applications import (DEVICE_DESKTOP, DEVICE_GAIA,
DEVICE_TYPE_LIST)
from mkt.constants.regions import URY, USA
from mkt.site.storage_utils import storage_is_remote
from mkt.site.tests import TestCase
from mkt.websites.models import Website
from mkt.websites.utils import website_factory
class TestWebsiteModel(TestCase):
def test_devices(self):
website = Website(devices=[device.id for device in DEVICE_TYPE_LIST])
eq_(sorted(website.devices),
sorted([device.id for device in DEVICE_TYPE_LIST]))
def test_devices_names(self):
website = Website(devices=[DEVICE_DESKTOP.id, DEVICE_GAIA.id])
eq_(sorted(website.device_names), ['desktop', 'firefoxos'])
def test_get_icon_url(self):
website = Website(pk=1, icon_type='image/png')
if not storage_is_remote():
expected = (static_url('WEBSITE_ICON_URL')
% ('0', website.pk, 32, 'never'))
else:
path = '%s/%s-%s.png' % (website.get_icon_dir(), website.pk, 32)
expected = '%s?modified=never' % storage.url(path)
assert website.get_icon_url(32).endswith(expected), (
'Expected %s, got %s' % (expected, website.get_icon_url(32)))
def test_get_icon_url_big_pk(self):
website = Website(pk=9876, icon_type='image/png')
if not storage_is_remote():
expected = (static_url('WEBSITE_ICON_URL')
% (str(website.pk)[:-3], website.pk, 32, 'never'))
else:
path = '%s/%s-%s.png' % (website.get_icon_dir(), website.pk, 32)
expected = '%s?modified=never' % storage.url(path)
assert website.get_icon_url(32).endswith(expected), (
'Expected %s, got %s' % (expected, website.get_icon_url(32)))
def test_get_icon_url_bigger_pk(self):
website = Website(pk=98765432, icon_type='image/png')
if not storage_is_remote():
expected = (static_url('WEBSITE_ICON_URL')
% (str(website.pk)[:-3], website.pk, 32, 'never'))
else:
path = '%s/%s-%s.png' % (website.get_icon_dir(), website.pk, 32)
expected = '%s?modified=never' % storage.url(path)
assert website.get_icon_url(32).endswith(expected), (
'Expected %s, got %s' % (expected, website.get_icon_url(32)))
def test_get_icon_url_hash(self):
website = Website(pk=1, icon_type='image/png', icon_hash='abcdef')
assert website.get_icon_url(32).endswith('?modified=abcdef')
def test_get_icon_no_icon_blue(self):
website = Website(pk=8)
url = website.get_icon_url(32)
assert url.endswith('hub/asia-australia-blue-32.png'), url
def test_get_icon_no_icon_pink(self):
website = Website(pk=164)
url = website.get_icon_url(32)
assert url.endswith('hub/europe-africa-pink-32.png'), url
def test_get_preferred_regions(self):
website = Website()
website.preferred_regions = [URY.id, USA.id]
eq_([r.slug for r in website.get_preferred_regions()],
[USA.slug, URY.slug])
def test_get_promo_img_url(self):
website = Website(pk=337141)
eq_(website.get_promo_img_url('640'), '')
eq_(website.get_promo_img_url('1920'), '')
website.promo_img_hash = 'chicken'
ok_('website_promo_imgs/337/337141-640.png?modified=chicken' in
website.get_promo_img_url('640'))
ok_('website_promo_imgs/337/337141-1920.png?modified=chicken' in
website.get_promo_img_url('1920'))
class TestWebsiteESIndexation(TestCase):
@mock.patch('mkt.search.indexers.BaseIndexer.index_ids')
def test_update_search_index(self, update_mock):
website = website_factory()
update_mock.assert_called_once_with([website.pk])
@mock.patch('mkt.search.indexers.BaseIndexer.unindex')
def test_delete_search_index(self, delete_mock):
for x in xrange(4):
website_factory()
count = Website.objects.count()
Website.objects.all().delete()
eq_(delete_mock.call_count, count)
| bsd-3-clause | -185,489,397,345,421,900 | 40.961165 | 77 | 0.611985 | false |
uranusjr/django | tests/str/tests.py | 106 | 1229 | import datetime
from django.db import models
from django.test import TestCase
from django.test.utils import isolate_apps
from .models import InternationalArticle
class SimpleTests(TestCase):
def test_international(self):
a = InternationalArticle.objects.create(
headline='Girl wins €12.500 in lottery',
pub_date=datetime.datetime(2005, 7, 28)
)
self.assertEqual(str(a), 'Girl wins €12.500 in lottery')
@isolate_apps('str')
def test_defaults(self):
"""
The default implementation of __str__ and __repr__ should return
instances of str.
"""
class Default(models.Model):
pass
obj = Default()
# Explicit call to __str__/__repr__ to make sure str()/repr() don't
# coerce the returned value.
self.assertIsInstance(obj.__str__(), str)
self.assertIsInstance(obj.__repr__(), str)
self.assertEqual(str(obj), 'Default object (None)')
self.assertEqual(repr(obj), '<Default: Default object (None)>')
obj2 = Default(pk=100)
self.assertEqual(str(obj2), 'Default object (100)')
self.assertEqual(repr(obj2), '<Default: Default object (100)>')
| bsd-3-clause | 8,806,628,094,382,898,000 | 32.108108 | 75 | 0.620408 | false |
jostep/tensorflow | tensorflow/contrib/layers/python/layers/optimizers.py | 16 | 18901 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": train.MomentumOptimizer,
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
OPTIMIZER_SUMMARIES = [
"learning_rate",
"loss",
"gradients",
"gradient_norm",
"global_gradient_norm",
]
def optimize_loss(loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None,
summaries=None,
colocate_gradients_with_ops=False,
increment_global_step=True):
"""Given loss and parameters for optimizer, returns a training op.
Various ways of passing optimizers include:
- by string specifying the name of the optimizer. See OPTIMIZER_CLS_NAMES
for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
- by function taking learning rate `Tensor` as argument and returning an
`Optimizer` instance. E.g. `optimize_loss(...,
optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5))`.
Alternatively, if `learning_rate` is `None`, the function takes no
arguments. E.g. `optimize_loss(..., learning_rate=None,
optimizer=lambda: tf.train.MomentumOptimizer(0.5, momentum=0.5))`.
- by a subclass of `Optimizer` having a single-argument constructor
(the argument is the learning rate), such as AdamOptimizer or
AdagradOptimizer. E.g. `optimize_loss(...,
optimizer=tf.train.AdagradOptimizer)`.
- by an instance of a subclass of `Optimizer`.
E.g., `optimize_loss(..., optimizer=tf.train.AdagradOptimizer(0.5))`.
Args:
loss: Scalar `Tensor`.
global_step: Scalar int `Tensor`, step counter to update on each step
unless `increment_global_step` is `False`. If not supplied,
it will be fetched from the default graph (see
`tf.train.get_global_step` for details). If it has
not been created, no step will be incremented with each weight
update. `learning_rate_decay_fn` requires `global_step`.
learning_rate: float or `Tensor`, magnitude of update per each training
step. Can be `None`.
optimizer: string, class or optimizer instance, used as trainer.
string should be name of optimizer, like 'SGD',
'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
class should be sub-class of `tf.Optimizer` that implements
`compute_gradients` and `apply_gradients` functions.
optimizer instance should be instantiation of `tf.Optimizer`
sub-class and have `compute_gradients` and `apply_gradients`
functions.
gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
value.
gradient_multipliers: dict of variables or variable names to floats.
If present, gradients for specified
variables will be multiplied by given constant.
    clip_gradients: float, callable or `None`. If a float is provided, a global
      clipping is applied to prevent the norm of the gradient from exceeding this
      value. Alternatively, a callable can be provided, e.g. adaptive_clipping.
This callable takes a `list` of `(gradients, variables)` `tuple`s and
returns the same thing with the gradients modified.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
functions.
For example: `tf.train.exponential_decay`.
Ignored if `learning_rate` is not supplied.
update_ops: list of update `Operation`s to execute at each step. If `None`,
uses elements of UPDATE_OPS collection. The order of execution
between `update_ops` and `loss` is non-deterministic.
variables: list of variables to optimize or
`None` to use all trainable variables.
name: The name for this operation is used to scope operations and summaries.
summaries: List of internal quantities to visualize on tensorboard. If not
set only the loss and the learning rate will be reported. The
complete list is in OPTIMIZER_SUMMARIES.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
increment_global_step: Whether to increment `global_step`. If your model
calls `optimize_loss` multiple times per training step (e.g. to optimize
different parts of the model), use this arg to avoid incrementing
`global_step` more times than necessary.
Returns:
Training op.
Raises:
ValueError: if:
* `loss` is an invalid type or shape.
* `global_step` is an invalid type or shape.
* `learning_rate` is an invalid type or value.
* `optimizer` has the wrong type.
* `clip_gradients` is neither float nor callable.
* `learning_rate` and `learning_rate_decay_fn` are supplied, but no
`global_step` is available.
* `gradients` is empty.
"""
loss = ops.convert_to_tensor(loss)
contrib_framework.assert_scalar(loss)
if global_step is None:
global_step = contrib_framework.get_global_step()
else:
contrib_framework.assert_global_step(global_step)
with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
# Update ops take UPDATE_OPS collection if not provided.
if update_ops is None:
update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
    # Make sure update ops are run before computing loss.
if update_ops:
loss = control_flow_ops.with_dependencies(list(update_ops), loss)
# Learning rate variable, with possible decay.
lr = None
if learning_rate is not None:
if (isinstance(learning_rate, ops.Tensor) and
learning_rate.get_shape().ndims == 0):
lr = learning_rate
elif isinstance(learning_rate, float):
if learning_rate < 0.0:
raise ValueError("Invalid learning_rate %s.", learning_rate)
lr = vs.get_variable(
"learning_rate", [],
trainable=False,
initializer=init_ops.constant_initializer(learning_rate))
else:
raise ValueError("Learning rate should be 0d Tensor or float. "
"Got %s of type %s" % (str(learning_rate),
str(type(learning_rate))))
if summaries is None:
summaries = ["loss", "learning_rate", "global_gradient_norm"]
else:
for summ in summaries:
if summ not in OPTIMIZER_SUMMARIES:
raise ValueError("Summaries should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_SUMMARIES), summ))
if learning_rate is not None and learning_rate_decay_fn is not None:
if global_step is None:
raise ValueError("global_step is required for learning_rate_decay_fn.")
lr = learning_rate_decay_fn(lr, global_step)
if "learning_rate" in summaries:
summary.scalar("learning_rate", lr)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is string (%s)." % optimizer)
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_CLS_NAMES), optimizer))
opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
elif (isinstance(optimizer, type) and
issubclass(optimizer, optimizer_.Optimizer)):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is class (%s)." % optimizer)
opt = optimizer(learning_rate=lr)
elif isinstance(optimizer, optimizer_.Optimizer):
opt = optimizer
elif callable(optimizer):
if learning_rate is not None:
opt = optimizer(lr)
else:
opt = optimizer()
if not isinstance(opt, optimizer_.Optimizer):
raise ValueError("Unrecognized optimizer: function should return "
"subclass of Optimizer. Got %s." % str(opt))
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer, instance of "
"subclass of Optimizer or function with one argument. "
"Got %s." % str(optimizer))
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(
loss,
variables,
colocate_gradients_with_ops=colocate_gradients_with_ops)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(gradients,
gradient_noise_scale)
# Multiply some gradients.
if gradient_multipliers is not None:
gradients = _multiply_gradients(gradients, gradient_multipliers)
if not gradients:
raise ValueError(
"Empty list of (gradient, var) pairs encountered. This is most "
"likely to be caused by an improper value of gradient_multipliers.")
if "global_gradient_norm" in summaries or "gradient_norm" in summaries:
summary.scalar("global_norm/gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Optionally clip gradients by global norm.
if isinstance(clip_gradients, float):
gradients = _clip_gradients_by_norm(gradients, clip_gradients)
elif callable(clip_gradients):
gradients = clip_gradients(gradients)
elif clip_gradients is not None:
raise ValueError(
"Unknown type %s for clip_gradients" % type(clip_gradients))
# Add scalar summary for loss.
if "loss" in summaries:
summary.scalar("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
var_name = variable.name.replace(":", "_")
if "gradients" in summaries:
summary.histogram("gradients/%s" % var_name, grad_values)
if "gradient_norm" in summaries:
summary.scalar("gradient_norm/%s" % var_name,
clip_ops.global_norm([grad_values]))
if clip_gradients is not None and ("global_gradient_norm" in summaries or
"gradient_norm" in summaries):
summary.scalar("global_norm/clipped_gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Create gradient updates.
grad_updates = opt.apply_gradients(
gradients,
global_step=global_step if increment_global_step else None,
name="train")
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)
return train_tensor
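
# Illustrative usage (a sketch, not part of the original module): given a scalar
# `loss` tensor and a global step variable, a training op could be built with
#
#   train_op = optimize_loss(
#       loss,
#       global_step,
#       learning_rate=0.1,
#       optimizer="SGD",
#       clip_gradients=4.0,
#       summaries=["loss", "learning_rate"])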
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
return list(zip(clipped_gradients, variables))
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
"""Find max_norm given norm and previous average."""
with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
log_norm = math_ops.log(norm + epsilon)
def moving_average(name, value, decay):
moving_average_variable = vs.get_variable(
name,
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
return moving_averages.assign_moving_average(
moving_average_variable, value, decay, zero_debias=False)
# quicker adaptation at the beginning
if global_step is not None:
n = math_ops.to_float(global_step)
decay = math_ops.minimum(decay, n / (n + 1.))
# update averages
mean = moving_average("mean", log_norm, decay)
sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
variance = sq_mean - math_ops.square(mean)
std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
max_norms = math_ops.exp(mean + std_factor * std)
return max_norms, mean
def adaptive_clipping_fn(std_factor=2.,
decay=0.95,
static_max_norm=None,
global_step=None,
report_summary=False,
epsilon=1e-8,
name=None):
"""Adapt the clipping value using statistics on the norms.
  Implement adaptive gradient clipping as presented in section 3.2.1 of
https://arxiv.org/abs/1412.1602.
Keeps a moving average of the mean and std of the log(norm) of the gradient.
If the norm exceeds `exp(mean + std_factor*std)` then all gradients will be
rescaled such that the global norm becomes `exp(mean)`.
Args:
    std_factor: Python scalar (or tensor).
`max_norm = exp(mean + std_factor*std)`
decay: The smoothing factor of the moving averages.
static_max_norm: If provided, will threshold the norm to this value as an
extra safety.
global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`.
This provides a quicker adaptation of the mean for the first steps.
report_summary: If `True`, will add histogram summaries of the `max_norm`.
epsilon: Small value chosen to avoid zero variance.
name: The name for this operation is used to scope operations and summaries.
Returns:
A function for applying gradient clipping.
"""
def gradient_clipping(grads_and_vars):
"""Internal function for adaptive clipping."""
grads, variables = zip(*grads_and_vars)
norm = clip_ops.global_norm(grads)
max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
global_step, epsilon, name)
# reports the max gradient norm for debugging
if report_summary:
summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)
# factor will be 1. if norm is smaller than max_norm
factor = array_ops.where(norm < max_norm,
array_ops.ones_like(norm),
math_ops.exp(log_mean) / norm)
if static_max_norm is not None:
factor = math_ops.minimum(static_max_norm / norm, factor)
# apply factor
clipped_grads = []
for grad in grads:
if grad is None:
clipped_grads.append(None)
elif isinstance(grad, ops.IndexedSlices):
clipped_grads.append(
ops.IndexedSlices(grad.values * factor, grad.indices,
grad.dense_shape))
else:
clipped_grads.append(grad * factor)
return list(zip(clipped_grads, variables))
return gradient_clipping
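
# Illustrative usage (a sketch, not part of the original module): the callable
# returned above is intended to be passed as `clip_gradients`, e.g.
#
#   train_op = optimize_loss(
#       loss, global_step, learning_rate=0.1, optimizer="Adam",
#       clip_gradients=adaptive_clipping_fn(std_factor=2., decay=0.95))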
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
"""Adds scaled noise from a 0-mean normal distribution to gradients."""
gradients, variables = zip(*grads_and_vars)
noisy_gradients = []
for gradient in gradients:
if gradient is None:
noisy_gradients.append(None)
continue
if isinstance(gradient, ops.IndexedSlices):
gradient_shape = gradient.dense_shape
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
return list(zip(noisy_gradients, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients."""
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if (grad is not None and
(var in gradient_multipliers or var.name in gradient_multipliers)):
key = var if var in gradient_multipliers else var.name
multiplier = constant_op.constant(
gradient_multipliers[key], dtype=dtypes.float32)
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values * multiplier
grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
else:
grad *= multiplier
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
| apache-2.0 | 2,788,787,624,206,360,600 | 41.665914 | 80 | 0.644887 | false |
porksmash/swarfarm | herders/autocomplete.py | 1 | 1327 | from django.db.models import Q
from django.template import loader
from dal import autocomplete
from .models import MonsterTag, MonsterInstance
class MonsterInstanceAutocomplete(autocomplete.Select2QuerySetView):
paginate_by = 15
def get_queryset(self):
qs = MonsterInstance.objects.filter(owner__user=self.request.user)
if self.q:
# Split the terms into words and build a Q object
search_terms = self.q.split(' ')
query = Q()
for term in search_terms:
query.add(
Q(monster__name__icontains=term) |
Q(monster__awakens_from__name__icontains=term) |
Q(monster__awakens_to__name__icontains=term) |
Q(monster__element__startswith=term),
Q.AND
)
qs = qs.filter(query)
return qs
def get_result_label(self, item):
return loader.get_template('autocomplete/monster_instance_choice.html').render({'choice': item})
class MonsterTagAutocomplete(autocomplete.Select2QuerySetView):
paginate_by = 15
def get_queryset(self):
qs = MonsterTag.objects.all()
if self.q:
# Filter the queryset
qs = qs.filter(name__icontains=self.q)
return qs
| apache-2.0 | -5,766,654,569,417,694,000 | 27.234043 | 104 | 0.595328 | false |
gemalto/pycryptoki | pycryptoki/cryptoki/func_defs.py | 1 | 45634 | """
PKCS11 & CA Extension ctypes function bindings.
Note to maintainers: This is where new functions added to the libCryptoki C API should
be defined.
"""
from pycryptoki.cryptoki._ck_func_list import (
CK_SFNT_CA_FUNCTION_LIST_PTR_PTR,
CK_NOTIFY,
CK_FUNCTION_LIST_PTR_PTR,
)
from pycryptoki.cryptoki.ck_defs import *
from pycryptoki.cryptoki.helpers import make_late_binding_function
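
# Illustrative sketch (not part of the original module): a new libCryptoki call
# would be exposed here by naming the C function and listing its argument types,
# e.g. (hypothetical function name)
#
#   CA_SomeNewFunction = make_late_binding_function(
#       "CA_SomeNewFunction", [CK_SESSION_HANDLE, CK_ULONG_PTR]
#   )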
CA_GetFunctionList = make_late_binding_function(
"CA_GetFunctionList", [CK_SFNT_CA_FUNCTION_LIST_PTR_PTR]
)
CA_WaitForSlotEvent = make_late_binding_function(
"CA_WaitForSlotEvent", [CK_FLAGS, POINTER(CK_ULONG), CK_SLOT_ID_PTR, CK_VOID_PTR]
)
CA_InitIndirectToken = make_late_binding_function(
"CA_InitIndirectToken", [CK_SLOT_ID, CK_CHAR_PTR, CK_ULONG, CK_CHAR_PTR, CK_SESSION_HANDLE]
)
CA_InitIndirectPIN = make_late_binding_function(
"CA_InitIndirectPIN", [CK_SESSION_HANDLE, CK_CHAR_PTR, CK_ULONG, CK_SESSION_HANDLE]
)
CA_ResetPIN = make_late_binding_function("CA_ResetPIN", [CK_SESSION_HANDLE, CK_CHAR_PTR, CK_ULONG])
CA_InitRolePIN = make_late_binding_function(
"CA_InitRolePIN", [CK_SESSION_HANDLE, CK_USER_TYPE, CK_CHAR_PTR, CK_ULONG]
)
CA_InitSlotRolePIN = make_late_binding_function(
"CA_InitSlotRolePIN", [CK_SESSION_HANDLE, CK_SLOT_ID, CK_USER_TYPE, CK_CHAR_PTR, CK_ULONG]
)
CA_RoleStateGet = make_late_binding_function(
"CA_RoleStateGet", [CK_SLOT_ID, CK_USER_TYPE, POINTER(CA_ROLE_STATE)]
)
CA_CreateLoginChallenge = make_late_binding_function(
"CA_CreateLoginChallenge",
[CK_SESSION_HANDLE, CK_USER_TYPE, CK_ULONG, CK_CHAR_PTR, CK_ULONG_PTR, CK_CHAR_PTR],
)
CA_CreateContainerLoginChallenge = make_late_binding_function(
"CA_CreateContainerLoginChallenge",
[CK_SESSION_HANDLE, CK_SLOT_ID, CK_USER_TYPE, CK_ULONG, CK_CHAR_PTR, CK_ULONG_PTR, CK_CHAR_PTR],
)
CA_Deactivate = make_late_binding_function("CA_Deactivate", [CK_SLOT_ID, CK_USER_TYPE])
CA_FindAdminSlotForSlot = make_late_binding_function(
"CA_FindAdminSlotForSlot", [CK_SLOT_ID, POINTER(CK_SLOT_ID), POINTER(CK_SLOT_ID)]
)
CA_TokenInsert = make_late_binding_function(
"CA_TokenInsert", [CK_SESSION_HANDLE, CT_TokenHndle, CK_SLOT_ID]
)
CA_TokenInsertNoAuth = make_late_binding_function(
"CA_TokenInsertNoAuth", [CT_TokenHndle, CK_SLOT_ID]
)
CA_TokenZeroize = make_late_binding_function(
"CA_TokenZeroize", [CK_SESSION_HANDLE, CK_SLOT_ID, CK_FLAGS]
)
CA_TokenDelete = make_late_binding_function("CA_TokenDelete", [CK_SESSION_HANDLE, CK_SLOT_ID])
CA_OpenSession = make_late_binding_function(
"CA_OpenSession",
[CK_SLOT_ID, CK_ULONG, CK_FLAGS, CK_VOID_PTR, CK_NOTIFY, CK_SESSION_HANDLE_PTR],
)
CA_OpenSessionWithAppID = make_late_binding_function(
"CA_OpenSessionWithAppID",
[CK_SLOT_ID, CK_FLAGS, CK_ULONG, CK_ULONG, CK_VOID_PTR, CK_NOTIFY, CK_SESSION_HANDLE_PTR],
)
CA_IndirectLogin = make_late_binding_function(
"CA_IndirectLogin", [CK_SESSION_HANDLE, CK_USER_TYPE, CK_SESSION_HANDLE]
)
CA_InitializeRemotePEDVector = make_late_binding_function(
"CA_InitializeRemotePEDVector", [CK_SESSION_HANDLE]
)
CA_DeleteRemotePEDVector = make_late_binding_function(
"CA_DeleteRemotePEDVector", [CK_SESSION_HANDLE]
)
CA_GetRemotePEDVectorStatus = make_late_binding_function(
"CA_GetRemotePEDVectorStatus", [CK_SLOT_ID, CK_ULONG_PTR]
)
CA_ConfigureRemotePED = make_late_binding_function(
"CA_ConfigureRemotePED", [CK_SLOT_ID, CK_CHAR_PTR, CK_ULONG, CK_ULONG_PTR]
)
CA_DismantleRemotePED = make_late_binding_function("CA_DismantleRemotePED", [CK_SLOT_ID, CK_ULONG])
CA_Restart = make_late_binding_function("CA_Restart", [CK_SLOT_ID])
CA_RestartForContainer = make_late_binding_function(
"CA_RestartForContainer", [CK_SLOT_ID, CK_ULONG]
)
CA_CloseApplicationID = make_late_binding_function(
"CA_CloseApplicationID", [CK_SLOT_ID, CK_ULONG, CK_ULONG]
)
CA_CloseApplicationIDForContainer = make_late_binding_function(
"CA_CloseApplicationIDForContainer", [CK_SLOT_ID, CK_ULONG, CK_ULONG, CK_ULONG]
)
CA_OpenApplicationID = make_late_binding_function(
"CA_OpenApplicationID", [CK_SLOT_ID, CK_ULONG, CK_ULONG]
)
CA_OpenApplicationIDForContainer = make_late_binding_function(
"CA_OpenApplicationIDForContainer", [CK_SLOT_ID, CK_ULONG, CK_ULONG, CK_ULONG]
)
CA_SetApplicationID = make_late_binding_function("CA_SetApplicationID", [CK_ULONG, CK_ULONG])
CA_DescribeUtilizationBinId = make_late_binding_function(
"CA_DescribeUtilizationBinId", [CK_ULONG, CK_CHAR_PTR]
)
CA_ReadUtilizationMetrics = make_late_binding_function(
"CA_ReadUtilizationMetrics", [CK_SESSION_HANDLE]
)
CA_ReadAndResetUtilizationMetrics = make_late_binding_function(
"CA_ReadAndResetUtilizationMetrics", [CK_SESSION_HANDLE]
)
CA_ReadAllUtilizationCounters = make_late_binding_function(
"CA_ReadAllUtilizationCounters", [CK_SESSION_HANDLE, CK_UTILIZATION_COUNTER_PTR, CK_ULONG_PTR]
)
# pka
CA_SetAuthorizationData = make_late_binding_function(
"CA_SetAuthorizationData",
[CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_UTF8CHAR_PTR, CK_ULONG, CK_UTF8CHAR_PTR, CK_ULONG],
)
CA_ResetAuthorizationData = make_late_binding_function(
"CA_ResetAuthorizationData", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_UTF8CHAR_PTR, CK_ULONG]
)
CA_AuthorizeKey = make_late_binding_function(
"CA_AuthorizeKey", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_UTF8CHAR_PTR, CK_ULONG]
)
CA_AssignKey = make_late_binding_function("CA_AssignKey", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE])
CA_IncrementFailedAuthCount = make_late_binding_function(
"CA_IncrementFailedAuthCount", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE]
)
CA_ManualKCV = make_late_binding_function("CA_ManualKCV", [CK_SESSION_HANDLE])
CA_SetLKCV = make_late_binding_function("CA_SetLKCV", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG])
CA_SetKCV = make_late_binding_function("CA_SetKCV", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG])
CA_SetRDK = make_late_binding_function("CA_SetRDK", [CK_SESSION_HANDLE, POINTER(CK_BYTE), CK_ULONG])
CA_SetCloningDomain = make_late_binding_function("CA_SetCloningDomain", [CK_BYTE_PTR, CK_ULONG])
CA_ClonePrivateKey = make_late_binding_function(
"CA_ClonePrivateKey",
[CK_SESSION_HANDLE, CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_OBJECT_HANDLE_PTR],
)
CA_CloneObject = make_late_binding_function(
"CA_CloneObject",
[CK_SESSION_HANDLE, CK_SESSION_HANDLE, CK_ULONG, CK_OBJECT_HANDLE, CK_OBJECT_HANDLE_PTR],
)
CA_GenerateCloningKEV = make_late_binding_function(
"CA_GenerateCloningKEV", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_CloneAsTargetInit = make_late_binding_function(
"CA_CloneAsTargetInit",
[
CK_SESSION_HANDLE,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BBOOL,
CK_BYTE_PTR,
CK_ULONG_PTR,
],
)
CA_CloneAsSource = make_late_binding_function(
"CA_CloneAsSource",
[
CK_SESSION_HANDLE,
CK_ULONG,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BBOOL,
CK_BYTE_PTR,
CK_ULONG_PTR,
],
)
CA_CloneAsTarget = make_late_binding_function(
"CA_CloneAsTarget",
[
CK_SESSION_HANDLE,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_BBOOL,
CK_OBJECT_HANDLE_PTR,
],
)
CA_SetMofN = make_late_binding_function("CA_SetMofN", [CK_BBOOL])
CA_GenerateMofN = make_late_binding_function(
"CA_GenerateMofN",
[CK_SESSION_HANDLE, CK_ULONG, CA_MOFN_GENERATION_PTR, CK_ULONG, CK_ULONG, CK_VOID_PTR],
)
CA_GenerateCloneableMofN = make_late_binding_function(
"CA_GenerateCloneableMofN",
[CK_SESSION_HANDLE, CK_ULONG, CA_MOFN_GENERATION_PTR, CK_ULONG, CK_ULONG, CK_VOID_PTR],
)
CA_ModifyMofN = make_late_binding_function(
"CA_ModifyMofN",
[CK_SESSION_HANDLE, CK_ULONG, CA_MOFN_GENERATION_PTR, CK_ULONG, CK_ULONG, CK_VOID_PTR],
)
CA_CloneMofN = make_late_binding_function(
"CA_CloneMofN", [CK_SESSION_HANDLE, CK_SESSION_HANDLE, CK_VOID_PTR]
)
CA_CloneModifyMofN = make_late_binding_function(
"CA_CloneModifyMofN", [CK_SESSION_HANDLE, CK_SESSION_HANDLE, CK_VOID_PTR]
)
CA_ActivateMofN = make_late_binding_function(
"CA_ActivateMofN", [CK_SESSION_HANDLE, CA_MOFN_ACTIVATION_PTR, CK_ULONG]
)
CA_DeactivateMofN = make_late_binding_function("CA_DeactivateMofN", [CK_SESSION_HANDLE])
CA_GetMofNStatus = make_late_binding_function("CA_GetMofNStatus", [CK_SLOT_ID, CA_MOFN_STATUS_PTR])
CA_DuplicateMofN = make_late_binding_function("CA_DuplicateMofN", [CK_SESSION_HANDLE])
CA_IsMofNEnabled = make_late_binding_function("CA_IsMofNEnabled", [CK_SLOT_ID, CK_ULONG_PTR])
CA_IsMofNRequired = make_late_binding_function("CA_IsMofNRequired", [CK_SLOT_ID, CK_ULONG_PTR])
CA_GenerateTokenKeys = make_late_binding_function(
"CA_GenerateTokenKeys", [CK_SESSION_HANDLE, CK_ATTRIBUTE_PTR, CK_ULONG]
)
CA_GetTokenCertificateInfo = make_late_binding_function(
"CA_GetTokenCertificateInfo", [CK_SLOT_ID, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_SetTokenCertificateSignature = make_late_binding_function(
"CA_SetTokenCertificateSignature",
[CK_SESSION_HANDLE, CK_ULONG, CK_ULONG, CK_ATTRIBUTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG],
)
CA_GetModuleList = make_late_binding_function(
"CA_GetModuleList", [CK_SLOT_ID, CKCA_MODULE_ID_PTR, CK_ULONG, CK_ULONG_PTR]
)
CA_GetModuleInfo = make_late_binding_function(
"CA_GetModuleInfo", [CK_SLOT_ID, CKCA_MODULE_ID, CKCA_MODULE_INFO_PTR]
)
CA_LoadModule = make_late_binding_function(
"CA_LoadModule",
[
CK_SESSION_HANDLE,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CKCA_MODULE_ID_PTR,
],
)
CA_LoadEncryptedModule = make_late_binding_function(
"CA_LoadEncryptedModule",
[
CK_SESSION_HANDLE,
CK_OBJECT_HANDLE,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CKCA_MODULE_ID_PTR,
],
)
CA_UnloadModule = make_late_binding_function("CA_UnloadModule", [CK_SESSION_HANDLE, CKCA_MODULE_ID])
CA_PerformModuleCall = make_late_binding_function(
"CA_PerformModuleCall",
[CK_SESSION_HANDLE, CKCA_MODULE_ID, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG, CK_ULONG_PTR],
)
CA_FirmwareUpdate = make_late_binding_function(
"CA_FirmwareUpdate",
[
CK_SESSION_HANDLE,
CK_ULONG,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
],
)
CA_FirmwareRollback = make_late_binding_function("CA_FirmwareRollback", [CK_SESSION_HANDLE])
CA_CapabilityUpdate = make_late_binding_function(
"CA_CapabilityUpdate", [CK_SESSION_HANDLE, CK_ULONG, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR]
)
CA_GetUserContainerNumber = make_late_binding_function(
"CA_GetUserContainerNumber", [CK_SLOT_ID, CK_ULONG_PTR]
)
CA_GetUserContainerName = make_late_binding_function(
"CA_GetUserContainerName", [CK_SLOT_ID, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_SetUserContainerName = make_late_binding_function(
"CA_SetUserContainerName", [CK_SLOT_ID, CK_BYTE_PTR, CK_ULONG]
)
CA_GetTokenInsertionCount = make_late_binding_function(
"CA_GetTokenInsertionCount", [CK_SLOT_ID, CK_ULONG_PTR]
)
CA_GetRollbackFirmwareVersion = make_late_binding_function(
"CA_GetRollbackFirmwareVersion", [CK_SLOT_ID, CK_ULONG_PTR]
)
CA_GetFPV = make_late_binding_function("CA_GetFPV", [CK_SLOT_ID, CK_ULONG_PTR])
CA_GetTPV = make_late_binding_function("CA_GetTPV", [CK_SLOT_ID, CK_ULONG_PTR])
CA_GetExtendedTPV = make_late_binding_function(
"CA_GetExtendedTPV", [CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_GetConfigurationElementDescription = make_late_binding_function(
"CA_GetConfigurationElementDescription",
[
CK_SLOT_ID,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG_PTR,
CK_ULONG_PTR,
CK_ULONG_PTR,
CK_CHAR_PTR,
],
)
CA_GetHSMCapabilitySet = make_late_binding_function(
"CA_GetHSMCapabilitySet", [CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_GetHSMCapabilitySetting = make_late_binding_function(
"CA_GetHSMCapabilitySetting", [CK_SLOT_ID, CK_ULONG, CK_ULONG_PTR]
)
CA_GetHSMPolicySet = make_late_binding_function(
"CA_GetHSMPolicySet", [CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_GetHSMPolicySetting = make_late_binding_function(
"CA_GetHSMPolicySetting", [CK_SLOT_ID, CK_ULONG, CK_ULONG_PTR]
)
CA_GetContainerCapabilitySet = make_late_binding_function(
"CA_GetContainerCapabilitySet",
[CK_SLOT_ID, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR],
)
CA_GetContainerCapabilitySetting = make_late_binding_function(
"CA_GetContainerCapabilitySetting", [CK_SLOT_ID, CK_ULONG, CK_ULONG, CK_ULONG_PTR]
)
CA_GetContainerPolicySet = make_late_binding_function(
"CA_GetContainerPolicySet",
[CK_SLOT_ID, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR],
)
CA_GetContainerPolicySetting = make_late_binding_function(
"CA_GetContainerPolicySetting", [CK_SLOT_ID, CK_ULONG, CK_ULONG, CK_ULONG_PTR]
)
CA_GetPartitionPolicyTemplate = make_late_binding_function(
"CA_GetPartitionPolicyTemplate", [CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR, CK_BYTE_PTR]
)
CA_SetTPV = make_late_binding_function("CA_SetTPV", [CK_SESSION_HANDLE, CK_ULONG])
CA_SetExtendedTPV = make_late_binding_function(
"CA_SetExtendedTPV", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_SetHSMPolicy = make_late_binding_function(
"CA_SetHSMPolicy", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_SetHSMPolicies = make_late_binding_function(
"CA_SetHSMPolicies", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_SetDestructiveHSMPolicy = make_late_binding_function(
"CA_SetDestructiveHSMPolicy", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_SetDestructiveHSMPolicies = make_late_binding_function(
"CA_SetDestructiveHSMPolicies", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_SetContainerPolicy = make_late_binding_function(
"CA_SetContainerPolicy", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG, CK_ULONG]
)
CA_SetContainerPolicies = make_late_binding_function(
"CA_SetContainerPolicies", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_GetTokenCapabilities = make_late_binding_function(
"CA_GetTokenCapabilities", [CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_SetTokenPolicies = make_late_binding_function(
"CA_SetTokenPolicies", [CK_SESSION_HANDLE, CK_SLOT_ID, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_GetTokenPolicies = make_late_binding_function(
"CA_GetTokenPolicies", [CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_RetrieveLicenseList = make_late_binding_function(
"CA_RetrieveLicenseList", [CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_QueryLicense = make_late_binding_function(
"CA_QueryLicense",
[CK_SLOT_ID, CK_ULONG, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_BYTE_PTR],
)
CA_GetContainerStatus = make_late_binding_function(
"CA_GetContainerStatus",
[CK_SLOT_ID, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR],
)
CA_GetTokenStatus = make_late_binding_function(
"CA_GetTokenStatus", [CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_GetSessionInfo = make_late_binding_function(
"CA_GetSessionInfo", [CK_SESSION_HANDLE, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_GetCVFirmwareVersion = make_late_binding_function(
"CA_GetCVFirmwareVersion", [CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_ReadCommonStore = make_late_binding_function(
"CA_ReadCommonStore", [CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_WriteCommonStore = make_late_binding_function(
"CA_WriteCommonStore", [CK_ULONG, CK_BYTE_PTR, CK_ULONG]
)
CA_GetPrimarySlot = make_late_binding_function(
"CA_GetPrimarySlot", [CK_SESSION_HANDLE, CK_SLOT_ID_PTR]
)
CA_GetSecondarySlot = make_late_binding_function(
"CA_GetSecondarySlot", [CK_SESSION_HANDLE, CK_SLOT_ID_PTR]
)
CA_SwitchSecondarySlot = make_late_binding_function(
"CA_SwitchSecondarySlot", [CK_SESSION_HANDLE, CK_SLOT_ID, CK_ULONG]
)
CA_CloseSecondarySession = make_late_binding_function(
"CA_CloseSecondarySession", [CK_SESSION_HANDLE, CK_SLOT_ID, CK_ULONG]
)
CA_CloseAllSecondarySessions = make_late_binding_function(
"CA_CloseAllSecondarySessions", [CK_SESSION_HANDLE]
)
CA_ChoosePrimarySlot = make_late_binding_function("CA_ChoosePrimarySlot", [CK_SESSION_HANDLE])
CA_ChooseSecondarySlot = make_late_binding_function("CA_ChooseSecondarySlot", [CK_SESSION_HANDLE])
CA_CloneObjectToAllSessions = make_late_binding_function(
"CA_CloneObjectToAllSessions", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE]
)
CA_CloneAllObjectsToSession = make_late_binding_function(
"CA_CloneAllObjectsToSession", [CK_SESSION_HANDLE, CK_SLOT_ID]
)
CA_ResetDevice = make_late_binding_function("CA_ResetDevice", [CK_SLOT_ID, CK_FLAGS])
CA_Zeroize = make_late_binding_function("CA_Zeroize", [CK_SLOT_ID, CK_FLAGS])
CA_FactoryReset = make_late_binding_function("CA_FactoryReset", [CK_SLOT_ID, CK_FLAGS])
CA_SetPedId = make_late_binding_function("CA_SetPedId", [CK_SLOT_ID, CK_ULONG])
CA_GetPedId = make_late_binding_function("CA_GetPedId", [CK_SLOT_ID, POINTER(CK_ULONG)])
CA_SpRawRead = make_late_binding_function("CA_SpRawRead", [CK_SLOT_ID, CK_ULONG_PTR])
CA_SpRawWrite = make_late_binding_function("CA_SpRawWrite", [CK_SLOT_ID, CK_ULONG_PTR])
CA_CheckOperationState = make_late_binding_function(
"CA_CheckOperationState", [CK_SESSION_HANDLE, CK_ULONG, POINTER(CK_BBOOL)]
)
CA_DestroyMultipleObjects = make_late_binding_function(
"CA_DestroyMultipleObjects", [CK_SESSION_HANDLE, CK_ULONG, CK_OBJECT_HANDLE_PTR, CK_ULONG_PTR]
)
CA_OpenSecureToken = make_late_binding_function(
"CA_OpenSecureToken",
[
CK_SESSION_HANDLE,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG_PTR,
CK_ULONG_PTR,
CK_ULONG,
CK_CHAR_PTR,
],
)
CA_CloseSecureToken = make_late_binding_function(
"CA_CloseSecureToken", [CK_SESSION_HANDLE, CK_ULONG]
)
CA_ListSecureTokenInit = make_late_binding_function(
"CA_ListSecureTokenInit",
[CK_SESSION_HANDLE, CK_ULONG, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR, CK_BYTE_PTR],
)
CA_ListSecureTokenUpdate = make_late_binding_function(
"CA_ListSecureTokenUpdate", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG, CK_BYTE_PTR, CK_ULONG]
)
CA_GetSecureElementMeta = make_late_binding_function(
"CA_GetSecureElementMeta",
[
CK_SESSION_HANDLE,
CK_ULONG,
CK_MECHANISM_PTR,
CK_ULONG_PTR,
CK_ULONG_PTR,
CK_BYTE_PTR,
CK_ULONG,
],
)
CA_HAInit = make_late_binding_function("CA_HAInit", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE])
CA_HAGetMasterPublic = make_late_binding_function(
"CA_HAGetMasterPublic", [CK_SLOT_ID, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_HAGetLoginChallenge = make_late_binding_function(
"CA_HAGetLoginChallenge",
[CK_SESSION_HANDLE, CK_USER_TYPE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR],
)
CA_HAAnswerLoginChallenge = make_late_binding_function(
"CA_HAAnswerLoginChallenge",
[CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR],
)
CA_HALogin = make_late_binding_function(
"CA_HALogin", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_HAAnswerMofNChallenge = make_late_binding_function(
"CA_HAAnswerMofNChallenge",
[CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR],
)
CA_HAActivateMofN = make_late_binding_function(
"CA_HAActivateMofN", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG]
)
CA_GetHAState = make_late_binding_function("CA_GetHAState", [CK_SLOT_ID, CK_HA_STATE_PTR])
CA_GetTokenCertificates = make_late_binding_function(
"CA_GetTokenCertificates", [CK_SLOT_ID, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_ExtractMaskedObject = make_late_binding_function(
"CA_ExtractMaskedObject", [CK_SESSION_HANDLE, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_InsertMaskedObject = make_late_binding_function(
"CA_InsertMaskedObject", [CK_SESSION_HANDLE, CK_ULONG_PTR, CK_BYTE_PTR, CK_ULONG]
)
CA_MultisignValue = make_late_binding_function(
"CA_MultisignValue",
[
CK_SESSION_HANDLE,
CK_MECHANISM_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG_PTR,
CK_ULONG_PTR,
POINTER(CK_BYTE_PTR),
CK_ULONG_PTR,
POINTER(CK_BYTE_PTR),
],
)
CA_SIMExtract = make_late_binding_function(
"CA_SIMExtract",
[
CK_SESSION_HANDLE,
CK_ULONG,
CK_OBJECT_HANDLE_PTR,
CK_ULONG,
CK_ULONG,
CKA_SIM_AUTH_FORM,
CK_ULONG_PTR,
POINTER(CK_BYTE_PTR),
CK_BBOOL,
CK_ULONG_PTR,
CK_BYTE_PTR,
],
)
CA_SIMInsert = make_late_binding_function(
"CA_SIMInsert",
[
CK_SESSION_HANDLE,
CK_ULONG,
CKA_SIM_AUTH_FORM,
CK_ULONG_PTR,
POINTER(CK_BYTE_PTR),
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG_PTR,
CK_OBJECT_HANDLE_PTR,
],
)
CA_SIMMultiSign = make_late_binding_function(
"CA_SIMMultiSign",
[
CK_SESSION_HANDLE,
CK_MECHANISM_PTR,
CK_ULONG,
CKA_SIM_AUTH_FORM,
CK_ULONG_PTR,
POINTER(CK_BYTE_PTR),
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_ULONG_PTR,
POINTER(CK_BYTE_PTR),
CK_ULONG_PTR,
POINTER(CK_BYTE_PTR),
],
)
CA_Extract = make_late_binding_function("CA_Extract", [CK_SESSION_HANDLE, CK_MECHANISM_PTR])
CA_Insert = make_late_binding_function("CA_Insert", [CK_SESSION_HANDLE, CK_MECHANISM_PTR])
CA_GetTokenObjectUID = make_late_binding_function(
"CA_GetTokenObjectUID", [CK_SLOT_ID, CK_ULONG, CK_ULONG, POINTER(CK_BYTE)]
)
CA_GetTokenObjectHandle = make_late_binding_function(
"CA_GetTokenObjectHandle", [CK_SLOT_ID, POINTER(CK_BYTE), CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_GetObjectUID = make_late_binding_function(
"CA_GetObjectUID", [CK_SLOT_ID, CK_ULONG, CK_ULONG, CK_ULONG, POINTER(CK_BYTE)]
)
CA_GetObjectHandle = make_late_binding_function(
"CA_GetObjectHandle", [CK_SLOT_ID, CK_ULONG, POINTER(CK_BYTE), CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_DeleteContainer = make_late_binding_function("CA_DeleteContainer", [CK_SESSION_HANDLE])
CA_MTKSetStorage = make_late_binding_function("CA_MTKSetStorage", [CK_SESSION_HANDLE, CK_ULONG])
CA_MTKRestore = make_late_binding_function("CA_MTKRestore", [CK_SLOT_ID])
CA_MTKResplit = make_late_binding_function("CA_MTKResplit", [CK_SLOT_ID])
CA_MTKZeroize = make_late_binding_function("CA_MTKZeroize", [CK_SLOT_ID])
CA_MTKGetState = make_late_binding_function("CA_MTKGetState", [CK_SLOT_ID, CK_ULONG_PTR])
CA_TamperClear = make_late_binding_function("CA_TamperClear", [CK_SESSION_HANDLE])
CA_STMToggle = make_late_binding_function("CA_STMToggle", [CK_SESSION_HANDLE, CK_ULONG])
CA_STMGetState = make_late_binding_function("CA_STMGetState", [CK_SLOT_ID, CK_ULONG_PTR])
CA_GetTSV = make_late_binding_function("CA_GetTSV", [CK_SLOT_ID, CK_ULONG_PTR])
CA_InvokeServiceInit = make_late_binding_function(
"CA_InvokeServiceInit", [CK_SESSION_HANDLE, CK_ULONG]
)
CA_InvokeService = make_late_binding_function(
"CA_InvokeService", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_ULONG_PTR]
)
CA_InvokeServiceFinal = make_late_binding_function(
"CA_InvokeServiceFinal", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_InvokeServiceAsynch = make_late_binding_function(
"CA_InvokeServiceAsynch", [CK_SESSION_HANDLE, CK_ULONG, CK_BYTE_PTR, CK_ULONG]
)
CA_InvokeServiceSinglePart = make_late_binding_function(
"CA_InvokeServiceSinglePart",
[CK_SESSION_HANDLE, CK_ULONG, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR],
)
CA_EncodeECPrimeParams = make_late_binding_function(
"CA_EncodeECPrimeParams",
[
CK_BYTE_PTR,
CK_ULONG_PTR,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
],
)
CA_EncodeECChar2Params = make_late_binding_function(
"CA_EncodeECChar2Params",
[
CK_BYTE_PTR,
CK_ULONG_PTR,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
CK_BYTE_PTR,
CK_ULONG,
],
)
CA_EncodeECParamsFromFile = make_late_binding_function(
"CA_EncodeECParamsFromFile", [CK_BYTE_PTR, CK_ULONG_PTR, CK_BYTE_PTR]
)
CA_GetHSMStats = make_late_binding_function(
"CA_GetHSMStats", [CK_SLOT_ID, CK_ULONG, CK_ULONG_PTR, POINTER(HSM_STATS_PARAMS)]
)
CA_GetHSMStorageInformation = make_late_binding_function(
"CA_GetHSMStorageInformation",
[CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR],
)
CA_GetTokenStorageInformation = make_late_binding_function(
"CA_GetTokenStorageInformation",
[CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR],
)
CA_GetContainerStorageInformation = make_late_binding_function(
"CA_GetContainerStorageInformation",
[CK_SLOT_ID, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR],
)
CA_SetContainerSize = make_late_binding_function(
"CA_SetContainerSize", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_CreateContainerWithPolicy = make_late_binding_function(
"CA_CreateContainerWithPolicy",
[
CK_SESSION_HANDLE,
CK_ULONG,
CK_CHAR_PTR,
CK_ULONG,
CK_CHAR_PTR,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG_PTR,
CK_ULONG,
CK_ULONG,
CK_BYTE_PTR,
],
)
CA_CreateContainer = make_late_binding_function(
"CA_CreateContainer",
[
CK_SESSION_HANDLE,
CK_ULONG,
CK_CHAR_PTR,
CK_ULONG,
CK_CHAR_PTR,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG,
CK_ULONG_PTR,
],
)
CA_InitAudit = make_late_binding_function(
"CA_InitAudit", [CK_SLOT_ID, CK_CHAR_PTR, CK_ULONG, CK_CHAR_PTR]
)
CA_LogVerify = make_late_binding_function(
"CA_LogVerify", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_ULONG, CK_ULONG_PTR]
)
CA_LogVerifyFile = make_late_binding_function(
"CA_LogVerifyFile", [CK_SESSION_HANDLE, CK_CHAR_PTR, CK_ULONG_PTR]
)
CA_LogExternal = make_late_binding_function(
"CA_LogExternal", [CK_SLOT_ID, CK_SESSION_HANDLE, POINTER(CK_CHAR), CK_ULONG]
)
CA_LogImportSecret = make_late_binding_function(
"CA_LogImportSecret", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG]
)
CA_LogExportSecret = make_late_binding_function(
"CA_LogExportSecret", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_TimeSync = make_late_binding_function("CA_TimeSync", [CK_SESSION_HANDLE, CK_ULONG])
CA_GetTime = make_late_binding_function("CA_GetTime", [CK_SESSION_HANDLE, CK_ULONG_PTR])
CA_LogSetConfig = make_late_binding_function(
"CA_LogSetConfig", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG, CK_ULONG, CK_ULONG, CK_BYTE_PTR]
)
CA_LogGetConfig = make_late_binding_function(
"CA_LogGetConfig",
[
CK_SESSION_HANDLE,
POINTER(CK_ULONG),
POINTER(CK_ULONG),
POINTER(CK_ULONG),
POINTER(CK_ULONG),
CK_BYTE_PTR,
],
)
CA_ReplaceFastPathKEK = make_late_binding_function("CA_ReplaceFastPathKEK", [CK_SESSION_HANDLE])
CA_LogGetStatus = make_late_binding_function(
"CA_LogGetStatus",
[
CK_SLOT_ID,
POINTER(CK_ULONG),
POINTER(CK_ULONG),
POINTER(CK_ULONG),
POINTER(CK_ULONG),
POINTER(CK_ULONG),
],
)
CA_DeleteContainerWithHandle = make_late_binding_function(
"CA_DeleteContainerWithHandle", [CK_SESSION_HANDLE, CK_ULONG]
)
CA_GetContainerList = make_late_binding_function(
"CA_GetContainerList", [CK_SLOT_ID, CK_ULONG, CK_ULONG, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_GetContainerName = make_late_binding_function(
"CA_GetContainerName", [CK_SLOT_ID, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_GetNumberOfAllowedContainers = make_late_binding_function(
"CA_GetNumberOfAllowedContainers", [CK_SLOT_ID, CK_ULONG_PTR]
)
CA_GetTunnelSlotNumber = make_late_binding_function(
"CA_GetTunnelSlotNumber", [CK_SLOT_ID, CK_SLOT_ID_PTR]
)
CA_GetClusterState = make_late_binding_function(
"CA_GetClusterState", [CK_SLOT_ID, CK_CLUSTER_STATE_PTR]
)
CA_LockClusteredSlot = make_late_binding_function("CA_LockClusteredSlot", [CK_SLOT_ID])
CA_UnlockClusteredSlot = make_late_binding_function("CA_UnlockClusteredSlot", [CK_SLOT_ID])
CA_LKMInitiatorChallenge = make_late_binding_function(
"CA_LKMInitiatorChallenge",
[
CK_SESSION_HANDLE,
CK_OBJECT_HANDLE,
CK_OBJECT_HANDLE,
CK_ULONG,
CK_LKM_TOKEN_ID_PTR,
CK_LKM_TOKEN_ID_PTR,
CK_CHAR_PTR,
CK_ULONG_PTR,
],
)
CA_LKMReceiverResponse = make_late_binding_function(
"CA_LKMReceiverResponse",
[
CK_SESSION_HANDLE,
CK_OBJECT_HANDLE,
CK_OBJECT_HANDLE,
CK_ULONG,
CK_LKM_TOKEN_ID_PTR,
CK_CHAR_PTR,
CK_ULONG,
CK_CHAR_PTR,
CK_ULONG_PTR,
],
)
CA_LKMInitiatorComplete = make_late_binding_function(
"CA_LKMInitiatorComplete",
[
CK_SESSION_HANDLE,
CK_CHAR_PTR,
CK_ULONG,
CK_ATTRIBUTE_PTR,
CK_ULONG,
CK_ATTRIBUTE_PTR,
CK_ULONG,
CK_CHAR_PTR,
CK_ULONG_PTR,
CK_OBJECT_HANDLE_PTR,
CK_OBJECT_HANDLE_PTR,
],
)
CA_LKMReceiverComplete = make_late_binding_function(
"CA_LKMReceiverComplete",
[
CK_SESSION_HANDLE,
CK_CHAR_PTR,
CK_ULONG,
CK_ATTRIBUTE_PTR,
CK_ULONG,
CK_ATTRIBUTE_PTR,
CK_ULONG,
CK_OBJECT_HANDLE_PTR,
CK_OBJECT_HANDLE_PTR,
],
)
CA_ModifyUsageCount = make_late_binding_function(
"CA_ModifyUsageCount", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ULONG, CK_ULONG]
)
CA_EnableUnauthTokenInsertion = make_late_binding_function(
"CA_EnableUnauthTokenInsertion", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG_PTR]
)
CA_GetUnauthTokenInsertionStatus = make_late_binding_function(
"CA_GetUnauthTokenInsertionStatus",
[CK_SESSION_HANDLE, CK_ULONG, POINTER(CK_ULONG), POINTER(CK_ULONG)],
)
CA_DisableUnauthTokenInsertion = make_late_binding_function(
"CA_DisableUnauthTokenInsertion", [CK_SESSION_HANDLE, CK_ULONG]
)
CA_STCRegister = make_late_binding_function(
"CA_STCRegister",
[
CK_SESSION_HANDLE,
CK_SLOT_ID,
POINTER(CK_CHAR),
CK_ULONG,
POINTER(CK_CHAR),
CK_ULONG,
POINTER(CK_CHAR),
CK_ULONG,
],
)
CA_STCDeregister = make_late_binding_function(
"CA_STCDeregister", [CK_SESSION_HANDLE, CK_SLOT_ID, POINTER(CK_CHAR)]
)
CA_STCGetPubKey = make_late_binding_function(
"CA_STCGetPubKey",
[
CK_SESSION_HANDLE,
CK_SLOT_ID,
POINTER(CK_CHAR),
POINTER(CK_CHAR),
CK_ULONG_PTR,
POINTER(CK_CHAR),
CK_ULONG_PTR,
],
)
CA_STCGetClientsList = make_late_binding_function(
"CA_STCGetClientsList", [CK_SESSION_HANDLE, CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR]
)
CA_STCGetClientInfo = make_late_binding_function(
"CA_STCGetClientInfo",
[
CK_SESSION_HANDLE,
CK_SLOT_ID,
CK_ULONG,
POINTER(CK_CHAR),
CK_ULONG_PTR,
CK_ULONG_PTR,
POINTER(CK_CHAR),
CK_ULONG_PTR,
POINTER(CK_CHAR),
CK_ULONG_PTR,
],
)
CA_STCGetPartPubKey = make_late_binding_function(
"CA_STCGetPartPubKey",
[CK_SESSION_HANDLE, CK_SLOT_ID, POINTER(CK_CHAR), CK_ULONG_PTR, POINTER(CK_CHAR), CK_ULONG_PTR],
)
CA_STCGetAdminPubKey = make_late_binding_function(
"CA_STCGetAdminPubKey",
[CK_SLOT_ID, POINTER(CK_CHAR), CK_ULONG_PTR, POINTER(CK_CHAR), CK_ULONG_PTR],
)
CA_STCSetCipherAlgorithm = make_late_binding_function(
"CA_STCSetCipherAlgorithm", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_STCGetCipherAlgorithm = make_late_binding_function(
"CA_STCGetCipherAlgorithm", [CK_SESSION_HANDLE, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_STCClearCipherAlgorithm = make_late_binding_function(
"CA_STCClearCipherAlgorithm", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_STCSetDigestAlgorithm = make_late_binding_function(
"CA_STCSetDigestAlgorithm", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_STCGetDigestAlgorithm = make_late_binding_function(
"CA_STCGetDigestAlgorithm", [CK_SESSION_HANDLE, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_STCClearDigestAlgorithm = make_late_binding_function(
"CA_STCClearDigestAlgorithm", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_STCSetKeyLifeTime = make_late_binding_function(
"CA_STCSetKeyLifeTime", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_STCGetKeyLifeTime = make_late_binding_function(
"CA_STCGetKeyLifeTime", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG_PTR]
)
CA_STCSetKeyActivationTimeOut = make_late_binding_function(
"CA_STCSetKeyActivationTimeOut", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_STCGetKeyActivationTimeOut = make_late_binding_function(
"CA_STCGetKeyActivationTimeOut", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG_PTR]
)
CA_STCSetMaxSessions = make_late_binding_function(
"CA_STCSetMaxSessions", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_STCGetMaxSessions = make_late_binding_function(
"CA_STCGetMaxSessions", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG_PTR]
)
CA_STCSetSequenceWindowSize = make_late_binding_function(
"CA_STCSetSequenceWindowSize", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG]
)
CA_STCGetSequenceWindowSize = make_late_binding_function(
"CA_STCGetSequenceWindowSize", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG_PTR]
)
CA_STCIsEnabled = make_late_binding_function("CA_STCIsEnabled", [CK_ULONG, CK_BYTE_PTR])
CA_STCGetState = make_late_binding_function("CA_STCGetState", [CK_ULONG, POINTER(CK_CHAR), CK_BYTE])
CA_STCGetCurrentKeyLife = make_late_binding_function(
"CA_STCGetCurrentKeyLife", [CK_SESSION_HANDLE, CK_ULONG, CK_ULONG_PTR]
)
CA_GetSlotIdForPhysicalSlot = make_late_binding_function(
"CA_GetSlotIdForPhysicalSlot", [CK_ULONG, CK_SLOT_ID_PTR]
)
CA_GetSlotIdForContainer = make_late_binding_function(
"CA_GetSlotIdForContainer", [CK_ULONG, CK_ULONG, CK_SLOT_ID_PTR]
)
CA_STCGetChannelID = make_late_binding_function("CA_STCGetChannelID", [CK_SLOT_ID, CK_ULONG_PTR])
CA_STCGetCipherID = make_late_binding_function("CA_STCGetCipherID", [CK_SLOT_ID, CK_ULONG_PTR])
CA_STCGetDigestID = make_late_binding_function("CA_STCGetDigestID", [CK_SLOT_ID, CK_ULONG_PTR])
CA_STCGetCipherIDs = make_late_binding_function(
"CA_STCGetCipherIDs", [CK_SLOT_ID, CK_ULONG_PTR, CK_BYTE_PTR]
)
CA_STCGetCipherNameByID = make_late_binding_function(
"CA_STCGetCipherNameByID", [CK_SLOT_ID, CK_ULONG, CK_CHAR_PTR, CK_BYTE]
)
CA_STCGetDigestIDs = make_late_binding_function(
"CA_STCGetDigestIDs", [CK_SLOT_ID, CK_ULONG_PTR, CK_BYTE_PTR]
)
CA_STCGetDigestNameByID = make_late_binding_function(
"CA_STCGetDigestNameByID", [CK_SLOT_ID, CK_ULONG, CK_CHAR_PTR, CK_BYTE]
)
CA_GetServerInstanceBySlotID = make_late_binding_function(
"CA_GetServerInstanceBySlotID", [CK_SLOT_ID, CK_ULONG_PTR]
)
CA_GetSlotListFromServerInstance = make_late_binding_function(
"CA_GetSlotListFromServerInstance", [CK_ULONG, CK_SLOT_ID_PTR, CK_ULONG_PTR]
)
CA_PerformSelfTest = make_late_binding_function(
"CA_PerformSelfTest", [CK_SLOT_ID, CK_ULONG, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
CA_DeriveKeyAndWrap = make_late_binding_function(
"CA_DeriveKeyAndWrap",
[
CK_SESSION_HANDLE,
CK_MECHANISM_PTR,
CK_OBJECT_HANDLE,
CK_ATTRIBUTE_PTR,
CK_ULONG,
CK_MECHANISM_PTR,
CK_OBJECT_HANDLE,
CK_BYTE_PTR,
CK_ULONG_PTR,
],
)
CA_Get = make_late_binding_function("CA_Get", [CK_SLOT_ID, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR])
CA_GetFirmwareVersion = make_late_binding_function(
"CA_GetFirmwareVersion", [CK_SLOT_ID, CK_ULONG_PTR, CK_ULONG_PTR, CK_ULONG_PTR]
)
C_Initialize = make_late_binding_function("C_Initialize", [CK_VOID_PTR])
C_Finalize = make_late_binding_function("C_Finalize", [CK_VOID_PTR])
C_GetInfo = make_late_binding_function("C_GetInfo", [CK_INFO_PTR])
C_GetFunctionList = make_late_binding_function("C_GetFunctionList", [CK_FUNCTION_LIST_PTR_PTR])
C_GetSlotList = make_late_binding_function(
"C_GetSlotList", [CK_BBOOL, CK_SLOT_ID_PTR, CK_ULONG_PTR]
)
C_GetSlotInfo = make_late_binding_function("C_GetSlotInfo", [CK_SLOT_ID, CK_SLOT_INFO_PTR])
C_GetTokenInfo = make_late_binding_function("C_GetTokenInfo", [CK_SLOT_ID, CK_TOKEN_INFO_PTR])
C_GetMechanismList = make_late_binding_function(
"C_GetMechanismList", [CK_SLOT_ID, CK_MECHANISM_TYPE_PTR, CK_ULONG_PTR]
)
C_GetMechanismInfo = make_late_binding_function(
"C_GetMechanismInfo", [CK_SLOT_ID, CK_MECHANISM_TYPE, CK_MECHANISM_INFO_PTR]
)
C_InitToken = make_late_binding_function(
"C_InitToken", [CK_SLOT_ID, CK_UTF8CHAR_PTR, CK_ULONG, CK_UTF8CHAR_PTR]
)
C_InitPIN = make_late_binding_function("C_InitPIN", [CK_SESSION_HANDLE, CK_UTF8CHAR_PTR, CK_ULONG])
C_SetPIN = make_late_binding_function(
"C_SetPIN", [CK_SESSION_HANDLE, CK_UTF8CHAR_PTR, CK_ULONG, CK_UTF8CHAR_PTR, CK_ULONG]
)
C_OpenSession = make_late_binding_function(
"C_OpenSession", [CK_SLOT_ID, CK_FLAGS, CK_VOID_PTR, CK_NOTIFY, CK_SESSION_HANDLE_PTR]
)
C_CloseSession = make_late_binding_function("C_CloseSession", [CK_SESSION_HANDLE])
C_CloseAllSessions = make_late_binding_function("C_CloseAllSessions", [CK_SLOT_ID])
C_GetSessionInfo = make_late_binding_function(
"C_GetSessionInfo", [CK_SESSION_HANDLE, CK_SESSION_INFO_PTR]
)
C_GetOperationState = make_late_binding_function(
"C_GetOperationState", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_SetOperationState = make_late_binding_function(
"C_SetOperationState",
[CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_OBJECT_HANDLE, CK_OBJECT_HANDLE],
)
C_Login = make_late_binding_function(
"C_Login", [CK_SESSION_HANDLE, CK_USER_TYPE, CK_UTF8CHAR_PTR, CK_ULONG]
)
C_Logout = make_late_binding_function("C_Logout", [CK_SESSION_HANDLE])
C_CreateObject = make_late_binding_function(
"C_CreateObject", [CK_SESSION_HANDLE, CK_ATTRIBUTE_PTR, CK_ULONG, CK_OBJECT_HANDLE_PTR]
)
C_CopyObject = make_late_binding_function(
"C_CopyObject",
[CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ATTRIBUTE_PTR, CK_ULONG, CK_OBJECT_HANDLE_PTR],
)
C_DestroyObject = make_late_binding_function(
"C_DestroyObject", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE]
)
C_GetObjectSize = make_late_binding_function(
"C_GetObjectSize", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ULONG_PTR]
)
C_GetAttributeValue = make_late_binding_function(
"C_GetAttributeValue", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ATTRIBUTE_PTR, CK_ULONG]
)
C_SetAttributeValue = make_late_binding_function(
"C_SetAttributeValue", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE, CK_ATTRIBUTE_PTR, CK_ULONG]
)
C_FindObjectsInit = make_late_binding_function(
"C_FindObjectsInit", [CK_SESSION_HANDLE, CK_ATTRIBUTE_PTR, CK_ULONG]
)
C_FindObjects = make_late_binding_function(
"C_FindObjects", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE_PTR, CK_ULONG, CK_ULONG_PTR]
)
C_FindObjectsFinal = make_late_binding_function("C_FindObjectsFinal", [CK_SESSION_HANDLE])
C_EncryptInit = make_late_binding_function(
"C_EncryptInit", [CK_SESSION_HANDLE, CK_MECHANISM_PTR, CK_OBJECT_HANDLE]
)
C_Encrypt = make_late_binding_function(
"C_Encrypt", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_EncryptUpdate = make_late_binding_function(
"C_EncryptUpdate", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_EncryptFinal = make_late_binding_function(
"C_EncryptFinal", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_DecryptInit = make_late_binding_function(
"C_DecryptInit", [CK_SESSION_HANDLE, CK_MECHANISM_PTR, CK_OBJECT_HANDLE]
)
C_Decrypt = make_late_binding_function(
"C_Decrypt", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_DecryptUpdate = make_late_binding_function(
"C_DecryptUpdate", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_DecryptFinal = make_late_binding_function(
"C_DecryptFinal", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_DigestInit = make_late_binding_function("C_DigestInit", [CK_SESSION_HANDLE, CK_MECHANISM_PTR])
C_Digest = make_late_binding_function(
"C_Digest", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_DigestUpdate = make_late_binding_function(
"C_DigestUpdate", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG]
)
C_DigestKey = make_late_binding_function("C_DigestKey", [CK_SESSION_HANDLE, CK_OBJECT_HANDLE])
C_DigestFinal = make_late_binding_function(
"C_DigestFinal", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_SignInit = make_late_binding_function(
"C_SignInit", [CK_SESSION_HANDLE, CK_MECHANISM_PTR, CK_OBJECT_HANDLE]
)
C_Sign = make_late_binding_function(
"C_Sign", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_SignUpdate = make_late_binding_function(
"C_SignUpdate", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG]
)
C_SignFinal = make_late_binding_function(
"C_SignFinal", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_SignRecoverInit = make_late_binding_function(
"C_SignRecoverInit", [CK_SESSION_HANDLE, CK_MECHANISM_PTR, CK_OBJECT_HANDLE]
)
C_SignRecover = make_late_binding_function(
"C_SignRecover", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_VerifyInit = make_late_binding_function(
"C_VerifyInit", [CK_SESSION_HANDLE, CK_MECHANISM_PTR, CK_OBJECT_HANDLE]
)
C_Verify = make_late_binding_function(
"C_Verify", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG]
)
C_VerifyUpdate = make_late_binding_function(
"C_VerifyUpdate", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG]
)
C_VerifyFinal = make_late_binding_function(
"C_VerifyFinal", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG]
)
C_VerifyRecoverInit = make_late_binding_function(
"C_VerifyRecoverInit", [CK_SESSION_HANDLE, CK_MECHANISM_PTR, CK_OBJECT_HANDLE]
)
C_VerifyRecover = make_late_binding_function(
"C_VerifyRecover", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_DigestEncryptUpdate = make_late_binding_function(
"C_DigestEncryptUpdate", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_DecryptDigestUpdate = make_late_binding_function(
"C_DecryptDigestUpdate", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_SignEncryptUpdate = make_late_binding_function(
"C_SignEncryptUpdate", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_DecryptVerifyUpdate = make_late_binding_function(
"C_DecryptVerifyUpdate", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG, CK_BYTE_PTR, CK_ULONG_PTR]
)
C_GenerateKey = make_late_binding_function(
"C_GenerateKey",
[CK_SESSION_HANDLE, CK_MECHANISM_PTR, CK_ATTRIBUTE_PTR, CK_ULONG, CK_OBJECT_HANDLE_PTR],
)
C_GenerateKeyPair = make_late_binding_function(
"C_GenerateKeyPair",
[
CK_SESSION_HANDLE,
CK_MECHANISM_PTR,
CK_ATTRIBUTE_PTR,
CK_ULONG,
CK_ATTRIBUTE_PTR,
CK_ULONG,
CK_OBJECT_HANDLE_PTR,
CK_OBJECT_HANDLE_PTR,
],
)
C_WrapKey = make_late_binding_function(
"C_WrapKey",
[
CK_SESSION_HANDLE,
CK_MECHANISM_PTR,
CK_OBJECT_HANDLE,
CK_OBJECT_HANDLE,
CK_BYTE_PTR,
CK_ULONG_PTR,
],
)
C_UnwrapKey = make_late_binding_function(
"C_UnwrapKey",
[
CK_SESSION_HANDLE,
CK_MECHANISM_PTR,
CK_OBJECT_HANDLE,
CK_BYTE_PTR,
CK_ULONG,
CK_ATTRIBUTE_PTR,
CK_ULONG,
CK_OBJECT_HANDLE_PTR,
],
)
C_DeriveKey = make_late_binding_function(
"C_DeriveKey",
[
CK_SESSION_HANDLE,
CK_MECHANISM_PTR,
CK_OBJECT_HANDLE,
CK_ATTRIBUTE_PTR,
CK_ULONG,
CK_OBJECT_HANDLE_PTR,
],
)
C_SeedRandom = make_late_binding_function(
"C_SeedRandom", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG]
)
C_GenerateRandom = make_late_binding_function(
"C_GenerateRandom", [CK_SESSION_HANDLE, CK_BYTE_PTR, CK_ULONG]
)
C_GetFunctionStatus = make_late_binding_function("C_GetFunctionStatus", [CK_SESSION_HANDLE])
C_CancelFunction = make_late_binding_function("C_CancelFunction", [CK_SESSION_HANDLE])
C_WaitForSlotEvent = make_late_binding_function(
"C_WaitForSlotEvent", [CK_FLAGS, CK_SLOT_ID_PTR, CK_VOID_PTR]
)
CA_GetApplicationID = make_late_binding_function(
"CA_GetApplicationID", [POINTER(CK_APPLICATION_ID)]
)
CA_OpenApplicationIDV2 = make_late_binding_function(
"CA_OpenApplicationIDV2", [CK_SLOT_ID, POINTER(CK_APPLICATION_ID)]
)
CA_CloseApplicationIDV2 = make_late_binding_function(
"CA_CloseApplicationIDV2", [CK_SLOT_ID, POINTER(CK_APPLICATION_ID)]
)
CA_SetApplicationIDV2 = make_late_binding_function(
"CA_SetApplicationIDV2", [POINTER(CK_APPLICATION_ID)]
)
| apache-2.0 | 6,327,332,039,044,497,000 | 36.100813 | 100 | 0.693233 | false |
jieyu/maple | script/maple/idiom/history.py | 1 | 3745 | """Copyright 2011 The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors - Jie Yu ([email protected])
"""
import os
from maple.core import logging
from maple.core import proto
from maple.idiom import iroot
def history_pb2():
return proto.module('idiom.history_pb2')
def test_result_str(success):
if success == None:
return 'None'
elif success:
return 'Success'
else:
return 'Fail'
class TestHistoryEntry(object):
def __init__(self, proto, db):
self.proto = proto
self.db = db
def iroot(self):
return self.db.iroot_db.find_iroot(self.proto.iroot_id)
def seed(self):
return self.proto.seed
def success(self):
if self.proto.HasField('success'):
return self.proto.success
else:
return None
def __str__(self):
content = []
content.append('%-5d' % self.iroot().id())
content.append('%-7s' % iroot.idiom_type_name(self.iroot().idiom()))
content.append('%-7s' % test_result_str(self.success()))
content.append('%d' % self.seed())
return ' '.join(content)
class TestHistory(object):
def __init__(self, sinfo, iroot_db):
self.sinfo = sinfo
self.iroot_db = iroot_db
self.proto = history_pb2().HistoryTableProto()
self.history = []
def load(self, histo_name):
if not os.path.exists(histo_name):
return
f = open(histo_name, 'rb')
self.proto.ParseFromString(f.read())
f.close()
for history_proto in self.proto.history:
entry = TestHistoryEntry(history_proto, self)
self.history.append(entry)
def num_success(self, idiom):
succ_count = 0
for entry in self.history:
if entry.iroot().idiom() == idiom:
if entry.success() != None:
if entry.success():
succ_count += 1
return succ_count
def num_fail(self, idiom):
fail_count = 0
for entry in self.history:
if entry.iroot().idiom() == idiom:
if entry.success() != None:
if not entry.success():
fail_count += 1
return fail_count
def num_iroot(self, idiom):
iroot_id_set = set()
for entry in self.history:
if entry.iroot().idiom() == idiom:
iroot_id_set.add(entry.iroot().id())
return len(iroot_id_set)
def display(self, f):
for entry in self.history:
f.write('%s\n' % str(entry))
def display_summary(self, f):
f.write('Test History Summary\n')
f.write('---------------------------\n')
for idiom in range(1,6):
num_success = self.num_success(idiom)
num_fail = self.num_fail(idiom)
num_iroot = self.num_iroot(idiom)
f.write('# Idiom%d tests = %d\n' % (idiom, num_success + num_fail))
f.write(' # succ = %d\n' % num_success)
f.write(' # fail = %d\n' % num_fail)
f.write(' # succ iroot = %d\n' % num_success)
f.write(' # fail iroot = %d\n' % (num_iroot - num_success))
| apache-2.0 | -706,543,157,349,273,000 | 34 | 84 | 0.569559 | false |
qma/pants | src/python/pants/backend/jvm/targets/java_agent.py | 14 | 4062 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from six import string_types
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.base.exceptions import TargetDefinitionException
class JavaAgent(JavaLibrary):
"""Defines a java agent entrypoint."""
def __init__(self,
name,
sources=None,
excludes=None,
resources=None,
premain=None,
agent_class=None,
can_redefine=False,
can_retransform=False,
can_set_native_method_prefix=False,
**kwargs):
"""
:param string premain: When an agent is specified at JVM launch time this attribute specifies
the agent class. Exactly one of ``premain`` or ``agent_class`` must be specified.
:param string agent_class: If an implementation supports a mechanism to start agents sometime
after the VM has started then this attribute specifies the agent class. Exactly one of
``premain`` or ``agent_class`` must be specified.
:param bool can_redefine: `True` if the ability to redefine classes is needed by this agent;
`False` by default.
:param bool can_retransform: `True` if the ability to retransform classes is needed by this
agent; `False` by default.
:param bool can_set_native_method_prefix: `True` if the ability to set the native method prefix
is needed by this agent; `False` by default.
"""
super(JavaAgent, self).__init__(
name=name,
sources=self.assert_list(sources, key_arg='sources'),
provides=None,
excludes=self.assert_list(excludes, key_arg='excludes'),
resources=self.assert_list(resources, key_arg='resources'),
**kwargs)
if not (premain or agent_class):
raise TargetDefinitionException(self, "Must have at least one of 'premain' or 'agent_class' "
"defined.")
if premain and not isinstance(premain, string_types):
raise TargetDefinitionException(self, 'The premain must be a fully qualified class name, '
'given {} of type {}'.format(premain, type(premain)))
if agent_class and not isinstance(agent_class, string_types):
raise TargetDefinitionException(self,
'The agent_class must be a fully qualified class name, given '
'{} of type {}'.format(agent_class, type(agent_class)))
self._premain = premain
self._agent_class = agent_class
self._can_redefine = can_redefine
self._can_retransform = can_retransform
self._can_set_native_method_prefix = can_set_native_method_prefix
# TODO(Eric Ayers) As of 2/5/2015 this call is DEPRECATED and should be removed soon
self.add_labels('java_agent')
@property
def premain(self):
"""The launch time agent fully qualified class name.
Either ``agent_class`` or ``premain`` will be defined and the other will be `None`.
"""
return self._premain
@property
def agent_class(self):
"""The post-launch-time agent fully qualified class name.
Either ``agent_class`` or ``premain`` will be defined and the other will be `None`.
"""
return self._agent_class
@property
def can_redefine(self):
"""Returns `True` if the ability to redefine classes is needed by this agent."""
return self._can_redefine
@property
def can_retransform(self):
"""Returns `True` if the ability to retransform classes is needed by this agent."""
return self._can_retransform
@property
def can_set_native_method_prefix(self):
"""Returns `True` if the ability to set he native method prefix is needed by this agent."""
return self._can_set_native_method_prefix
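# --- Editor's note: illustrative only, not part of the original target definition ---
# In a BUILD file this target type would be declared roughly as in the sketch below;
# the target name, source file and agent class are made-up examples. Per the
# constructor above, exactly one of ``premain`` or ``agent_class`` must be supplied.
#
#   java_agent(
#     name='instrumentation-agent',
#     sources=['InstrumentationAgent.java'],
#     premain='org.example.agent.InstrumentationAgent',
#     can_retransform=True,
#   )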
| apache-2.0 | -4,477,075,875,195,826,000 | 39.62 | 100 | 0.649434 | false |
cjaymes/expatriate | src/expatriate/model/xs/NMTokensType.py | 1 | 1113 | # Copyright 2016 Casey Jaymes
# This file is part of Expatriate.
#
# Expatriate is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Expatriate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Expatriate. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
from ..decorators import *
from .List import List
from .NMTokenType import NMTokenType
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class NMTokensType(List):
def parse_item(self, item_value):
return NMTokenType().parse_value(item_value)
def produce_item(self, item_value):
return NMTokenType().produce_value(item_value)
| lgpl-3.0 | 7,696,769,641,600,931,000 | 32.727273 | 77 | 0.754717 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/samba3/libsmb_samba_internal.py | 1 | 1533 | # encoding: utf-8
# module samba.samba3.libsmb_samba_internal
# from /usr/lib/python2.7/dist-packages/samba/samba3/libsmb_samba_internal.so
# by generator 1.135
""" libsmb wrapper """
# no imports
# no functions
# classes
from object import object
class Conn(object):
""" libsmb connection """
def close(self, *args, **kwargs): # real signature unknown
""" Close a file handle """
pass
def create(self, *args, **kwargs): # real signature unknown
""" Open a file """
pass
def delete_on_close(self, *args, **kwargs): # real signature unknown
""" Set/Reset the delete on close flag """
pass
def get_oplock_break(self, *args, **kwargs): # real signature unknown
""" Wait for an oplock break """
pass
def read(self, *args, **kwargs): # real signature unknown
""" Read from a file handle """
pass
def readdir(self, *args, **kwargs): # real signature unknown
""" List a directory """
pass
def truncate(self, *args, **kwargs): # real signature unknown
""" Truncate a file """
pass
def write(self, *args, **kwargs): # real signature unknown
""" Write to a file handle """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
| gpl-2.0 | -3,532,237,089,675,072,500 | 26.872727 | 77 | 0.587736 | false |
vrv/tensorflow | tensorflow/tensorboard/backend/http_util_test.py | 27 | 6514 | # -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests HTTP utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gzip
import six
from werkzeug import test as wtest
from werkzeug import wrappers
from tensorflow.python.platform import test
from tensorflow.tensorboard.backend import http_util
class RespondTest(test.TestCase):
def testHelloWorld(self):
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, '<b>hello world</b>', 'text/html')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.response[0], six.b('<b>hello world</b>'))
def testHeadRequest_doesNotWrite(self):
builder = wtest.EnvironBuilder(method='HEAD')
env = builder.get_environ()
request = wrappers.Request(env)
r = http_util.Respond(request, '<b>hello world</b>', 'text/html')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.response[0], six.b(''))
def testPlainText_appendsUtf8ToContentType(self):
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, 'hello', 'text/plain')
h = r.headers
self.assertEqual(h.get('Content-Type'), 'text/plain; charset=utf-8')
def testContentLength_isInBytes(self):
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, '爱', 'text/plain')
self.assertEqual(r.headers.get('Content-Length'), '3')
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, '爱'.encode('utf-8'), 'text/plain')
self.assertEqual(r.headers.get('Content-Length'), '3')
def testResponseCharsetTranscoding(self):
bean = '要依法治国是赞美那些谁是公义的和惩罚恶人。 - 韩非'
# input is unicode string, output is gbk string
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, bean, 'text/plain; charset=gbk')
self.assertEqual(r.response[0], bean.encode('gbk'))
# input is utf-8 string, output is gbk string
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, bean.encode('utf-8'), 'text/plain; charset=gbk')
self.assertEqual(r.response[0], bean.encode('gbk'))
# input is object with unicode strings, output is gbk json
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, {'red': bean}, 'application/json; charset=gbk')
self.assertEqual(r.response[0], b'{"red": "' + bean.encode('gbk') + b'"}')
# input is object with utf-8 strings, output is gbk json
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(
q, {'red': bean.encode('utf-8')}, 'application/json; charset=gbk')
self.assertEqual(r.response[0], b'{"red": "' + bean.encode('gbk') + b'"}')
# input is object with gbk strings, output is gbk json
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(
q, {'red': bean.encode('gbk')},
'application/json; charset=gbk',
encoding='gbk')
self.assertEqual(r.response[0], b'{"red": "' + bean.encode('gbk') + b'"}')
def testAcceptGzip_compressesResponse(self):
fall_of_hyperion_canto1_stanza1 = '\n'.join([
'Fanatics have their dreams, wherewith they weave',
'A paradise for a sect; the savage too',
'From forth the loftiest fashion of his sleep',
'Guesses at Heaven; pity these have not',
'Trac\'d upon vellum or wild Indian leaf',
'The shadows of melodious utterance.',
'But bare of laurel they live, dream, and die;',
'For Poesy alone can tell her dreams,',
'With the fine spell of words alone can save',
'Imagination from the sable charm',
'And dumb enchantment. Who alive can say,',
'\'Thou art no Poet may\'st not tell thy dreams?\'',
'Since every man whose soul is not a clod',
'Hath visions, and would speak, if he had loved',
'And been well nurtured in his mother tongue.',
'Whether the dream now purpos\'d to rehearse',
'Be poet\'s or fanatic\'s will be known',
'When this warm scribe my hand is in the grave.',
])
e1 = wtest.EnvironBuilder(headers={'Accept-Encoding': '*'}).get_environ()
any_encoding = wrappers.Request(e1)
r = http_util.Respond(
any_encoding, fall_of_hyperion_canto1_stanza1, 'text/plain')
self.assertEqual(r.headers.get('Content-Encoding'), 'gzip')
self.assertEqual(
_gunzip(r.response[0]), fall_of_hyperion_canto1_stanza1.encode('utf-8'))
e2 = wtest.EnvironBuilder(headers={'Accept-Encoding': 'gzip'}).get_environ()
gzip_encoding = wrappers.Request(e2)
r = http_util.Respond(
gzip_encoding, fall_of_hyperion_canto1_stanza1, 'text/plain')
self.assertEqual(r.headers.get('Content-Encoding'), 'gzip')
self.assertEqual(
_gunzip(r.response[0]), fall_of_hyperion_canto1_stanza1.encode('utf-8'))
r = http_util.Respond(
any_encoding, fall_of_hyperion_canto1_stanza1, 'image/png')
self.assertEqual(
r.response[0], fall_of_hyperion_canto1_stanza1.encode('utf-8'))
def testJson_getsAutoSerialized(self):
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, [1, 2, 3], 'application/json')
self.assertEqual(r.response[0], b'[1, 2, 3]')
def testExpires_setsCruiseControl(self):
q = wrappers.Request(wtest.EnvironBuilder().get_environ())
r = http_util.Respond(q, '<b>hello world</b>', 'text/html', expires=60)
self.assertEqual(r.headers.get('Cache-Control'), 'private, max-age=60')
def _gunzip(bs):
return gzip.GzipFile('', 'rb', 9, six.BytesIO(bs)).read()
if __name__ == '__main__':
test.main()
| apache-2.0 | -1,895,963,755,092,176,100 | 40.435897 | 80 | 0.662902 | false |
googleapis/googleapis-gen | google/cloud/websecurityscanner/v1alpha/websecurityscanner-v1alpha-py/google/cloud/websecurityscanner_v1alpha/types/crawled_url.py | 1 | 1644 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.websecurityscanner.v1alpha',
manifest={
'CrawledUrl',
},
)
class CrawledUrl(proto.Message):
r"""A CrawledUrl resource represents a URL that was crawled
during a ScanRun. Web Security Scanner Service crawls the web
applications, following all links within the scope of sites, to
find the URLs to test against.
Attributes:
http_method (str):
Output only. The http method of the request
that was used to visit the URL, in uppercase.
url (str):
Output only. The URL that was crawled.
body (str):
Output only. The body of the request that was
used to visit the URL.
"""
http_method = proto.Field(
proto.STRING,
number=1,
)
url = proto.Field(
proto.STRING,
number=2,
)
body = proto.Field(
proto.STRING,
number=3,
)
__all__ = tuple(sorted(__protobuf__.manifest))
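# --- Editor's note: illustrative usage sketch, not part of the generated module ---
# CrawledUrl is a proto-plus message, so it can be constructed with keyword arguments
# and serialized via the proto-plus helpers; the field values below are made up.
#
#   crawled = CrawledUrl(http_method="GET", url="https://example.com/login", body="")
#   payload = CrawledUrl.serialize(crawled)     # bytes
#   restored = CrawledUrl.deserialize(payload)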
| apache-2.0 | -8,863,311,507,948,790,000 | 27.344828 | 74 | 0.65146 | false |
garimakhulbe/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/models/storage_account.py | 4 | 6450 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class StorageAccount(Resource):
"""The storage account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param provisioning_state: Gets the status of the storage account at the
time the operation was called. Possible values include: 'Creating',
'ResolvingDNS', 'Succeeded'
:type provisioning_state: str or :class:`ProvisioningState
<Fixtures.AcceptanceTestsStorageManagementClient.models.ProvisioningState>`
:param account_type: Gets the type of the storage account. Possible
values include: 'Standard_LRS', 'Standard_ZRS', 'Standard_GRS',
'Standard_RAGRS', 'Premium_LRS'
:type account_type: str or :class:`AccountType
<Fixtures.AcceptanceTestsStorageManagementClient.models.AccountType>`
:param primary_endpoints: Gets the URLs that are used to perform a
retrieval of a public blob, queue or table object.Note that StandardZRS
and PremiumLRS accounts only return the blob endpoint.
:type primary_endpoints: :class:`Endpoints
<Fixtures.AcceptanceTestsStorageManagementClient.models.Endpoints>`
:param primary_location: Gets the location of the primary for the storage
account.
:type primary_location: str
:param status_of_primary: Gets the status indicating whether the primary
location of the storage account is available or unavailable. Possible
values include: 'Available', 'Unavailable'
:type status_of_primary: str or :class:`AccountStatus
<Fixtures.AcceptanceTestsStorageManagementClient.models.AccountStatus>`
:param last_geo_failover_time: Gets the timestamp of the most recent
instance of a failover to the secondary location. Only the most recent
timestamp is retained. This element is not returned if there has never
been a failover instance. Only available if the accountType is
StandardGRS or StandardRAGRS.
:type last_geo_failover_time: datetime
:param secondary_location: Gets the location of the geo replicated
secondary for the storage account. Only available if the accountType is
StandardGRS or StandardRAGRS.
:type secondary_location: str
:param status_of_secondary: Gets the status indicating whether the
secondary location of the storage account is available or unavailable.
Only available if the accountType is StandardGRS or StandardRAGRS.
Possible values include: 'Available', 'Unavailable'
:type status_of_secondary: str or :class:`AccountStatus
<Fixtures.AcceptanceTestsStorageManagementClient.models.AccountStatus>`
:param creation_time: Gets the creation date and time of the storage
account in UTC.
:type creation_time: datetime
:param custom_domain: Gets the user assigned custom domain assigned to
this storage account.
:type custom_domain: :class:`CustomDomain
<Fixtures.AcceptanceTestsStorageManagementClient.models.CustomDomain>`
:param secondary_endpoints: Gets the URLs that are used to perform a
retrieval of a public blob, queue or table object from the secondary
location of the storage account. Only available if the accountType is
StandardRAGRS.
:type secondary_endpoints: :class:`Endpoints
<Fixtures.AcceptanceTestsStorageManagementClient.models.Endpoints>`
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'},
'account_type': {'key': 'properties.accountType', 'type': 'AccountType'},
'primary_endpoints': {'key': 'properties.primaryEndpoints', 'type': 'Endpoints'},
'primary_location': {'key': 'properties.primaryLocation', 'type': 'str'},
'status_of_primary': {'key': 'properties.statusOfPrimary', 'type': 'AccountStatus'},
'last_geo_failover_time': {'key': 'properties.lastGeoFailoverTime', 'type': 'iso-8601'},
'secondary_location': {'key': 'properties.secondaryLocation', 'type': 'str'},
'status_of_secondary': {'key': 'properties.statusOfSecondary', 'type': 'AccountStatus'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'custom_domain': {'key': 'properties.customDomain', 'type': 'CustomDomain'},
'secondary_endpoints': {'key': 'properties.secondaryEndpoints', 'type': 'Endpoints'},
}
def __init__(self, location, tags=None, provisioning_state=None, account_type=None, primary_endpoints=None, primary_location=None, status_of_primary=None, last_geo_failover_time=None, secondary_location=None, status_of_secondary=None, creation_time=None, custom_domain=None, secondary_endpoints=None):
super(StorageAccount, self).__init__(location=location, tags=tags)
self.provisioning_state = provisioning_state
self.account_type = account_type
self.primary_endpoints = primary_endpoints
self.primary_location = primary_location
self.status_of_primary = status_of_primary
self.last_geo_failover_time = last_geo_failover_time
self.secondary_location = secondary_location
self.status_of_secondary = status_of_secondary
self.creation_time = creation_time
self.custom_domain = custom_domain
self.secondary_endpoints = secondary_endpoints
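# --- Editor's note: illustrative usage sketch, not part of the generated client ---
# Instances of this model are normally produced by msrest deserialization of service
# responses, but they can also be built directly; only ``location`` is required. The
# values (and the ``AccountType`` enum member name) below are illustrative assumptions.
#
#   account = StorageAccount(
#       location='westus',
#       tags={'env': 'test'},
#       account_type=AccountType.standard_lrs,
#   )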
| mit | 619,175,889,139,228,500 | 51.439024 | 305 | 0.686512 | false |
NSLS-II-CHX/ipython_ophyd | startup/80-pseudomotors.py | 2 | 1216 | from ophyd import (EpicsMotor, PseudoPositioner, PseudoSingle)
from ophyd import Component as Cpt
class SamplePositioner(PseudoPositioner):
'''
Maintains an offset between a master/slave set of positioners
such that the slave movement is the negative of the master's relative
motion (i.e. it maintains a constant, negative relative offset).
Assumes that the user has adjusted the axes to their initial positions.
Example
-------------
psamp_x = SamplePositioner(prefix='', name='psamp_x', concurrent=True)
'''
physical_sample_holder = Cpt(EpicsMotor, 'XF:11IDB-ES{Dif-Ax:XH}Mtr')
beamstop_x = Cpt(EpicsMotor, 'XF:11IDB-OP{BS:Samp-Ax:X}Mtr')
sample_holder = Cpt(PseudoSingle, limits=(0, 0))
def forward(self, pos):
"pos is a self.PseudoPosition"
delta = pos - pos.sample_holder.pos
return self.RealPosition(physical_sample_holder=pos.sample_holder,
beamstop_x=self.beamstop_x.position - delta)
def inverse(self, pos):
"pos is self.RealPosition"
return self.PseudoPosition(sample_holder=pos.physical_sample_holder)
psamp_x = SamplePositioner(prefix='', name='psamp_x', concurrent=True)
| bsd-2-clause | -6,584,200,962,176,377,000 | 38.225806 | 77 | 0.683388 | false |
qsantos/spyce | spyce/coordinates.py | 1 | 3209 | import math
class CelestialCoordinates:
"""Celestial coordinates of a point or a direction
Celestial coordinates are spherical coordinates. The reference frame is thus
given by an origin, a fundamental plane and a primary direction.
* ecliptic coordinates: planet center, ecliptic, vernal equinox
* equatorial coordinates: planet center, celestial equator, vernal equinox
Note that the vernal equinox, the celestial equator and the ecliptic are
respectively the northward equinox, the equatorial plane and the orbital
plane *of the Earth*. It does mean that the direction of the north pole of
Mars is given within a referential centered on Mars, oriented with Earth.
"""
# the obliquity of the ecliptic is Earth's obliquity (tilt); that is, the
# angle between the celestial equator (Earth's equatorial plane) and the
# ecliptic (Earth's orbital plane)
obliquity_of_the_ecliptic = 0.40910517666747087
def __init__(self, right_ascension, declination, ecliptic_longitude,
ecliptic_latitude, distance):
"""Use from_equatorial() or from_ecliptic()"""
self.right_ascension = right_ascension
self.declination = declination
self.ecliptic_longitude = ecliptic_longitude
self.ecliptic_latitude = ecliptic_latitude
self.distance = distance
@classmethod
def from_equatorial(cls, right_ascension, declination, distance=math.inf):
"""Locate an object from its equatorial coordinates (see class doc)
If distance is omitted, it is assumed to be infinite; the coordinates
then refer either to a point infinitely far away, or to a direction.
"""
e = cls.obliquity_of_the_ecliptic
ecliptic_longitude = math.atan(
(
math.sin(right_ascension) * math.cos(e) +
math.tan(declination) * math.sin(e)
)
/ math.cos(right_ascension)
)
ecliptic_latitude = math.asin(
math.sin(declination) * math.cos(e) -
math.cos(declination) * math.sin(e) * math.sin(right_ascension)
)
return cls(right_ascension, declination, ecliptic_longitude,
ecliptic_latitude, distance)
@classmethod
def from_ecliptic(cls, ecliptic_longitude, ecliptic_latitude,
distance=math.inf):
"""Locate an object from its ecliptic coordinates (see class doc)
If distance is omitted, it is assumed to be infinite; the coordinates
then refer either to a point infinitely far away, or to a direction.
"""
e = cls.obliquity_of_the_ecliptic
right_ascension = math.atan(
(
math.sin(ecliptic_longitude) * math.cos(e) -
math.tan(ecliptic_latitude) * math.sin(e)
) / math.cos(ecliptic_longitude)
)
declination = math.asin(
math.sin(ecliptic_latitude) * math.cos(e) +
math.cos(ecliptic_latitude) * math.sin(e) *
math.sin(ecliptic_longitude)
)
return cls(right_ascension, declination, ecliptic_longitude,
ecliptic_latitude, distance)
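# --- Editor's note: illustrative round-trip check, not part of the original module ---
# The two constructors are inverses of each other (up to floating point error and
# quadrant ambiguities of atan), so converting ecliptic -> equatorial -> ecliptic
# should recover the inputs. The numbers below are arbitrary.
if __name__ == '__main__':
    c = CelestialCoordinates.from_ecliptic(1.0, 0.2, distance=3.5e11)
    d = CelestialCoordinates.from_equatorial(c.right_ascension, c.declination,
                                             c.distance)
    assert abs(d.ecliptic_longitude - 1.0) < 1e-9
    assert abs(d.ecliptic_latitude - 0.2) < 1e-9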
| gpl-3.0 | 9,017,531,885,622,832,000 | 41.223684 | 78 | 0.644438 | false |
krzychb/rtd-test-bed | tools/tiny-test-fw/TinyFW.py | 1 | 8904 | # Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Interface for test cases. """
import os
import time
import traceback
import functools
import socket
from datetime import datetime
import junit_xml
import Env
import DUT
import App
import Utility
class DefaultEnvConfig(object):
"""
Default test configs. There are 3 places to set configs; priority is (high -> low):
1. ``overwrite`` args set by the caller of the test method
2. values set by the test_method decorator
3. the default env config from this class
"""
DEFAULT_CONFIG = {
"app": App.BaseApp,
"dut": DUT.BaseDUT,
"env_tag": "default",
"env_config_file": None,
"test_suite_name": None,
}
@classmethod
def set_default_config(cls, **kwargs):
"""
:param kwargs: configs need to be updated
:return: None
"""
cls.DEFAULT_CONFIG.update(kwargs)
@classmethod
def get_default_config(cls):
"""
:return: current default config
"""
return cls.DEFAULT_CONFIG.copy()
set_default_config = DefaultEnvConfig.set_default_config
get_default_config = DefaultEnvConfig.get_default_config
MANDATORY_INFO = {
"execution_time": 1,
"env_tag": "default",
"category": "function",
"ignore": False,
}
class JunitReport(object):
# wrapper for junit test report
# TODO: JunitReport methods are not thread safe (although not likely to be used this way).
JUNIT_FILE_NAME = "XUNIT_RESULT.xml"
JUNIT_DEFAULT_TEST_SUITE = "test-suite"
JUNIT_TEST_SUITE = junit_xml.TestSuite(JUNIT_DEFAULT_TEST_SUITE,
hostname=socket.gethostname(),
timestamp=datetime.utcnow().isoformat())
JUNIT_CURRENT_TEST_CASE = None
_TEST_CASE_CREATED_TS = 0
@classmethod
def output_report(cls, junit_file_path):
""" Output current test result to file. """
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), "w") as f:
cls.JUNIT_TEST_SUITE.to_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
@classmethod
def get_current_test_case(cls):
"""
By default, the test framework handles the junit test report automatically.
However, some test cases might want to add extra info to the report.
They can use this method to get the current test case created by the test framework.
:return: current junit test case instance created by ``JunitReport.create_test_case``
"""
return cls.JUNIT_CURRENT_TEST_CASE
@classmethod
def test_case_finish(cls, test_case):
"""
Append the test case to test suite so it can be output to file.
Execution time will be automatically updated (compared to ``create_test_case``).
"""
test_case.elapsed_sec = time.time() - cls._TEST_CASE_CREATED_TS
cls.JUNIT_TEST_SUITE.test_cases.append(test_case)
@classmethod
def create_test_case(cls, name):
"""
Extend ``junit_xml.TestCase`` with:
1. save the created test case so it can be retrieved by ``get_current_test_case``
2. log the creation timestamp, so ``elapsed_sec`` can be auto-updated in ``test_case_finish``.
:param name: test case name
:return: instance of ``junit_xml.TestCase``
"""
# set stdout to empty string, so we can always append string to stdout.
# It won't affect output logic. If stdout is empty, it won't be put to report.
test_case = junit_xml.TestCase(name, stdout="")
cls.JUNIT_CURRENT_TEST_CASE = test_case
cls._TEST_CASE_CREATED_TS = time.time()
return test_case
@classmethod
def update_performance(cls, performance_items):
"""
Update performance results to ``stdout`` of current test case.
:param performance_items: a list of performance items. each performance item is a key-value pair.
"""
assert cls.JUNIT_CURRENT_TEST_CASE
for item in performance_items:
cls.JUNIT_CURRENT_TEST_CASE.stdout += "[{}]: {}\n".format(item[0], item[1])
def test_method(**kwargs):
"""
decorator for test case function.
The following keyword arguments are pre-defined.
Any other keyword arguments will be regarded as filters for the test case;
they can be accessed via the ``case_info`` attribute of the test method.
:keyword app: class for test app. see :doc:`App <App>` for details
:keyword dut: class for current dut. see :doc:`DUT <DUT>` for details
:keyword env_tag: name for test environment, used to select configs from config file
:keyword env_config_file: test env config file. Usually not set when defining a case.
:keyword test_suite_name: test suite name, used for generating log folder name and adding xunit format test result.
Usually not set when defining a case.
:keyword junit_report_by_case: By default the test fw will handle junit report generation.
In some cases, one test function might test many test cases.
If this flag is set, test case can update junit report by its own.
"""
def test(test_func):
case_info = MANDATORY_INFO.copy()
case_info["name"] = case_info["ID"] = test_func.__name__
case_info["junit_report_by_case"] = False
case_info.update(kwargs)
@functools.wraps(test_func)
def handle_test(extra_data=None, **overwrite):
"""
create env, run test and record test results
:param extra_data: extra data that runner or main passed to test case
:param overwrite: args that runner or main want to overwrite
:return: None
"""
# create env instance
env_config = DefaultEnvConfig.get_default_config()
for key in kwargs:
if key in env_config:
env_config[key] = kwargs[key]
env_config.update(overwrite)
env_inst = Env.Env(**env_config)
# prepare for xunit test results
junit_file_path = env_inst.app_cls.get_log_folder(env_config["test_suite_name"])
junit_test_case = JunitReport.create_test_case(case_info["ID"])
result = False
try:
Utility.console_log("starting running test: " + test_func.__name__, color="green")
# execute test function
test_func(env_inst, extra_data)
# if finish without exception, test result is True
result = True
except Exception as e:
# handle all the exceptions here
traceback.print_exc()
# log failure
junit_test_case.add_failure_info(str(e) + ":\r\n" + traceback.format_exc())
finally:
# do close all DUTs, if result is False then print DUT debug info
close_errors = env_inst.close(dut_debug=(not result))
# We have a hook in DUT close, allow DUT to raise error to fail test case.
# For example, we don't allow DUT exception (reset) during test execution.
# We don't want to implement exception detection in the test function logic,
# as we need to add it to every test case.
# We can implement it in DUT receive thread,
# and raise exception in DUT close to fail test case if reset detected.
if close_errors:
for error in close_errors:
junit_test_case.add_failure_info(str(error))
result = False
if not case_info["junit_report_by_case"]:
JunitReport.test_case_finish(junit_test_case)
# end case and output result
JunitReport.output_report(junit_file_path)
if result:
Utility.console_log("Test Succeed: " + test_func.__name__, color="green")
else:
Utility.console_log(("Test Fail: " + test_func.__name__), color="red")
return result
handle_test.case_info = case_info
handle_test.test_method = True
return handle_test
return test
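# --- Editor's note: illustrative usage sketch, not part of the original module ---
# A test case is normally a module-level function decorated with ``test_method``;
# keyword arguments either override the pre-defined keys or become extra filters kept
# in ``case_info``. The env tag, app path and DUT method names below are assumptions.
#
#   @test_method(env_tag="Example_WIFI", execution_time=2, category="function")
#   def example_hello_world(env, extra_data):
#       dut = env.get_dut("dut1", app_path="examples/get-started/hello_world")
#       dut.start_app()
#       dut.expect("Hello world!")
#
#   if __name__ == '__main__':
#       example_hello_world()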
| apache-2.0 | 4,582,388,539,374,974,000 | 37.713043 | 119 | 0.615004 | false |
open-geotechnical/ogt-ags-py | ogtgui/xwidgets.py | 1 | 19365 | # -*- coding: utf-8 -*-
"""
@author: Peter Morgan <[email protected]>
"""
from Qt import Qt, QtCore, QtGui, pyqtSignal
from img import Ico
DEFAULT_SPACING = 0
DEFAULT_MARGIN = 0
DEFAULT_BUTTON_WIDTH = 80
#=====================================================
# Layouts
def hlayout(spacing=DEFAULT_SPACING, margin=DEFAULT_MARGIN):
"""Convenience function to create a QHBoxLayout"""
lay = QtGui.QHBoxLayout()
if isinstance(margin, bool):
margin = DEFAULT_SPACING
if isinstance(spacing, bool):
spacing = DEFAULT_SPACING
lay.setContentsMargins(margin, margin, margin, margin)
lay.setSpacing(spacing)
return lay
def vlayout(spacing=DEFAULT_SPACING, margin=DEFAULT_MARGIN):
"""Convenience function to create a QVBoxLayout"""
lay = QtGui.QVBoxLayout()
if isinstance(margin, bool):
margin = DEFAULT_SPACING
if isinstance(spacing, bool):
spacing = DEFAULT_SPACING
lay.setContentsMargins(margin, margin, margin, margin)
lay.setSpacing(spacing)
return lay
class XLabel(QtGui.QLabel):
def __init__( self, parent=None, style=None, align=None,
text=None, tooltip=None, bold=False, width=None):
QtGui.QLabel.__init__( self, parent )
self.set_bold(bold)
if text:
self.setText(text)
if tooltip:
self.setToolTip(tooltip)
if align != None:
self.setAlignment(align)
if style:
self.setStyleSheet(style)
def set_bold(self, state):
f = self.font()
f.setBold(state)
self.setFont(f)
def deadlabel(txt, align=Qt.AlignLeft, bold=False, style=None):
"""Convenience function to create a QLabel"""
lbl = QtGui.QLabel()
lbl.setText(txt)
lbl.setAlignment(align)
if bold:
f = lbl.font()
f.setBold(True)
lbl.setFont(f)
if style:
lbl.setStyleSheet(style)
return lbl
class XToolButton(QtGui.QToolButton):
def __init__( self, parent=None, both=True, ico=None,
popup=False, autoRaise=True, menu=False, disabled=False,
text=None, tooltip=None, bold=False,
callback=None, toggledCallback=None,
checkable=None, checked=None,
width=None):
QtGui.QToolButton.__init__( self, parent )
self.setAutoRaise(autoRaise)
self.setDisabled(disabled)
if width:
self.setFixedWidth(width)
if both:
self.setToolButtonStyle( Qt.ToolButtonTextBesideIcon)
else:
self.setToolButtonStyle( Qt.ToolButtonIconOnly)
if checkable != None:
self.setCheckable(checkable)
if checked != None:
self.setChecked(checked)
if callback:
self.clicked.connect(callback)
if toggledCallback:
self.toggled.connect(toggledCallback)
if tooltip:
self.setToolTip(tooltip)
if text:
self.setText(text)
if bold:
self.setBold(True)
if ico:
self.set_ico(ico)
if popup:
self.setPopupMode(QtGui.QToolButton.InstantPopup)
if menu:
self.setMenu(QtGui.QMenu())
def set_ico(self, ico):
self.setIcon(Ico.icon(ico))
def setBold(self, state=True):
f = self.font()
f.setBold(True)
self.setFont(f)
def set_bg(self, color):
self.setStyleSheet("background-color: %s" % color)
class XTableWidgetItem(QtGui.QTableWidgetItem):
"""Extended QTableWidgetItem with convenience functions"""
def __init__(self):
QtGui.QTableWidgetItem.__init__(self)
def set(self, text=None, bold=False, bg=None, fg=None, align=None, check=None):
if text:
self.setText(text)
if bold:
self.set_bold(True)
if bg:
self.set_bg(bg)
if fg:
self.set_bg(fg)
if align:
self.setTextAlignment(align)
if check != None:
self.setCheckState(check)
def set_bold(self, state):
f = self.font()
f.setBold(state)
self.setFont(f)
def set_bg(self, bg_color):
colo = QtGui.QColor()
colo.setNamedColor(bg_color)
self.setBackgroundColor(colo)
def set_fg(self, bg_color):
colo = QtGui.QColor()
colo.setNamedColor(bg_color)
self.setForeground(colo)
class XTreeWidgetItem(QtGui.QTreeWidgetItem):
"""Extended QTableWidgetItem with convenience functions"""
def __init__(self):
QtGui.QTreeWidgetItem.__init__(self)
def set(self, cidx, text=None, bold=False, bg=None, fg=None, align=None, check=None, ico=None):
if text:
self.setText(cidx, str(text))
if bold:
self.set_bold(cidx, True)
if bg:
self.set_bg(cidx, bg)
if fg:
self.set_fg(cidx, fg)
if align:
self.setTextAlignment(cidx, align)
if check != None:
self.setCheckState(cidx, check)
if ico:
self.set_ico(cidx, ico)
def set_bold(self, cidx, state):
f = self.font(cidx)
f.setBold(state)
self.setFont(cidx, f)
def set_bg(self,cidx, bg_color):
colo = QtGui.QColor()
colo.setNamedColor(bg_color)
self.setBackgroundColor(cidx, colo)
def set_fg(self, cidx, bg_color):
colo = QtGui.QColor()
colo.setNamedColor(bg_color)
self.setForeground(cidx, colo)
def set_ico(self, cidx, ico):
self.setIcon(cidx, Ico.icon(ico))
def i(self, cidx):
try:
return int(str(self.text(cidx)))
except:
return None
class GroupHBox(QtGui.QGroupBox):
def __init__(self, parent=None):
QtGui.QGroupBox.__init__(self, parent)
self.layout = QtGui.QHBoxLayout()
self.setLayout(self.layout)
def setContentsMargins(self, a,b,c,d):
self.layout.setContentsMargins(a,b,c,d)
def addWidget(self, widget, stretch=0):
self.layout.addWidget(widget, stretch)
def addLayout(self, widget, stretch=0):
self.layout.addLayout(widget, stretch)
def addStretch(self, stretch):
self.layout.addStretch(stretch)
class GroupVBox(QtGui.QGroupBox):
def __init__(self, parent=None, bold=False):
QtGui.QGroupBox.__init__(self, parent)
self.layout = QtGui.QVBoxLayout()
self.layout.setSpacing(0)
self.setLayout(self.layout)
def setContentsMargins(self, a,b,c,d):
self.layout.setContentsMargins(a,b,c,d)
def addLabel(self, txt):
lbl = QtGui.QLabel()
lbl.setText(txt)
lbl.setStyleSheet("font-family: monospace; font-size: 8pt; color: #666666; background-color: #efefef; padding: 3px;")
self.layout.addWidget(lbl)
def addWidget(self, widget, stretch=0):
self.layout.addWidget(widget, stretch)
def addLayout(self, widget, stretch=0):
self.layout.addLayout(widget, stretch)
def addSpacing(self, s):
self.layout.addSpacing( s)
def addStretch(self, stretch):
self.layout.addStretch(stretch)
def setSpacing(self, x):
self.layout.setSpacing(x)
class GroupGridBox(QtGui.QGroupBox):
def __init__(self, parent=None):
QtGui.QGroupBox.__init__(self, parent)
self.grid = QtGui.QGridLayout()
self.setLayout(self.grid)
class StandardItem( QtGui.QStandardItem ):
def __init__( self):
QtGui.QStandardItem.__init__( self )
#super(QtGui.QStandardItem, self).__init__()
self.setEditable(False)
def set_bg(self, color):
self.setBackground( G.colors.to_object( color ) )
def set_fg(self, color):
self.setForeground( G.colors.to_object( color ) )
def set_bold( self, bold):
font = self.font()
font.setBold( bold )
self.setFont( font )
def set_font_size( self, size):
font = self.font()
font.setPointSize(size)
self.setFont( font )
def setIco(self, ico):
self.setIcon(Ico.icon(ico))
def setText(self, txt, align=None, ico=None, icon=None, bold=False):
self.set(txt, align=align, ico=ico, icon=icon, bold=bold)
def set(self, txt, align=None, ico=None, icon=None, bold=False, font=None, bg=None):
QtGui.QStandardItem.setText(self, str(txt))
if align:
self.setTextAlignment( align)
if ico:
self.setIco(ico)
if icon:
self.setIcon(icon)
self.set_bold(bold)
if font:
self.set_font_family(font)
if bg:
self.set_bg(bg)
def set_font_family( self, fam):
font = self.font( )
font.setFamily( fam )
self.setFont( font )
def s(self):
return str(self.text())
def i(self):
x, ok = self.text().toInt()
if not ok:
return None
return x
def b(self):
return str(self.text()) == "1"
def ds(self):
return str(self.data().toString())
def lbl_checked(self, val, bg_color=None ):
self.setTextAlignment( QtCore.Qt.AlignCenter)
if bg_color == None:
bg_color = "#FFECAA"
if bool(val):
self.setText( "Yes")
self.setData("1")
self.set_bg( bg_color )
else:
self.setText( "-")
self.setData("")
self.set_bg( "#efefef")
class ClearButton( QtGui.QToolButton ):
def __init__( self, parent, callback=None ):
QtGui.QToolButton.__init__( self )
self.setIcon( Ico.icon( Ico.Clear ) )
self.setToolTip("Clear")
self.setToolButtonStyle( QtCore.Qt.ToolButtonIconOnly )
self.setAutoRaise(True)
self.setFixedWidth(16)
if callback:
self.connect(self, QtCore.SIGNAL("clicked()"), callback)
class IconLabel(QtGui.QLabel):
def __init__( self, parent=None, ico=None):
QtGui.QLabel.__init__( self, parent)
self.setContentsMargins(5,0,0,0)
#img_file_path = G.settings.image_path( "/missc/arrow_left_down.gif" )
icon = Ico.icon(ico)
self.setPixmap( icon.pixmap(QtCore.QSize( 16, 16 )) )
self.setFixedWidth(20)
class LNTextEdit(QtGui.QFrame):
"""Text widget with support for line numbers
https://nachtimwald.com/2009/08/19/better-qplaintextedit-with-line-numbers/
"""
class NumberBar(QtGui.QWidget):
def __init__(self, edit):
QtGui.QWidget.__init__(self, edit)
self.edit = edit
self.adjustWidth(1)
def paintEvent(self, event):
self.edit.numberbarPaint(self, event)
QtGui.QWidget.paintEvent(self, event)
def adjustWidth(self, count):
width = self.fontMetrics().width(unicode(count))
if self.width() != width:
self.setFixedWidth(width)
def updateContents(self, rect, scroll):
if scroll:
self.scroll(0, scroll)
else:
# It would be nice to do
# self.update(0, rect.y(), self.width(), rect.height())
# But we can't because it will not remove the bold on the
# current line if word wrap is enabled and a new block is
# selected.
self.update()
class PlainTextEdit(QtGui.QPlainTextEdit):
def __init__(self, *args):
QtGui.QPlainTextEdit.__init__(self, *args)
# self.setFrameStyle(QFrame.NoFrame)
self.setFrameStyle(QtGui.QFrame.NoFrame)
self.highlight()
# self.setLineWrapMode(QPlainTextEdit.NoWrap)
self.setWordWrapMode(QtGui.QTextOption.NoWrap)
self.setLineWrapMode(QtGui.QPlainTextEdit.NoWrap)
self.setStyleSheet("font-family: monospace")
self.cursorPositionChanged.connect(self.highlight)
def highlight(self):
hi_selection = QtGui.QTextEdit.ExtraSelection()
hi_selection.format.setBackground(self.palette().alternateBase())
hi_selection.format.setProperty(QtGui.QTextFormat.FullWidthSelection, QtCore.QVariant(True))
hi_selection.cursor = self.textCursor()
hi_selection.cursor.clearSelection()
self.setExtraSelections([hi_selection])
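        # Called by the NumberBar widget: paints one right-aligned line number
        # per visible text block, with the current line drawn in bold.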
def numberbarPaint(self, number_bar, event):
font_metrics = self.fontMetrics()
current_line = self.document().findBlock(self.textCursor().position()).blockNumber() + 1
block = self.firstVisibleBlock()
line_count = block.blockNumber()
painter = QtGui.QPainter(number_bar)
painter.fillRect(event.rect(), self.palette().base())
# Iterate over all visible text blocks in the document.
while block.isValid():
line_count += 1
block_top = self.blockBoundingGeometry(block).translated(self.contentOffset()).top()
# Check if the position of the block is out side of the visible
# area.
if not block.isVisible() or block_top >= event.rect().bottom():
break
# We want the line number for the selected line to be bold.
if line_count == current_line:
font = painter.font()
font.setBold(True)
painter.setFont(font)
else:
font = painter.font()
font.setBold(False)
painter.setFont(font)
# Draw the line number right justified at the position of the line.
paint_rect = QtCore.QRect(0, block_top, number_bar.width(), font_metrics.height())
                painter.drawText(paint_rect, QtCore.Qt.AlignRight, unicode(line_count))
block = block.next()
painter.end()
def __init__(self, *args):
QtGui.QFrame.__init__(self, *args)
self.setFrameStyle(QtGui.QFrame.StyledPanel | QtGui.QFrame.Sunken)
self.edit = self.PlainTextEdit()
self.number_bar = self.NumberBar(self.edit)
hbox = QtGui.QHBoxLayout(self)
hbox.setSpacing(0)
hbox.setMargin(0)
hbox.addWidget(self.number_bar)
hbox.addWidget(self.edit)
self.edit.blockCountChanged.connect(self.number_bar.adjustWidth)
self.edit.updateRequest.connect(self.number_bar.updateContents)
def getText(self):
return unicode(self.edit.toPlainText())
def setText(self, text):
self.edit.setPlainText(text)
def isModified(self):
return self.edit.document().isModified()
def setModified(self, modified):
self.edit.document().setModified(modified)
def setLineWrapMode(self, mode):
self.edit.setLineWrapMode(mode)
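# A titled mini-toolbar: a small caption label stacked above a QToolBar.
# With is_group=True the added buttons join an exclusive QButtonGroup and
# toggle_callback receives the checked button's id; toggle_icons swaps the
# FilterOn/FilterOff icons to mirror the checked state.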
class ToolBarGroup(QtGui.QWidget):
def __init__(self, parent=None, title=None, width=None, hide_labels=False,
is_group=False, toggle_icons=False, toggle_callback=None):
QtGui.QWidget.__init__(self, parent)
if width:
self.setFixedWidth(width)
self.icon_on = Ico.FilterOn
self.icon_off = Ico.FilterOff
self.toggle_icons = toggle_icons
self.toggle_callback = toggle_callback
self.hide_labels = hide_labels
self.buttonGroup = None
self.is_group = is_group
if self.is_group:
self.buttonGroup = QtGui.QButtonGroup()
self.buttonGroup.setExclusive(True)
if self.toggle_callback:
self.buttonGroup.buttonClicked.connect(self.on_button_clicked)
self.group_var = None
self.callback = None
self.show_icons = True
self.icon_size = 12
self.bg_color = '#333333'
## Main Layout
mainLayout = QtGui.QVBoxLayout()
mainLayout.setContentsMargins(0, 0, 0, 0)
mainLayout.setSpacing(0)
self.setLayout(mainLayout)
## Label
self.label = QtGui.QLabel()
bg = "#8F8F8F" ##eeeeee"
fg = "#eeeeee" ##333333"
lbl_sty = "background: %s; " % bg # qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 #fefefe, stop: 1 #CECECE);"
lbl_sty += " color: %s; font-size: 8pt; padding: 1px;" % fg # border: 1px outset #cccccc;"
self.label.setStyleSheet(lbl_sty)
self.label.setAlignment(QtCore.Qt.AlignCenter)
mainLayout.addWidget(self.label)
## Toolbar
self.toolbar = QtGui.QToolBar()
self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.toolbar.setFixedHeight(30)
mainLayout.addWidget(self.toolbar)
if title:
self.set_title(title)
def set_title(self, title):
self.label.setText("%s" % title)
def addWidget(self, widget):
self.toolbar.addWidget(widget)
return widget
def addAction(self, act):
self.toolbar.addAction(act)
def addButton(self, ico=None, text=None, callback=None, idx=None, toggle_callback=None, tooltip=None,
ki=None, bold=False, checkable=False, checked=None, width=None, return_action=False):
butt = QtGui.QToolButton()
if self.is_group:
            if idx is not None:
self.buttonGroup.addButton(butt, idx)
else:
self.buttonGroup.addButton(butt)
        if not self.hide_labels:
if text != None:
butt.setText(text)
if text == None:
butt.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
else:
butt.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
if tooltip:
butt.setToolTip(tooltip)
if self.toggle_icons:
butt.setIconSize(QtCore.QSize(10, 10))
butt.setIcon(Ico.icon(self.icon_off))
if ico:
butt.setIcon(Ico.icon(ico))
butt.setIconSize(QtCore.QSize(10, 10))
butt.setCheckable(checkable)
        if checked is not None:
butt.setChecked(checked)
butt.setProperty("ki", ki)
nuAct = self.toolbar.addWidget(butt)
if callback:
self.connect(butt, QtCore.SIGNAL("clicked()"), callback)
#if toggle_callback:
# self.connect(butt, QtCore.SIGNAL("toggled(bool)"), toggle_callback)
if bold:
self.set_bold(butt)
if width:
butt.setFixedWidth(width)
self.on_button_clicked(block=True)
if return_action:
return nuAct
return butt
def set_bold(self, w):
f = w.font()
f.setBold(True)
w.setFont(f)
def on_button_clicked(self, butt=None, block=False):
if self.is_group:
for b in self.buttonGroup.buttons():
b.setIcon( Ico.icon(self.icon_on if b.isChecked() else self.icon_off) )
                if not block and b.isChecked():
if self.toggle_callback:
self.toggle_callback(self.buttonGroup.id(b))
def get_id(self):
id = self.buttonGroup.checkedId()
if id == -1:
return None
return id
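# Example usage (illustrative sketch, not from the original source; assumes a
# parent layout and a handler receiving the selected filter id):
#
#     tbg = ToolBarGroup(title="Filters", is_group=True, toggle_icons=True,
#                        toggle_callback=on_filter_selected)
#     tbg.addButton(text="All", idx=0, checkable=True, checked=True)
#     tbg.addButton(text="Mine", idx=1, checkable=True)
#     layout.addWidget(tbg)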
| gpl-2.0 | -6,770,655,444,690,900,000 | 27.688889 | 125 | 0.582804 | false |
boada/ICD | sandbox/plot_snippets/p1.py | 1 | 1504 | #!/usr/bin/env python
# File: plot_icd_vs_mass.py
# Created on: Mon 12 Mar 2012 11:50:09 AM CDT
# Last Change: Fri Sep 28 15:47:02 2012
# Purpose of script: <+INSERT+>
# Author: Steven Boada
import pylab as pyl
from mk_galaxy_struc import mk_galaxy_struc
def plot_icd_vs_mass():
galaxies = mk_galaxy_struc()
# Add the figures
# Mass vs ICD plot I-H
f1 = pyl.figure(1,figsize=(6,4))
f1s1 = f1.add_subplot(211)
#Upper and Lower limit arrow verts
arrowup_verts = [[0.,0.], [-1., -1], [0.,0.], [0.,-2.],[0.,0.], [1, -1]]
arrowdown_verts = [[0.,0.], [-1., 1], [0.,0.], [0.,2.],[0.,0.], [1, 1]]
for i in range(len(galaxies)):
if galaxies[i].ston_I > 30.0:
f1s1.scatter(galaxies[i].Mass, galaxies[i].ICD_IH,
c='#F28500', s=50, zorder=2)
#Add upper limit arrows
if galaxies[i].ICD_IH > 0.25:
f1s1.scatter(galaxies[i].Mass,0.25,s=100,marker=None,
verts=arrowup_verts)
if galaxies[i].ICD_IH < -0.05:
f1s1.scatter(galaxies[i].Mass,-0.05,s=100,marker=None,
verts=arrowdown_verts)
############
# FIGURE 1 #
############
pyl.figure(1)
f1s1.set_xscale('log')
f1s1.set_xlim(3e7,1e12)
f1s1.set_ylim(-0.05,0.25)
f1s1.hlines(0.0,3e7,1e12)
# labels
f1s1.set_xlabel(r"Mass [$M_{\odot}]$")
f1s1.set_ylabel(r"$\xi[I,H]$")
# pyl.show()
return f1s1
if __name__=='__main__':
plot_icd_vs_mass()
| mit | 7,918,454,572,545,530,000 | 25.857143 | 76 | 0.539894 | false |
nlu90/heron | heron/tools/cli/src/python/main.py | 4 | 12421 | #!/usr/bin/env python2.7
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' main.py '''
import argparse
import atexit
import getpass
import os
import shutil
import sys
import time
import traceback
import heron.common.src.python.utils.log as log
import heron.tools.common.src.python.utils.config as config
import heron.tools.cli.src.python.cdefs as cdefs
import heron.tools.cli.src.python.cliconfig as cliconfig
import heron.tools.cli.src.python.help as cli_help
import heron.tools.cli.src.python.activate as activate
import heron.tools.cli.src.python.deactivate as deactivate
import heron.tools.cli.src.python.kill as kill
import heron.tools.cli.src.python.result as result
import heron.tools.cli.src.python.restart as restart
import heron.tools.cli.src.python.submit as submit
import heron.tools.cli.src.python.update as update
import heron.tools.cli.src.python.version as version
import heron.tools.cli.src.python.config as hconfig
from heron.tools.cli.src.python.opts import cleaned_up_files
Log = log.Log
HELP_EPILOG = '''Getting more help:
heron help <command> Prints help and options for <command>
For detailed documentation, go to http://heronstreaming.io'''
# pylint: disable=protected-access,superfluous-parens
class _HelpAction(argparse._HelpAction):
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
# retrieve subparsers from parser
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)
]
# there will probably only be one subparser_action,
# but better save than sorry
for subparsers_action in subparsers_actions:
# get all subparsers and print help
for choice, subparser in subparsers_action.choices.items():
print("Subparser '{}'".format(choice))
print(subparser.format_help())
return
################################################################################
def get_command_handlers():
'''
Create a map of command names and handlers
'''
return {
'activate': activate,
'config': hconfig,
'deactivate': deactivate,
'help': cli_help,
'kill': kill,
'restart': restart,
'submit': submit,
'update': update,
'version': version
}
################################################################################
def create_parser(command_handlers):
'''
Main parser
:return:
'''
parser = argparse.ArgumentParser(
prog='heron',
epilog=HELP_EPILOG,
formatter_class=config.SubcommandHelpFormatter,
add_help=True)
subparsers = parser.add_subparsers(
title="Available commands",
metavar='<command> <options>')
command_list = sorted(command_handlers.items())
for command in command_list:
command[1].create_parser(subparsers)
return parser
################################################################################
def run(handlers, command, parser, command_args, unknown_args):
'''
Run the command
:param command:
:param parser:
:param command_args:
:param unknown_args:
:return:
'''
if command in handlers:
return handlers[command].run(command, parser, command_args, unknown_args)
else:
err_context = 'Unknown subcommand: %s' % command
return result.SimpleResult(result.Status.InvocationError, err_context)
def cleanup(files):
'''
:param files:
:return:
'''
for cur_file in files:
if os.path.isdir(cur_file):
shutil.rmtree(cur_file)
else:
shutil.rmtree(os.path.dirname(cur_file))
################################################################################
def check_environment():
'''
Check whether the environment variables are set
:return:
'''
if not config.check_java_home_set():
sys.exit(1)
if not config.check_release_file_exists():
sys.exit(1)
################################################################################
# pylint: disable=unused-argument
def server_deployment_mode(command, parser, cluster, cl_args):
'''
check the server deployment mode for the given cluster
if it is valid return the valid set of args
:param cluster:
:param cl_args:
:return:
'''
# Read the cluster definition, if not found
client_confs = cdefs.read_server_mode_cluster_definition(cluster, cl_args)
if not client_confs[cluster]:
return dict()
# tell the user which definition that we are using
if not cl_args.get('service_url', None):
Log.debug("Using cluster definition from file %s" \
% cliconfig.get_cluster_config_file(cluster))
else:
Log.debug("Using cluster service url %s" % cl_args['service_url'])
# if cluster definition exists, but service_url is not set, it is an error
if not 'service_url' in client_confs[cluster]:
config_file = cliconfig.get_cluster_config_file(cluster)
Log.error('No service url for %s cluster in %s', cluster, config_file)
sys.exit(1)
# get overrides
if 'config_property' in cl_args:
pass
try:
cluster_role_env = (cl_args['cluster'], cl_args['role'], cl_args['environ'])
config.server_mode_cluster_role_env(cluster_role_env, client_confs)
cluster_tuple = config.defaults_cluster_role_env(cluster_role_env)
except Exception as ex:
Log.error("Argument cluster/[role]/[env] is not correct: %s", str(ex))
sys.exit(1)
new_cl_args = dict()
new_cl_args['cluster'] = cluster_tuple[0]
new_cl_args['role'] = cluster_tuple[1]
new_cl_args['environ'] = cluster_tuple[2]
new_cl_args['service_url'] = client_confs[cluster]['service_url'].rstrip('/')
new_cl_args['deploy_mode'] = config.SERVER_MODE
cl_args.update(new_cl_args)
return cl_args
################################################################################
# pylint: disable=superfluous-parens
def direct_deployment_mode(command, parser, cluster, cl_args):
'''
check the direct deployment mode for the given cluster
if it is valid return the valid set of args
:param command:
:param parser:
:param cluster:
:param cl_args:
:return:
'''
cluster = cl_args['cluster']
try:
config_path = cl_args['config_path']
override_config_file = config.parse_override_config_and_write_file(cl_args['config_property'])
except KeyError:
# if some of the arguments are not found, print error and exit
subparser = config.get_subparser(parser, command)
print(subparser.format_help())
return dict()
# check if the cluster config directory exists
if not cdefs.check_direct_mode_cluster_definition(cluster, config_path):
Log.error("Cluster config directory \'%s\' does not exist", config_path)
return dict()
config_path = config.get_heron_cluster_conf_dir(cluster, config_path)
if not os.path.isdir(config_path):
Log.error("Cluster config directory \'%s\' does not exist", config_path)
return dict()
Log.info("Using cluster definition in %s" % config_path)
try:
cluster_role_env = (cl_args['cluster'], cl_args['role'], cl_args['environ'])
config.direct_mode_cluster_role_env(cluster_role_env, config_path)
cluster_tuple = config.defaults_cluster_role_env(cluster_role_env)
except Exception as ex:
Log.error("Argument cluster/[role]/[env] is not correct: %s", str(ex))
return dict()
new_cl_args = dict()
new_cl_args['cluster'] = cluster_tuple[0]
new_cl_args['role'] = cluster_tuple[1]
new_cl_args['environ'] = cluster_tuple[2]
new_cl_args['config_path'] = config_path
new_cl_args['override_config_file'] = override_config_file
new_cl_args['deploy_mode'] = config.DIRECT_MODE
cl_args.update(new_cl_args)
return cl_args
################################################################################
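# Resolve how the topology should be deployed: try server mode first (a
# cluster definition with a service url), then direct mode (a local config
# directory); returns the updated args, or an empty dict if neither applies.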
def deployment_mode(command, parser, cl_args):
# first check if it is server mode
new_cl_args = server_deployment_mode(command, parser, cl_args['cluster'], cl_args)
if len(new_cl_args) > 0:
return new_cl_args
# now check if it is direct mode
new_cl_args = direct_deployment_mode(command, parser, cl_args['cluster'], cl_args)
if len(new_cl_args) > 0:
return new_cl_args
return dict()
################################################################################
def extract_common_args(command, parser, cl_args):
'''
Extract all the common args for all commands
:param command:
:param parser:
:param cl_args:
:return:
'''
try:
cluster_role_env = cl_args.pop('cluster/[role]/[env]')
except KeyError:
try:
cluster_role_env = cl_args.pop('cluster') # for version command
except KeyError:
# if some of the arguments are not found, print error and exit
subparser = config.get_subparser(parser, command)
print(subparser.format_help())
return dict()
new_cl_args = dict()
cluster_tuple = config.get_cluster_role_env(cluster_role_env)
new_cl_args['cluster'] = cluster_tuple[0]
new_cl_args['role'] = cluster_tuple[1]
new_cl_args['environ'] = cluster_tuple[2]
new_cl_args['submit_user'] = getpass.getuser()
cl_args.update(new_cl_args)
return cl_args
################################################################################
def execute(handlers, local_commands):
'''
Run the command
:return:
'''
# verify if the environment variables are correctly set
check_environment()
# create the argument parser
parser = create_parser(handlers)
# if no argument is provided, print help and exit
if len(sys.argv[1:]) == 0:
parser.print_help()
return 0
# insert the boolean values for some of the options
sys.argv = config.insert_bool_values(sys.argv)
try:
# parse the args
args, unknown_args = parser.parse_known_args()
except ValueError as ex:
Log.error("Error while parsing arguments: %s", str(ex))
Log.debug(traceback.format_exc())
sys.exit(1)
command_line_args = vars(args)
# set log level
log.set_logging_level(command_line_args)
Log.debug("Input Command Line Args: %s", command_line_args)
# command to be execute
command = command_line_args['subcommand']
is_local_command = command in local_commands
if command == 'version':
results = run(handlers, command, parser, command_line_args, unknown_args)
return 0 if result.is_successful(results) else 1
if not is_local_command:
log.set_logging_level(command_line_args)
Log.debug("Input Command Line Args: %s", command_line_args)
# determine the mode of deployment
command_line_args = extract_common_args(command, parser, command_line_args)
command_line_args = deployment_mode(command, parser, command_line_args)
# bail out if args are empty
if not command_line_args:
return 1
# register dirs cleanup function during exit
if command_line_args['deploy_mode'] == config.DIRECT_MODE and command != "version":
cleaned_up_files.append(command_line_args['override_config_file'])
atexit.register(cleanup, cleaned_up_files)
# print the input parameters, if verbose is enabled
Log.debug("Processed Command Line Args: %s", command_line_args)
start = time.time()
results = run(handlers, command, parser, command_line_args, unknown_args)
if not is_local_command:
result.render(results)
end = time.time()
if not is_local_command:
sys.stdout.flush()
Log.debug('Elapsed time: %.3fs.', (end - start))
return 0 if result.is_successful(results) else 1
def main():
# Create a map of supported commands and handlers
command_handlers = get_command_handlers()
# Execute
local_commands = ('help', 'version', 'config')
return execute(command_handlers, local_commands)
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | 4,054,049,120,241,187,000 | 30.767263 | 98 | 0.656469 | false |
etombini/wlister | tests/wlister-items.py | 1 | 9806 | try:
import unittest2 as unittest
except ImportError:
import unittest
import requests
import json
import telnetlib
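# These tests expect the backend web service on localhost:5000 and the
# wlister-filtered proxy on localhost:80; requests matching no whitelist rule
# are expected to come back as 404.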
class ProxyTest(unittest.TestCase):
def test_flask_ok(self):
r = requests.get('http://localhost:5000')
self.assertEqual(r.status_code, 200,
'HTTP target service is not running')
def test_proxy_ok(self):
r = requests.get('http://localhost/')
self.assertEqual(r.status_code, 200,
'Proxy service is not running properly')
def test_dummy_uri(self):
r = requests.get('http://localhost/abc')
self.assertEqual(r.status_code, 404,
'Filtering service not working')
class AttributesTest(unittest.TestCase):
def test_method_ok(self):
r = requests.get("http://localhost/method_get/")
self.assertEqual(r.status_code, 200)
def test_method_ko(self):
content = {"var1": "val1", "var2": "val2"}
r = requests.post('http://localhost/method_get/', data=content)
self.assertEqual(r.status_code, 404)
def test_protocol_ok(self):
t = telnetlib.Telnet('localhost', 80)
t.write('GET /protocol/ HTTP/1.1\n')
t.write('Host: localhost\n')
t.write('Accept: */*\n')
t.write('Accept-Encoding: gzip,deflate,compress\n')
t.write('\n\n')
r = t.read_all()
t.close()
self.assertEqual(int(r[9:12]), 200)
def test_protocol_ko(self):
t = telnetlib.Telnet('localhost', 80)
t.write('GET /protocol/ HTTP/1.0\n')
# t.write('Host: localhost\n')
t.write('Accept: */*\n')
t.write('Accept-Encoding: gzip,deflate,compress\n\n')
r = t.read_all()
t.close()
self.assertEqual(int(r[9:12]), 404)
def test_uri_ok(self):
r = requests.get("http://localhost/uri/")
self.assertEqual(r.status_code, 200)
def test_uri_ko(self):
r = requests.get("http://localhost/uri_ko/")
self.assertEqual(r.status_code, 404)
def test_args_ok(self):
r = requests.get("http://localhost/args/?var1=val1")
self.assertEqual(r.status_code, 200)
def test_args_ko(self):
r = requests.get("http://localhost/args/?var1=val1&var2=val2&var3=val3")
self.assertEqual(r.status_code, 404)
class PrerequisiteTest(unittest.TestCase):
def test_prerequisite_ko(self):
r = requests.get('http://localhost/never_matching_tag/')
self.assertEqual(r.status_code, 404)
def test_prerequisite_ok(self):
r = requests.get('http://localhost/matching_tag/')
self.assertEqual(r.status_code, 200)
class ParametersTest(unittest.TestCase):
def test_parameters_ok_same_order(self):
r = requests.get('http://localhost/parameters?var1=val1&var2=val2')
self.assertEqual(r.status_code, 200)
def test_parameters_ok_different_order(self):
r = requests.get('http://localhost/parameters?var2=val2&var1=val1')
self.assertEqual(r.status_code, 200)
def test_parameters_ko_wrong_value(self):
r = requests.get('http://localhost/parameters' +
'?var1=ValueNotExpected&var2=val2')
self.assertEqual(r.status_code, 404)
def test_parameters_ko_less_parameter(self):
r = requests.get('http://localhost/parameters?var2=val2')
self.assertEqual(r.status_code, 404)
def test_parameters_ko_more_parameter(self):
r = requests.get('http://localhost/parameters' +
'?var1=val1&var2=val2&UnexpectedParameter=whatever')
self.assertEqual(r.status_code, 404)
def test_parameters_ko_duplicated(self):
r = requests.get('http://localhost/parameters' +
'?var1=val1&var2=val2&var1=val1')
self.assertEqual(r.status_code, 404)
class ContentUrlEncodedTest(unittest.TestCase):
def test_content_url_encoded_ok_same_order(self):
content = {"var1": "val1", "var2": "val2"}
r = requests.post('http://localhost/post/', data=content)
self.assertEqual(r.status_code, 200)
def test_content_url_encoded_ok_different_order(self):
content = {"var2": "val2", "var1": "val1"}
r = requests.post('http://localhost/post/', data=content)
self.assertEqual(r.status_code, 200)
def test_content_url_encoded_ko_less_parameter(self):
content = {"var1": "val1"}
r = requests.post('http://localhost/post/', data=content)
self.assertEqual(r.status_code, 404)
def test_content_url_encoded_ko_more_parameter(self):
content = {"var1": "val1", "var2": "val2",
"UnexpectedParamter": "whatever"}
r = requests.post('http://localhost/post/', data=content)
self.assertEqual(r.status_code, 404)
def test_content_url_encoded_ko_wrong_value(self):
content = {"var1": "UnexpectedValue", "var2": "val2"}
r = requests.post('http://localhost/post/', data=content)
self.assertEqual(r.status_code, 404)
def test_content_url_encoded_ko_wrong_value_too_large(self):
v = 'val1' * 10
content = {"var1": v, "var2": "val2"}
r = requests.post('http://localhost/post/', data=content)
self.assertEqual(r.status_code, 404)
class HeadersTest(unittest.TestCase):
def test_headers_ok(self):
h = {'header-test': 'test'}
r = requests.get('http://localhost/headers/', headers=h)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.content, "OK", "Response is not OK")
def test_headers_ko_wrong_value(self):
h = {'header-test': 'UnexpectedValue'}
r = requests.get('http://localhost/headers/', headers=h)
self.assertEqual(r.status_code, 404)
def test_headers_ko_duplicated_header(self):
t = telnetlib.Telnet('localhost', 80)
t.write('GET /headers/ HTTP/1.1\n')
t.write('Host: localhost\n')
t.write('Accept: */*\n')
t.write('User-Agent: python-requests/2.2.0 CPython/2.7.3 Linux/3.8.0-29-generic\n')
t.write('Accept-Encoding: gzip, deflate, compress\n')
t.write('header-test: test\n')
t.write('header-test: test\n\n')
r = t.read_all()
t.close()
self.assertEqual(int(r[9:12]), 404)
def test_headers_ko_less_header(self):
t = telnetlib.Telnet('localhost', 80)
t.write('GET /headers/ HTTP/1.1\n')
t.write('Host: localhost\n')
# t.write('Accept: */*\n')
t.write('User-Agent: python-requests/2.2.0 CPython/2.7.3 Linux/3.8.0-29-generic\n')
t.write('Accept-Encoding: gzip, deflate, compress\n')
t.write('header-test: test\n\n')
r = t.read_all()
t.close()
self.assertEqual(int(r[9:12]), 404)
def test_headers_ko_more_header(self):
t = telnetlib.Telnet('localhost', 80)
t.write('GET /headers/ HTTP/1.1\n')
t.write('Host: localhost\n')
t.write('Accept: */*\n')
t.write('Supernumerary: ouh yeah\n')
t.write('User-Agent: python-requests/2.2.0 CPython/2.7.3 Linux/3.8.0-29-generic\n')
t.write('Accept-Encoding: gzip, deflate, compress\n')
t.write('header-test: test\n\n')
r = t.read_all()
t.close()
self.assertEqual(int(r[9:12]), 404)
def test_headers_add_ok(self):
r = requests.get("http://localhost/set_header/")
self.assertEqual(r.status_code, 200)
class JSONTest(unittest.TestCase):
def test_json_ok(self):
content = {"var01": "val01", "var02": "val02"}
headers = {'content-type': 'application/json'}
r = requests.post("http://localhost/content_json/", data=json.dumps(content), headers=headers)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.content, "OK")
def test_json_ok_change_order(self):
content = '{"var01": "val01", "var02": "val02"}'
headers = {'content-type': 'application/json'}
r = requests.post("http://localhost/content_json/", data=content, headers=headers)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.content, "OK")
def test_json_ko_wrong_value(self):
content = {"var01": "val03", "var02": "val02"}
headers = {'content-type': 'application/json'}
r = requests.post("http://localhost/content_json/", data=json.dumps(content), headers=headers)
self.assertEqual(r.status_code, 404)
def test_json_ko_too_many_values(self):
content = {"var01": "val01", "var02": "val02", "var03": "val03"}
headers = {'content-type': 'application/json'}
r = requests.post("http://localhost/content_json/", data=json.dumps(content), headers=headers)
self.assertEqual(r.status_code, 404)
def test_json_ko_not_enough_values(self):
content = {"var01": "val01"}
headers = {'content-type': 'application/json'}
r = requests.post("http://localhost/content_json/", data=json.dumps(content), headers=headers)
self.assertEqual(r.status_code, 404)
def test_json_ko_wrong_parameters(self):
content = {"var01": "val01", "var03": "val02"}
headers = {'content-type': 'application/json'}
r = requests.post("http://localhost/content_json/", data=json.dumps(content), headers=headers)
self.assertEqual(r.status_code, 404)
class LogTest(unittest.TestCase):
def test_log_request(self):
r = requests.get("http://localhost/logging/?test=tata&test=toto")
self.assertEqual(r.status_code, 200)
def test_log_request_post(self):
content = {"var01": "val01", "var03": "val02"}
r = requests.post("http://localhost/logging/?test=tata&test=toto", data=content)
self.assertEqual(r.status_code, 200)
| isc | 7,142,214,089,603,350,000 | 36.715385 | 102 | 0.610239 | false |
trabucayre/gnuradio | gr-blocks/python/blocks/qa_selector.py | 2 | 5882 | #!/usr/bin/env python
#
# Copyright 2019 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, blocks
class test_selector(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_select_same(self):
src_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expected_result = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expected_drop = ()
num_inputs = 4; num_outputs = 4
input_index = 1; output_index = 2
op = blocks.selector(gr.sizeof_char, input_index, output_index)
src = []
dst = []
for ii in range(num_inputs):
src.append( blocks.vector_source_b(src_data))
self.tb.connect(src[ii], (op,ii))
for jj in range(num_outputs):
dst.append(blocks.vector_sink_b())
self.tb.connect((op,jj),dst[jj])
self.tb.run()
dst_data = dst[output_index].data()
self.assertEqual(expected_result, dst_data)
def test_select_input(self):
num_inputs = 4; num_outputs = 4
input_index = 1; output_index = 2
op = blocks.selector(gr.sizeof_char, input_index, output_index)
src = []
dst = []
for ii in range(num_inputs):
src_data = [ii+1]*10
src.append( blocks.vector_source_b(src_data))
self.tb.connect(src[ii], (op,ii))
for jj in range(num_outputs):
dst.append(blocks.vector_sink_b())
self.tb.connect((op,jj),dst[jj])
self.tb.run()
expected_result = [input_index+1]*10
dst_data = list(dst[output_index].data())
self.assertEqual(expected_result, dst_data)
def test_dump(self):
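        # Outputs other than the selected one must not receive any samples.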
num_inputs = 4; num_outputs = 4
input_index = 1; output_index = 2
output_not_selected = 3
op = blocks.selector(gr.sizeof_char, input_index, output_index)
src = []
dst = []
for ii in range(num_inputs):
src_data = [ii+1]*10
src.append( blocks.vector_source_b(src_data))
self.tb.connect(src[ii], (op,ii))
for jj in range(num_outputs):
dst.append(blocks.vector_sink_b())
self.tb.connect((op,jj),dst[jj])
self.tb.run()
expected_result = []
dst_data = list(dst[output_not_selected].data())
self.assertEqual(expected_result, dst_data)
def test_not_enabled (self):
src_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expected_result = []
num_inputs = 4; num_outputs = 4
input_index = 1; output_index = 2
op = blocks.selector(gr.sizeof_char, input_index, output_index)
op.set_enabled(False)
src = []
dst = []
for ii in range(num_inputs):
src.append( blocks.vector_source_b(src_data))
self.tb.connect(src[ii], (op,ii))
for jj in range(num_outputs):
dst.append(blocks.vector_sink_b())
self.tb.connect((op,jj),dst[jj])
self.tb.run()
dst_data = dst[output_index].data()
self.assertEqual(expected_result, dst_data)
# These tests cannot be run as set_index can only be called after check_topology is called
# def test_set_indices(self):
# num_inputs = 4; num_outputs = 4
# input_index = 1; output_index = 2
# op = blocks.selector(gr.sizeof_char, 0, 0)
# src = []
# dst = []
# for ii in range(num_inputs):
# src_data = [ii+1]*10
# src.append( blocks.vector_source_b(src_data))
# self.tb.connect(src[ii], (op,ii))
# for jj in range(num_outputs):
# dst.append(blocks.vector_sink_b())
# self.tb.connect((op,jj),dst[jj])
# op.set_input_index(input_index)
# op.set_output_index(output_index)
# self.tb.run()
# expected_result = [input_index+1]*10
# dst_data = list(dst[output_index].data())
# self.assertEqual(expected_result, dst_data)
# def test_dont_set_indices(self):
# num_inputs = 4; num_outputs = 4
# input_index = 1; output_index = 2
# op = blocks.selector(gr.sizeof_char, 0, 0)
# #op.set_input_index(input_index)
# #op.set_output_index(output_index)
# src = []
# dst = []
# for ii in range(num_inputs):
# src_data = [ii+1]*10
# src.append( blocks.vector_source_b(src_data))
# self.tb.connect(src[ii], (op,ii))
# for jj in range(num_outputs):
# dst.append(blocks.vector_sink_b())
# self.tb.connect((op,jj),dst[jj])
# self.tb.run()
# expected_result = [input_index+1]*10
# dst_data = list(dst[output_index].data())
# self.assertNotEqual(expected_result, dst_data)
def test_float_vector(self):
num_inputs = 4; num_outputs = 4
input_index = 1; output_index = 2
veclen = 3
op = blocks.selector(gr.sizeof_float*veclen, input_index, output_index)
src = []
dst = []
for ii in range(num_inputs):
src_data = [float(ii)+1]*10*veclen
src.append( blocks.vector_source_f(src_data, repeat=False, vlen=veclen))
self.tb.connect(src[ii], (op,ii))
for jj in range(num_outputs):
dst.append(blocks.vector_sink_f(vlen=veclen))
self.tb.connect((op,jj),dst[jj])
self.tb.run()
expected_result = [float(input_index)+1]*10*veclen
dst_data = list(dst[output_index].data())
self.assertEqual(expected_result, dst_data)
if __name__ == '__main__':
gr_unittest.run(test_selector)
| gpl-3.0 | -6,100,212,019,102,513,000 | 26.876777 | 94 | 0.539442 | false |
chrinide/FrequentPatternMining | code/timing.py | 2 | 2942 | #!/usr/bin/env python2.6
######################################################################
# timing.py
######################################################################
# A script for comparative timings of FP algorithms.
######################################################################
# For license information, see LICENSE file
# For copyright information, see COPYRIGHT file
######################################################################
import sys
from timeit import Timer
from dataset import Dataset, NumericalDataset, VerticalDataset
from fp_mining import aprioriPatterns, fpGrowthPatterns, eclatPatterns
if len(sys.argv) < 2:
print "Usage: {0} [output_file] [runs] [trials]".format(sys.argv[0])
sys.exit(-1)
######################################################################
# Configuration
######################################################################
file_out = sys.argv[1]
filepath = '../data/'
tiny_files = ['tiny.dat']
files = ['chess_tiny.dat','chess_small.dat','chess.dat']
tiny_sizes = [1,2]
sizes = [1,2,3,4,5]
tiny_runs = 1000
runs = 1
trials = 1
if len(sys.argv) > 2:
runs = int(sys.argv[2])
runs = runs if runs > 0 else 1
if len(sys.argv) > 3:
trials = int(sys.argv[3])
trials = trials if trials > 0 else 1
support_percents = [.1,.2,.3,.4,.5]
######################################################################
# Logic
######################################################################
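# Small counter object: retInc() returns the current value and then
# increments it, giving each CSV row written below a unique index.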
class Incrementor(object):
def __init__(self,value=0):
self.i = value
def retInc(self):
old = self.i
self.i += 1
return old
i = Incrementor()
out_file = open(file_out,'a')
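# Time apriori and fp-growth for patterns of length k at the given minimum
# support, appending one CSV line per algorithm to the output file.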
def timePatterns(ds,k,min_sup,runs):
timers = {}
timers['apriori'] = Timer(lambda: aprioriPatterns(ds,k,min_sup))
timers['fp-growth'] = Timer(lambda: fpGrowthPatterns(ds,k,min_sup))
#timers['eclat'] = Timer(lambda: eclatPatterns(ds,k,min_sup))
for key in timers.keys():
timer = timers[key]
print >> out_file, "{0},{1},{2},{3},{4},{5},{6}".format(\
i.retInc(),key,len(ds),k,min_sup,runs,timer.timeit(runs))
for _ in range(trials):
# run tiny tests
for tiny_file in tiny_files:
ds = Dataset()
with open(filepath + tiny_file,'rU') as f:
ds.readFromFile(f)
for size in tiny_sizes:
for prct in support_percents:
min_sup = int(prct * float(len(ds)))
timePatterns(ds,size,min_sup,tiny_runs)
out_file.flush()
# run tests
if runs < 1:
continue
for filename in files:
ds = Dataset()
with open(filepath + filename,'rU') as f:
ds.readFromFile(f)
for size in sizes:
for prct in support_percents:
min_sup = int(prct * float(len(ds)))
timePatterns(ds,size,min_sup,runs)
out_file.flush()
| isc | -6,962,409,095,318,493,000 | 27.563107 | 72 | 0.482325 | false |
SphinxKnight/kuma | docs/conf.py | 2 | 8785 | # -*- coding: utf-8 -*-
#
# Kuma documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 5 15:41:51 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys, os
from collections import OrderedDict
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kuma'
copyright = u'Mozilla'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'latest'
# The full version, including alpha/beta/rc tags.
release = 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'mozilla',
'github_repo': 'kuma',
'github_button': False,
'description': ('The platform that powers '
'<a href="https://developer.mozilla.org/en-US/">MDN</a>'),
'travis_button': False,
'codecov_button': False,
'extra_nav_links': OrderedDict((
('MDN', 'https://developer.mozilla.org'),
('MDN Staging', 'https://developer.allizom.org'),
('Kuma on GitHub', 'https://github.com/mozilla/kuma'),
('KumaScript on GitHub', 'https://github.com/mdn/kumascript'),
)),
'show_related': True,
'page_width': '100%'
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Kuma Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Kumadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Kuma.tex', u'Kuma Documentation',
u'Mozilla', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then top-level headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kuma', u'Kuma Documentation',
[u'Mozilla'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Kuma', u'Kuma Documentation',
u'Mozilla', 'Kuma',
'The Django based project of developer.mozilla.org.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| mpl-2.0 | -2,913,056,370,596,294,700 | 30.945455 | 80 | 0.683779 | false |
moxon6/chemlab | build/lib/chemlab/io/handlers/mol.py | 5 | 1539 | '''Mol file handler'''
import numpy as np
from .base import IOHandler
from ...core import Molecule
class MolIO(IOHandler):
'''Reader for MDL molfile
http://en.wikipedia.org/wiki/Chemical_table_file.
**Features**
.. method:: read("molecule")
Read the molecule in a :py:class:`~chemlab.core.Molecule`
instance.
'''
can_read = ['molecule']
def read(self, feature):
self.check_feature(feature, "read")
if feature == 'molecule':
string = self.fd.read().decode('utf-8')
return parse_mol_string(string)
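# Parse the body of an MDL molfile: line 3 is the counts line (numbers of
# atoms and bonds), followed by the atom block (x, y, z and element symbol)
# and the bond block (1-based atom indices plus bond order).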
def parse_mol_string(string):
lines = string.splitlines()
# lines 0-2 are header/comments
# line 3 is counting
natoms = int(lines[3][0:3])
nbonds = int(lines[3][3:6])
coords = []
types = []
bonds = []
bond_types = []
for i in range(natoms):
at_fields = lines[i + 4].split()
x, y, z, typ = at_fields[:4]
coords.append([float(x), float(y), float(z)])
types.append(typ)
offset = natoms + 4
for i in range(nbonds):
s = lines[offset + i][0:3]
e = lines[offset + i][3:6]
t = lines[offset + i][6:9]
bonds.append((int(s),int(e)))
bond_types.append(int(t))
mol = Molecule.from_arrays(r_array = np.array(coords)/10, # To nm
type_array = np.array(types))
mol.bonds = np.array(bonds) - 1
mol.bond_orders = np.array(bond_types)
return mol
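# Example usage (illustrative sketch, not part of the original module;
# assumes the chemlab.io.datafile helper recognises the .mol extension):
#
#     from chemlab.io import datafile
#     mol = datafile('benzene.mol').read('molecule')
#     print(mol.type_array, mol.bonds)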
| gpl-3.0 | -4,905,559,166,525,913,000 | 24.229508 | 69 | 0.54256 | false |
manahl/arctic | tests/unit/store/test_pandas_ndarray_store.py | 1 | 2759 | import numpy as np
from mock import Mock, sentinel, patch
from pytest import raises
# Do not remove PandasStore
from arctic.store._pandas_ndarray_store import PandasDataFrameStore, PandasPanelStore, PandasStore
from tests.util import read_str_as_pandas
def test_panel_converted_to_dataframe_and_stacked_to_write():
store = PandasPanelStore()
panel = Mock(shape=(1, 2, 3), axes=[Mock(names=['n%d' % i]) for i in range(3)])
panel.to_frame.return_value.dtypes = [sentinel.dtype]
with patch.object(PandasDataFrameStore, 'write') as mock_write:
with patch('arctic.store._pandas_ndarray_store.DataFrame') as DF:
store.write(sentinel.mlib, sentinel.version, sentinel.symbol, panel, sentinel.prev)
panel.to_frame.assert_called_with(filter_observations=False)
DF.assert_called_with(panel.to_frame.return_value.stack.return_value)
mock_write.assert_called_with(sentinel.mlib, sentinel.version, sentinel.symbol,
DF.return_value, sentinel.prev)
def test_panel_append_not_supported():
store = PandasPanelStore()
panel = Mock(shape=(1, 2, 3), axes=[Mock(names=['n%d' % i]) for i in range(3)], dtypes=['a'])
with raises(ValueError):
store.append(sentinel.mlib, sentinel.version, sentinel.symbol, panel, sentinel.prev)
def test_panel_converted_from_dataframe_for_reading():
store = PandasPanelStore()
with patch.object(PandasDataFrameStore, 'read') as mock_read:
res = store.read(sentinel.mlib, sentinel.version, sentinel.symbol)
mock_read.assert_called_with(sentinel.mlib, sentinel.version, sentinel.symbol)
assert res == mock_read.return_value.to_panel.return_value
def test_raises_upon_empty_panel_write():
store = PandasPanelStore()
panel = Mock(shape=(1, 0, 3))
with raises(ValueError):
store.write(sentinel.mlib, sentinel.version, sentinel.symbol, panel, sentinel.prev)
def test_read_multi_index_with_no_ts_info():
# github #81: old multi-index ts would not have tz info in metadata. Ensure read is not broken
df = read_str_as_pandas("""index 1 | index 2 | SPAM
2012-09-08 | 2015-01-01 | 1.0
2012-09-09 | 2015-01-02 | 1.1
2012-10-08 | 2015-01-03 | 2.0""", num_index=2)
store = PandasDataFrameStore()
record = store.SERIALIZER.serialize(df)[0]
# now take away timezone info from metadata
record = np.array(record.tolist(), dtype=np.dtype([('index 1', '<M8[ns]'), ('index 2', '<M8[ns]'), ('SPAM', '<f8')],
metadata={'index': ['index 1', 'index 2'], 'columns': ['SPAM']}))
assert store.SERIALIZER._index_from_records(record).equals(df.index)
| lgpl-2.1 | -7,561,901,930,065,368,000 | 47.403509 | 120 | 0.657122 | false |
JustinWingChungHui/okKindred | suggested_image_tagging/views.py | 2 | 2414 | from django.shortcuts import get_object_or_404
from rest_framework import viewsets
from rest_framework.exceptions import PermissionDenied, ParseError
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from suggested_image_tagging.models import SuggestedTag
from suggested_image_tagging.serializers import SuggestedTagSerializer
from image_tagging_api.serializers import TagSerializer
from message_queue.models import create_message
from common.utils import intTryParse
class SuggestedTagView(viewsets.GenericViewSet):
permission_classes = (IsAuthenticated,)
serializer_class = SuggestedTagSerializer
def get_queryset(self):
return SuggestedTag.objects.filter(image__family_id = self.request.user.family_id)
def list(self, request, *args, **kwargs):
'''
Lists suggested tags in users family.
Use query parameters ?image_id=<id> to filter by image
'''
queryset = SuggestedTag.objects.filter(image__family_id = self.request.user.family_id)
image_id = self.request.query_params.get('image_id', None)
if image_id is None:
raise ParseError('Invalid image_id')
queryset = queryset.filter(image_id=image_id)
serializer = SuggestedTagSerializer(queryset, many=True)
return Response(serializer.data)
def partial_update(self, request, pk=None):
'''
Converts suggested tag into a tag
'''
queryset = SuggestedTag.objects.filter(image__family_id = self.request.user.family_id)
suggested_tag = get_object_or_404(queryset, pk=pk)
person_id, person_id_valid = intTryParse(request.data.get("person_id"))
if not person_id_valid:
raise ParseError('Invalid person_id')
new_tag = suggested_tag.convertToTag(person_id)
# Send notification email
new_tag.send_tag_notification_email()
create_message('tag_converted_process', new_tag.id)
serializer = TagSerializer(new_tag)
return Response(serializer.data)
def destroy(self, request, pk=None):
'''
Deletes a suggested tag record
'''
queryset = SuggestedTag.objects.filter(image__family_id = self.request.user.family_id)
suggested_tag = get_object_or_404(queryset, pk=pk)
suggested_tag.delete()
return Response('OK')
| gpl-2.0 | 4,217,405,247,969,513,000 | 29.556962 | 94 | 0.690969 | false |
lbdreyer/iris | tools/generate_std_names.py | 4 | 3295 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
A script to convert the standard names information from the provided XML
file into a Python dictionary format.
Takes two arguments: the first is the XML file to process and the second
is the name of the file to write the Python dictionary file into.
By default, Iris will use the source XML file:
etc/cf-standard-name-table.xml
as obtained from:
http://cf-pcmdi.llnl.gov/documents/cf-standard-names
"""
import argparse
import pprint
import xml.etree.ElementTree as ET
STD_VALUES_FILE_TEMPLATE = '''
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
This file contains a dictionary of standard value names that are mapped
to another dictionary of other standard name attributes. Currently only
the `canonical_unit` exists in these attribute dictionaries.
This file is automatically generated. Do not edit this file by hand.
The file will be generated during a standard build/installation::
python setup.py build
python setup.py install
Also, the file can be re-generated in the source distribution via::
python setup.py std_names
Or for more control (e.g. to use an alternative XML file) via::
python tools/generate_std_names.py XML_FILE MODULE_FILE
"""
STD_NAMES = '''.lstrip()
def process_name_table(tree, element_name, *child_elements):
"""
Yields a series of dictionaries with the key being the id of the entry element and the value containing
another dictionary mapping other attributes of the standard name to their values, e.g. units, description, grib value etc.
"""
for elem in tree.iterfind(element_name):
sub_section = {}
for child_elem in child_elements:
found_elem = elem.find(child_elem)
sub_section[child_elem] = found_elem.text if found_elem is not None else None
yield {elem.get("id") : sub_section}
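# Collect canonical units for every entry, resolve each alias to the units of
# the entry it points at, and write the resulting STD_NAMES dictionary as
# Python source to the output file.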
def to_dict(infile, outfile):
values = {}
aliases = {}
tree = ET.parse(infile)
for section in process_name_table(tree, 'entry', 'canonical_units'):
values.update(section)
for section in process_name_table(tree, 'alias', 'entry_id'):
aliases.update(section)
for key, valued in aliases.items():
values.update({
key : {'canonical_units' : values.get(valued['entry_id']).get('canonical_units')}
})
outfile.write(STD_VALUES_FILE_TEMPLATE + pprint.pformat(values))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Create Python code from CF standard name XML.')
parser.add_argument('input', metavar='INPUT',
help='Path to CF standard name XML')
parser.add_argument('output', metavar='OUTPUT',
help='Path to resulting Python code')
args = parser.parse_args()
encoding = {'encoding': 'utf-8'}
with open(args.input, 'r', **encoding) as in_fh:
with open(args.output, 'w', **encoding) as out_fh:
to_dict(in_fh, out_fh)
| lgpl-3.0 | 5,025,103,709,835,514,000 | 30.682692 | 126 | 0.688012 | false |
robwarm/gpaw-symm | gpaw/response/base.py | 1 | 21011 | import sys
from time import time, ctime
import numpy as np
from math import sqrt, pi
from datetime import timedelta
from ase.units import Hartree, Bohr
from gpaw import GPAW, extra_parameters
from gpaw.utilities import unpack, devnull
from gpaw.utilities.blas import gemmdot, gemv
from gpaw.mpi import world, rank, size, serial_comm
from gpaw.lfc import LocalizedFunctionsCollection as LFC
from gpaw.grid_descriptor import GridDescriptor
from gpaw.utilities.memory import maxrss
from gpaw.fd_operators import Gradient
from gpaw.response.cell import get_primitive_cell, set_Gvectors
from gpaw.response.math_func import delta_function, \
two_phi_planewave_integrals
from gpaw.response.parallel import set_communicator, \
parallel_partition, SliceAlongFrequency, SliceAlongOrbitals
from gpaw.response.kernel import calculate_Kxc, calculate_Kc, calculate_Kc_q
from gpaw.kpt_descriptor import KPointDescriptor
from gpaw.wavefunctions.pw import PWLFC
import gpaw.wavefunctions.pw as pw
class BASECHI:
"""This class is to store the basic common stuff for chi and bse."""
def __init__(self,
calc=None,
nbands=None,
w=None,
q=None,
eshift=None,
ecut=10.,
density_cut=None,
G_plus_q=False,
eta=0.2,
rpad=None,
ftol=1e-5,
txt=None,
optical_limit=False):
if rpad is None:
rpad = np.ones(3, int)
self.txtname = txt
self.output_init()
if isinstance(calc, str):
# Always use serial_communicator when a filename is given.
self.calc = GPAW(calc, communicator=serial_comm, txt=None)
else:
# To be optimized so that the communicator is loaded automatically
# according to kcommsize.
#
# so temporarily it is used like this :
# kcommsize = int (should <= world.size)
# r0 = rank % kcommsize
# ranks = np.arange(r0, r0+size, kcommsize)
# calc = GPAW(filename.gpw, communicator=ranks, txt=None)
self.calc = calc
if self.calc is not None:
self.pwmode = isinstance(self.calc.wfs, pw.PWWaveFunctions)
else:
self.pwmode = False
if self.pwmode:
assert self.calc.wfs.world.size == 1
self.nbands = nbands
self.q_c = q
# chi.py modifies the input array w by dividing by Hartree.
# This will change the user-supplied arrays in-place unless
# we create a copy. So now we create a copy. *Grumble*
#
# To make matters worse, w is allowed to be None (why not take
# care of that *before*?? This should really be cleaned up.
if isinstance(w, np.ndarray):
w = w.copy()
self.w_w = w
self.eta = eta
self.ftol = ftol
if isinstance(ecut, int) or isinstance(ecut, float):
self.ecut = np.ones(3) * ecut
else:
assert len(ecut) == 3
self.ecut = np.array(ecut, dtype=float)
self.density_cut = density_cut
self.G_plus_q = G_plus_q
self.rpad = rpad
self.optical_limit = optical_limit
if self.optical_limit:
self.qopt = 1e-5
self.eshift = eshift
def initialize(self):
self.eta /= Hartree
self.ecut /= Hartree
calc = self.calc
self.nspins = self.calc.wfs.nspins
# kpoint init
self.kd = kd = calc.wfs.kd
self.nikpt = kd.nibzkpts
self.ftol /= kd.nbzkpts
# cell init
self.acell_cv = calc.wfs.gd.cell_cv
self.acell_cv, self.bcell_cv, self.vol, self.BZvol = \
get_primitive_cell(self.acell_cv,rpad=self.rpad)
# grid init
gd = calc.wfs.gd.new_descriptor(comm=serial_comm)
self.pbc = gd.pbc_c
self.gd = gd
self.nG0 = np.prod(gd.N_c)
# Number of grid points and volume including zero padding
self.nGrpad = gd.N_c * self.rpad
self.nG0rpad = np.prod(self.nGrpad)
self.d_c = [Gradient(gd, i, n=4, dtype=complex).apply for i in range(3)]
# obtain eigenvalues, occupations
nibzkpt = kd.nibzkpts
kweight_k = kd.weight_k
self.eFermi = self.calc.occupations.get_fermi_level()
try:
self.e_skn
self.printtxt('Use eigenvalues from user.')
        except AttributeError:
self.printtxt('Use eigenvalues from the calculator.')
self.e_skn = {}
self.f_skn = {}
for ispin in range(self.nspins):
self.e_skn[ispin] = np.array([calc.get_eigenvalues(kpt=k, spin=ispin)
for k in range(nibzkpt)]) / Hartree
self.f_skn[ispin] = np.array([calc.get_occupation_numbers(kpt=k, spin=ispin)
/ kweight_k[k]
for k in range(nibzkpt)]) / kd.nbzkpts
#self.printtxt('Eigenvalues(k=0) are:')
#print >> self.txt, self.e_skn[0][0] * Hartree
self.enoshift_skn = {}
for ispin in range(self.nspins):
self.enoshift_skn[ispin] = self.e_skn[ispin].copy()
if self.eshift is not None:
self.add_discontinuity(self.eshift)
self.printtxt('Shift unoccupied bands by %f eV' % (self.eshift))
# k + q init
if self.q_c is not None:
self.qq_v = np.dot(self.q_c, self.bcell_cv) # summation over c
if self.optical_limit:
kq_k = np.arange(kd.nbzkpts)
self.expqr_g = 1.
else:
r_vg = gd.get_grid_point_coordinates() # (3, nG)
qr_g = gemmdot(self.qq_v, r_vg, beta=0.0)
self.expqr_g = np.exp(-1j * qr_g)
del r_vg, qr_g
kq_k = kd.find_k_plus_q(self.q_c)
self.kq_k = kq_k
# Plane wave init
if self.G_plus_q:
self.npw, self.Gvec_Gc, self.Gindex_G = set_Gvectors(self.acell_cv,
self.bcell_cv,
self.gd.N_c,
self.ecut,
q=self.q_c)
else:
self.npw, self.Gvec_Gc, self.Gindex_G = set_Gvectors(self.acell_cv,
self.bcell_cv,
self.gd.N_c,
self.ecut)
# band init
if self.nbands is None:
self.nbands = calc.wfs.bd.nbands
self.nvalence = calc.wfs.nvalence
# Projectors init
setups = calc.wfs.setups
self.spos_ac = calc.atoms.get_scaled_positions()
if self.pwmode:
self.pt = PWLFC([setup.pt_j for setup in setups], self.calc.wfs.pd)
self.pt.set_positions(self.spos_ac)
else:
self.pt = LFC(gd, [setup.pt_j for setup in setups],
KPointDescriptor(self.kd.bzk_kc),
dtype=complex, forces=True)
self.pt.set_positions(self.spos_ac)
# Printing calculation information
self.print_stuff()
return
def output_init(self):
if self.txtname is None:
if rank == 0:
self.txt = sys.stdout
else:
sys.stdout = devnull
self.txt = devnull
elif self.txtname == devnull:
self.txt = devnull
else:
assert type(self.txtname) is str
from ase.parallel import paropen
self.txt = paropen(self.txtname,'w')
def printtxt(self, text):
print >> self.txt, text
def print_stuff(self):
printtxt = self.printtxt
printtxt('')
printtxt('Parameters used:')
printtxt('')
printtxt('Unit cell (a.u.):')
printtxt(self.acell_cv)
printtxt('Volume of cell (a.u.**3) : %f' % self.vol)
printtxt('Reciprocal cell (1/a.u.)')
printtxt(self.bcell_cv)
printtxt('BZ volume (1/a.u.**3) : %f' % self.BZvol)
printtxt('Number of G-vectors / Grid : %d %s'
% (self.nG0, tuple(self.gd.N_c)))
printtxt('')
printtxt('Coulomb interaction cutoff : %s' % self.vcut)
printtxt('')
printtxt('Number of bands : %d' % self.nbands)
printtxt('Number of kpoints : %d' % self.kd.nbzkpts)
if self.ecut[0] == self.ecut[1] and self.ecut[0] == self.ecut[2]:
printtxt('Planewave ecut (eV) : %4.1f' % (self.ecut[0] * Hartree))
else:
printtxt('Planewave ecut (eV) : (%f, %f, %f)' % tuple(self.ecut * Hartree))
printtxt('Number of planewave used : %d' % self.npw)
printtxt('Broadening (eta) : %f' % (self.eta * Hartree))
printtxt('')
if self.q_c is not None:
if self.optical_limit:
printtxt('Optical limit calculation ! (q=1e-5)')
else:
printtxt('q in reduced coordinate : (%f %f %f)' % tuple(self.q_c))
printtxt('q in cartesian coordinate (1/A): (%f %f %f)' % tuple(self.qq_v / Bohr))
printtxt('|q| (1/A) : %f' % np.linalg.norm(self.qq_v / Bohr))
def timing(self, i, t0, n_local, txt):
if i == 0:
dt = time() - t0
self.totaltime = dt * n_local
self.printtxt(' Finished %s 0 in %s, estimate %s left.'
% (txt, timedelta(seconds=round(dt)),
timedelta(seconds=round(self.totaltime))))
if rank == 0 and n_local // 5 > 0:
if i > 0 and i % (n_local // 5) == 0:
dt = time() - t0
self.printtxt(' Finished %s %d in %s, estimate %s left.'
% (txt, i, timedelta(seconds=round(dt)),
timedelta(seconds=round(self.totaltime
- dt))))
def get_phi_aGp(self, q_c=None, parallel=True, alldir=False):
if q_c is None:
q_c = self.q_c
qq_v = self.qq_v
optical_limit = self.optical_limit
else:
optical_limit = False
if np.abs(q_c).sum() < 1e-8:
q_c = np.array([0.0001, 0, 0])
optical_limit = True
qq_v = np.dot(q_c, self.bcell_cv)
setups = self.calc.wfs.setups
spos_ac = self.calc.atoms.get_scaled_positions()
kk_Gv = gemmdot(q_c + self.Gvec_Gc, self.bcell_cv.copy(), beta=0.0)
phi_aGp = {}
phiG0_avp = {}
if parallel:
from gpaw.response.parallel import parallel_partition
npw, npw_local, Gstart, Gend = parallel_partition(
self.npw, self.comm.rank, self.comm.size, reshape=False)
else:
Gstart = 0
Gend = self.npw
for a, id in enumerate(setups.id_a):
phi_aGp[a] = two_phi_planewave_integrals(kk_Gv, setups[a], Gstart, Gend)
for iG in range(Gstart, Gend):
phi_aGp[a][iG] *= np.exp(-1j * 2. * pi *
np.dot(q_c + self.Gvec_Gc[iG], spos_ac[a]) )
if parallel:
self.comm.sum(phi_aGp[a])
# For optical limit, G == 0 part should change
if optical_limit:
for a, id in enumerate(setups.id_a):
nabla_iiv = setups[a].nabla_iiv
phi_aGp[a][0] = -1j * (np.dot(nabla_iiv, qq_v)).ravel()
phiG0_avp[a] = np.zeros((3, len(phi_aGp[a][0])), complex)
for dir in range(3): # 3 dimension
q2_c = np.diag((1,1,1))[dir] * self.qopt
qq2_v = np.dot(q2_c, self.bcell_cv) # summation over c
phiG0_avp[a][dir] = -1j * (np.dot(nabla_iiv, qq2_v)).ravel()
if alldir:
return phi_aGp, phiG0_avp
else:
return phi_aGp
def get_wavefunction(self, ibzk, n, check_focc=True, spin=0):
if (self.calc.wfs.world.size == 1 or self.calc.wfs.gd.comm.size != 1
or self.calc.input_parameters['mode'] == 'lcao'):
if not check_focc:
return
else:
psit_G = self.calc.wfs.get_wave_function_array(n, ibzk, spin)
if self.calc.wfs.world.size == 1:
return np.complex128(psit_G)
if self.calc.wfs.world.rank != 0:
psit_G = self.calc.wfs.gd.empty(dtype=self.calc.wfs.dtype,
global_array=True)
self.calc.wfs.world.broadcast(psit_G, 0)
return np.complex128(psit_G)
else:
            # Support ground-state calculations with k-point and band
            # parallelization, but the domain-decomposition size must be 1.
kpt_rank, u = self.calc.wfs.kd.get_rank_and_index(0, ibzk)
bzkpt_rank = self.kcomm.rank
band_rank, myn = self.calc.wfs.bd.who_has(n)
assert self.calc.wfs.gd.comm.size == 1
world_rank = (kpt_rank * self.calc.wfs.band_comm.size + band_rank)
# in the following, kpt_rank is assigned to world_rank
klist = np.array([world_rank, u, bzkpt_rank, myn])
klist_kcomm = np.zeros((self.kcomm.size, 4), dtype=int)
self.kcomm.all_gather(klist, klist_kcomm)
check_focc_global = np.zeros(self.kcomm.size, dtype=bool)
self.kcomm.all_gather(np.array([check_focc]), check_focc_global)
psit_G = self.calc.wfs.gd.empty(dtype=self.calc.wfs.dtype)
for i in range(self.kcomm.size):
if check_focc_global[i]:
kpt_rank, u, bzkpt_rank, nlocal = klist_kcomm[i]
if kpt_rank == bzkpt_rank:
if rank == kpt_rank:
psit_G = self.calc.wfs.kpt_u[u].psit_nG[nlocal]
else:
if rank == kpt_rank:
world.send(self.calc.wfs.kpt_u[u].psit_nG[nlocal],
bzkpt_rank, 1300+bzkpt_rank)
if rank == bzkpt_rank:
psit_G = self.calc.wfs.gd.empty(dtype=self.calc.wfs.dtype)
world.receive(psit_G, kpt_rank, 1300+bzkpt_rank)
self.wScomm.broadcast(psit_G, 0)
return psit_G
def add_discontinuity(self, shift):
for ispin in range(self.nspins):
for k in range(self.kd.nibzkpts):
for i in range(self.e_skn[0].shape[1]):
if self.e_skn[ispin][k,i] > self.eFermi:
self.e_skn[ispin][k,i] += shift / Hartree
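    # density_matrix() below evaluates the pair density n_{nk, m k+q}(G) in
    # the plane-wave basis (or the real-space product when Gspace is False),
    # including the PAW corrections built from the projector overlaps.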
def density_matrix(self, n, m, k, kq=None,
spin1=0, spin2=0, phi_aGp=None, Gspace=True):
gd = self.gd
kd = self.kd
optical_limit = False
if kq is None:
kq = self.kq_k[k]
expqr_g = self.expqr_g
q_v = self.qq_v
optical_limit = self.optical_limit
q_c = self.q_c
else:
q_c = kd.bzk_kc[kq] - kd.bzk_kc[k]
q_c[np.where(q_c>0.501)] -= 1
q_c[np.where(q_c<-0.499)] += 1
if (np.abs(q_c) < self.ftol).all():
optical_limit = True
q_c = self.q_c
q_v = np.dot(q_c, self.bcell_cv)
r_vg = gd.get_grid_point_coordinates() # (3, nG)
qr_g = gemmdot(q_v, r_vg, beta=0.0)
expqr_g = np.exp(-1j * qr_g)
if optical_limit:
expqr_g = 1
ibzkpt1 = kd.bz2ibz_k[k]
ibzkpt2 = kd.bz2ibz_k[kq]
psitold_g = self.get_wavefunction(ibzkpt1, n, True, spin=spin1)
psit1_g = kd.transform_wave_function(psitold_g, k)
psitold_g = self.get_wavefunction(ibzkpt2, m, True, spin=spin2)
psit2_g = kd.transform_wave_function(psitold_g, kq)
if Gspace is False:
return psit1_g.conj() * psit2_g * expqr_g
else:
tmp_g = psit1_g.conj()* psit2_g * expqr_g
# zero padding is included through the FFT
rho_g = np.fft.fftn(tmp_g, s=self.nGrpad) * self.vol / self.nG0rpad
# Here, planewave cutoff is applied
rho_G = rho_g.ravel()[self.Gindex_G]
if optical_limit:
dpsit_g = gd.empty(dtype=complex)
tmp = np.zeros((3), dtype=complex)
phase_cd = np.exp(2j * pi * gd.sdisp_cd * kd.bzk_kc[kq, :, np.newaxis])
for ix in range(3):
self.d_c[ix](psit2_g, dpsit_g, phase_cd)
tmp[ix] = gd.integrate(psit1_g.conj() * dpsit_g)
rho_G[0] = -1j * np.dot(q_v, tmp)
calc = self.calc
pt = self.pt
if not self.pwmode:
if calc.wfs.world.size > 1 or kd.nbzkpts == 1:
P1_ai = pt.dict()
pt.integrate(psit1_g, P1_ai, k)
P2_ai = pt.dict()
pt.integrate(psit2_g, P2_ai, kq)
else:
P1_ai = self.get_P_ai(k, n, spin1)
P2_ai = self.get_P_ai(kq, m, spin2)
else:
# first calculate P_ai at ibzkpt, then rotate to k
u = self.kd.get_rank_and_index(spin1, ibzkpt1)[1]
Ptmp_ai = pt.dict()
kpt = calc.wfs.kpt_u[u]
pt.integrate(kpt.psit_nG[n], Ptmp_ai, ibzkpt1)
P1_ai = self.get_P_ai(k, n, spin1, Ptmp_ai)
u = self.kd.get_rank_and_index(spin2, ibzkpt2)[1]
Ptmp_ai = pt.dict()
kpt = calc.wfs.kpt_u[u]
pt.integrate(kpt.psit_nG[m], Ptmp_ai, ibzkpt2)
P2_ai = self.get_P_ai(kq, m, spin2, Ptmp_ai)
if phi_aGp is None:
try:
if not self.mode == 'RPA':
if optical_limit:
iq = kd.where_is_q(np.zeros(3), self.bzq_qc)
else:
iq = kd.where_is_q(q_c, self.bzq_qc)
assert np.abs(self.bzq_qc[iq] - q_c).sum() < 1e-8
phi_aGp = self.load_phi_aGp(self.reader, iq) #phi_qaGp[iq]
except AttributeError:
phi_aGp = self.phi_aGp
for a, id in enumerate(self.calc.wfs.setups.id_a):
P_p = np.outer(P1_ai[a].conj(), P2_ai[a]).ravel()
phi_Gp = np.ascontiguousarray(phi_aGp[a], complex)
gemv(1.0, phi_Gp, P_p, 1.0, rho_G)
if optical_limit:
if n==m:
rho_G[0] = 1.
elif np.abs(self.e_skn[spin2][ibzkpt2, m] - self.e_skn[spin1][ibzkpt1, n]) < 1e-5:
rho_G[0] = 0.
else:
rho_G[0] /= (self.enoshift_skn[spin2][ibzkpt2, m] - self.enoshift_skn[spin1][ibzkpt1, n])
return rho_G
def get_P_ai(self, k, n, spin=0, Ptmp_ai=None):
calc = self.calc
kd = self.calc.wfs.kd
spos_ac = self.spos_ac
ibzkpt = kd.bz2ibz_k[k]
u = ibzkpt + kd.nibzkpts * spin
kpt = calc.wfs.kpt_u[u]
s = kd.sym_k[k]
time_reversal = kd.time_reversal_k[k]
P_ai = {}
for a, id in enumerate(calc.wfs.setups.id_a):
b = kd.symmetry.a_sa[s, a]
S_c = (np.dot(spos_ac[a], kd.symmetry.op_scc[s]) - kd.symmetry.ft_sc[s] - spos_ac[b])
#print abs(S_c.round() - S_c).max()
#print 'S_c', abs(S_c).max()
            assert abs(S_c.round() - S_c).max() < 1e-8
k_c = kd.ibzk_kc[kpt.k]
x = np.exp(2j * pi * np.dot(k_c, S_c))
if Ptmp_ai is None:
P_i = np.dot(calc.wfs.setups[a].R_sii[s], kpt.P_ani[b][n]) * x
else:
P_i = np.dot(calc.wfs.setups[a].R_sii[s], Ptmp_ai[b]) * x
if time_reversal:
P_i = P_i.conj()
P_ai[a] = P_i
return P_ai
| gpl-3.0 | 7,385,269,783,080,632,000 | 38.718336 | 109 | 0.48527 | false |
rex-xxx/mt6572_x201 | build/tools/generate-notice-files.py | 9 | 6550 | #!/usr/bin/env python
#
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: generate-notice-files [plain text output file] [html output file] [file title] [directory of notices]
Generate the Android notice files, including both text and html files.
-h to display this usage message and exit.
"""
from collections import defaultdict
import getopt
import hashlib
import itertools
import os
import os.path
import re
import sys
MD5_BLOCKSIZE = 1024 * 1024
HTML_ESCAPE_TABLE = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
try:
opts, args = getopt.getopt(sys.argv[1:], "h")
except getopt.GetoptError, err:
print str(err)
print __doc__
sys.exit(2)
for o, a in opts:
if o == "-h":
print __doc__
sys.exit(2)
else:
print >> sys.stderr, "unhandled option %s" % (o,)
if len(args) != 4:
print """need exactly four arguments, the two output files, the file title
and the directory containing notices, not %d""" % (len(args),)
print __doc__
sys.exit(1)
def hexify(s):
return ("%02x"*len(s)) % tuple(map(ord, s))
def md5sum(filename):
"""Calculate an MD5 of the file given by FILENAME,
and return hex digest as a string.
Output should be compatible with md5sum command"""
f = open(filename, "rb")
sum = hashlib.md5()
while 1:
block = f.read(MD5_BLOCKSIZE)
if not block:
break
sum.update(block)
f.close()
return hexify(sum.digest())
def html_escape(text):
"""Produce entities within text."""
return "".join(HTML_ESCAPE_TABLE.get(c,c) for c in text)
HTML_OUTPUT_CSS="""
<style type="text/css">
body { padding: 0; font-family: sans-serif; }
.same-license { background-color: #eeeeee; border-top: 20px solid white; padding: 10px; }
.label { font-weight: bold; }
.file-list { margin-left: 1em; color: blue; }
</style>
"""
def combine_notice_files_html(file_hash, input_dir, output_filename):
"""Combine notice files in FILE_HASH and output a HTML version to OUTPUT_FILENAME."""
SRC_DIR_STRIP_RE = re.compile(input_dir + "(/.*).txt")
    # Set up a filename-to-row-id table (anchors inside tables don't work in
    # most browsers, but hrefs to table row ids do).
id_table = {}
id_count = 0
for value in file_hash.values():
for filename in value:
id_table[filename] = id_count
id_count += 1
# Open the output file, and output the header pieces
output_file = open(output_filename, "wb")
print >> output_file, "<html><head>"
print >> output_file, HTML_OUTPUT_CSS
print >> output_file, '</head><body topmargin="0" leftmargin="0" rightmargin="0" bottommargin="0">'
# Output our table of contents
print >> output_file, '<div class="toc">'
print >> output_file, "<ul>"
# Flatten the list of lists into a single list of filenames
sorted_filenames = sorted(itertools.chain.from_iterable(file_hash.values()))
# Print out a nice table of contents
for filename in sorted_filenames:
stripped_filename = SRC_DIR_STRIP_RE.sub(r"\1", filename)
print >> output_file, '<li><a href="#id%d">%s</a></li>' % (id_table.get(filename), stripped_filename)
print >> output_file, "</ul>"
print >> output_file, "</div><!-- table of contents -->"
# Output the individual notice file lists
print >>output_file, '<table cellpadding="0" cellspacing="0" border="0">'
for value in file_hash.values():
print >> output_file, '<tr id="id%d"><td class="same-license">' % id_table.get(value[0])
print >> output_file, '<div class="label">Notices for file(s):</div>'
print >> output_file, '<div class="file-list">'
for filename in sorted(value):
print >> output_file, "%s <br/>" % (SRC_DIR_STRIP_RE.sub(r"\1", filename))
print >> output_file, "</div><!-- file-list -->"
print >> output_file
print >> output_file, '<pre class="license-text">'
print >> output_file, html_escape(open(value[0]).read())
print >> output_file, "</pre><!-- license-text -->"
print >> output_file, "</td></tr><!-- same-license -->"
print >> output_file
print >> output_file
print >> output_file
# Finish off the file output
print >> output_file, "</table>"
print >> output_file, "</body></html>"
output_file.close()
def combine_notice_files_text(file_hash, input_dir, output_filename, file_title):
"""Combine notice files in FILE_HASH and output a text version to OUTPUT_FILENAME."""
SRC_DIR_STRIP_RE = re.compile(input_dir + "(/.*).txt")
output_file = open(output_filename, "wb")
print >> output_file, file_title
for value in file_hash.values():
print >> output_file, "============================================================"
print >> output_file, "Notices for file(s):"
for filename in sorted(value):
print >> output_file, SRC_DIR_STRIP_RE.sub(r"\1", filename)
print >> output_file, "------------------------------------------------------------"
print >> output_file, open(value[0]).read()
output_file.close()
def main(args):
txt_output_file = args[0]
html_output_file = args[1]
file_title = args[2]
# Find all the notice files and md5 them
input_dir = os.path.normpath(args[3])
files_with_same_hash = defaultdict(list)
for root, dir, files in os.walk(input_dir):
for file in files:
if file.endswith(".txt"):
filename = os.path.join(root, file)
file_md5sum = md5sum(filename)
files_with_same_hash[file_md5sum].append(filename)
print "Combining NOTICE files into HTML"
combine_notice_files_html(files_with_same_hash, input_dir, html_output_file)
print "Combining NOTICE files into text"
combine_notice_files_text(files_with_same_hash, input_dir, txt_output_file, file_title)
if __name__ == "__main__":
main(args)
| gpl-2.0 | 1,316,537,114,454,833,700 | 33.840426 | 109 | 0.619695 | false |
shubhamgupta123/erpnext | erpnext/setup/doctype/global_defaults/global_defaults.py | 16 | 2813 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
"""Global Defaults"""
import frappe
import frappe.defaults
from frappe.utils import cint
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
keydict = {
# "key in defaults": "key in Global Defaults"
"fiscal_year": "current_fiscal_year",
'company': 'default_company',
'currency': 'default_currency',
"country": "country",
'hide_currency_symbol':'hide_currency_symbol',
'account_url':'account_url',
'disable_rounded_total': 'disable_rounded_total',
'disable_in_words': 'disable_in_words',
}
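# For example, the "default_company" value entered in Global Defaults is
# written to the "company" default key via frappe.db.set_default("company", ...)
# in on_update() below.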
from frappe.model.document import Document
class GlobalDefaults(Document):
def on_update(self):
"""update defaults"""
for key in keydict:
frappe.db.set_default(key, self.get(keydict[key], ''))
# update year start date and year end date from fiscal_year
year_start_end_date = frappe.db.sql("""select year_start_date, year_end_date
from `tabFiscal Year` where name=%s""", self.current_fiscal_year)
if year_start_end_date:
ysd = year_start_end_date[0][0] or ''
yed = year_start_end_date[0][1] or ''
if ysd and yed:
frappe.db.set_default('year_start_date', ysd.strftime('%Y-%m-%d'))
frappe.db.set_default('year_end_date', yed.strftime('%Y-%m-%d'))
# enable default currency
if self.default_currency:
frappe.db.set_value("Currency", self.default_currency, "enabled", 1)
self.toggle_rounded_total()
self.toggle_in_words()
# clear cache
frappe.clear_cache()
def get_defaults(self):
return frappe.defaults.get_defaults()
def toggle_rounded_total(self):
self.disable_rounded_total = cint(self.disable_rounded_total)
# Make property setters to hide rounded total fields
for doctype in ("Quotation", "Sales Order", "Sales Invoice", "Delivery Note",
"Supplier Quotation", "Purchase Order"):
make_property_setter(doctype, "base_rounded_total", "hidden", self.disable_rounded_total, "Check")
make_property_setter(doctype, "base_rounded_total", "print_hide", 1, "Check")
make_property_setter(doctype, "rounded_total", "hidden", self.disable_rounded_total, "Check")
make_property_setter(doctype, "rounded_total", "print_hide", self.disable_rounded_total, "Check")
def toggle_in_words(self):
self.disable_in_words = cint(self.disable_in_words)
# Make property setters to hide in words fields
for doctype in ("Quotation", "Sales Order", "Sales Invoice", "Delivery Note",
"Supplier Quotation", "Purchase Order", "Purchase Invoice", "Purchase Receipt"):
make_property_setter(doctype, "in_words", "hidden", self.disable_in_words, "Check")
make_property_setter(doctype, "in_words", "print_hide", self.disable_in_words, "Check")
| gpl-3.0 | -8,289,260,682,832,391,000 | 36.506667 | 101 | 0.713118 | false |
kevinjqiu/goal | goal/db/competition.py | 1 | 1721 | from sqlalchemy import Column, String, ForeignKey, Integer
from sqlalchemy.orm import relationship
from .base import Base
class Competition(Base):
__tablename__ = 'competition'
competition_id = Column(String(10), primary_key=True)
country_id = Column(
String(3), ForeignKey('country.country_id'), nullable=False)
name = Column(String(64), nullable=False)
tier = Column(Integer)
    promotion_to = Column(String(10), ForeignKey('competition.competition_id'))
    relegation_to = Column(String(10), ForeignKey('competition.competition_id'))
num_promoted = Column(Integer)
num_relegated = Column(Integer)
seasons = relationship(
'Season', uselist=True, backref="competition",
order_by='Season.start_year')
higher_tier_competition = relationship(
'Competition', foreign_keys=[relegation_to], uselist=False)
lower_tier_competition = relationship(
'Competition', foreign_keys=[promotion_to], uselist=False)
def __json__(self):
return {
'id': self.competition_id,
'country_id': self.country_id,
'name': self.name,
'tier': self.tier,
}
def __repr__(self):
return "<Competition: {},{}>".format(self.competition_id, self.name)
@classmethod
def get_by_id(cls, competition_id):
return (
cls.session.query(Competition)
.filter_by(competition_id=competition_id)
.one()
)
@classmethod
def get_by_country_id(cls, country_id):
return (
cls.session.query(Competition)
.filter_by(country_id=country_id)
.order_by(Competition.tier.asc())
.all()
)
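    # Illustrative usage (the identifiers are hypothetical; assumes the
    # session has already been configured):
    #   Competition.get_by_id('ENG1')
    #   Competition.get_by_country_id('ENG')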
| bsd-3-clause | 3,090,250,009,907,261,400 | 31.471698 | 77 | 0.617664 | false |
morrisonwudi/zipline | tests/test_assets.py | 8 | 31178 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
import sys
from unittest import TestCase
from datetime import datetime, timedelta
import pickle
import uuid
import warnings
import pandas as pd
from pandas.tseries.tools import normalize_date
from pandas.util.testing import assert_frame_equal
from nose_parameterized import parameterized
from numpy import full
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.assets.futures import FutureChain
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
RootSymbolNotFound,
)
from zipline.finance.trading import with_environment
from zipline.utils.test_utils import (
all_subindices,
make_rotating_asset_info,
)
def build_lookup_generic_cases():
"""
Generate test cases for AssetFinder test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
frame = pd.DataFrame.from_records(
[
{
'sid': 0,
'file_name': 'duplicated',
'company_name': 'duplicated_0',
'start_date_nano': dupe_0_start.value,
'end_date_nano': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'file_name': 'duplicated',
'company_name': 'duplicated_1',
'start_date_nano': dupe_1_start.value,
'end_date_nano': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'file_name': 'unique',
'company_name': 'unique',
'start_date_nano': unique_start.value,
'end_date_nano': unique_end.value,
'exchange': '',
},
],
)
finder = AssetFinder(metadata=frame)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
cases = [
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'duplicated', dupe_0_start, dupe_0),
(finder, 'duplicated', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'unique', unique_start, unique),
(finder, 'unique', None, unique),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('duplicated', 'unique'), dupe_0_start, [dupe_0, unique]),
(finder, ('duplicated', 'unique'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('duplicated', 2, 'unique', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
]
return cases
class AssetTestCase(TestCase):
def test_asset_object(self):
self.assertEquals({5061: 'foo'}[Asset(5061)], 'foo')
self.assertEquals(Asset(5061), 5061)
self.assertEquals(5061, Asset(5061))
self.assertEquals(Asset(5061), Asset(5061))
self.assertEquals(int(Asset(5061)), 5061)
self.assertEquals(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
# Very wow
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
exchange='THE MOON',
)
s_unpickled = pickle.loads(pickle.dumps(s))
attrs_to_check = ['end_date',
'exchange',
'first_traded',
'end_date',
'asset_name',
'start_date',
'sid',
'start_date',
'symbol']
for attr in attrs_to_check:
self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
def test_asset_comparisons(self):
s_23 = Asset(23)
s_24 = Asset(24)
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(Asset(3) < Asset(4))
self.assertFalse(Asset(4) < Asset(4))
self.assertFalse(Asset(5) < Asset(4))
def test_le(self):
self.assertTrue(Asset(3) <= Asset(4))
self.assertTrue(Asset(4) <= Asset(4))
self.assertFalse(Asset(5) <= Asset(4))
def test_eq(self):
self.assertFalse(Asset(3) == Asset(4))
self.assertTrue(Asset(4) == Asset(4))
self.assertFalse(Asset(5) == Asset(4))
def test_ge(self):
self.assertFalse(Asset(3) >= Asset(4))
self.assertTrue(Asset(4) >= Asset(4))
self.assertTrue(Asset(5) >= Asset(4))
def test_gt(self):
self.assertFalse(Asset(3) > Asset(4))
self.assertFalse(Asset(4) > Asset(4))
self.assertTrue(Asset(5) > Asset(4))
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(Asset(3) < 'a')
self.assertIsNotNone('a' < Asset(3))
else:
with self.assertRaises(TypeError):
Asset(3) < 'a'
with self.assertRaises(TypeError):
'a' < Asset(3)
class TestFuture(TestCase):
future = Future(
2468,
symbol='OMH15',
root_symbol='OM',
notice_date=pd.Timestamp('2014-01-20', tz='UTC'),
expiration_date=pd.Timestamp('2014-02-20', tz='UTC'),
contract_multiplier=500
)
def test_str(self):
strd = self.future.__str__()
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = self.future.__repr__()
self.assertTrue("Future" in reprd)
self.assertTrue("2468" in reprd)
self.assertTrue("OMH15" in reprd)
self.assertTrue("root_symbol='OM'" in reprd)
self.assertTrue(("notice_date=Timestamp('2014-01-20 00:00:00+0000', "
"tz='UTC')") in reprd)
self.assertTrue("expiration_date=Timestamp('2014-02-20 00:00:00+0000'"
in reprd)
self.assertTrue("contract_multiplier=500" in reprd)
def test_reduce(self):
reduced = self.future.__reduce__()
self.assertEqual(Future, reduced[0])
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
self.assertTrue('root_symbol' in dictd)
self.assertTrue('notice_date' in dictd)
self.assertTrue('expiration_date' in dictd)
self.assertTrue('contract_multiplier' in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
class AssetFinderTestCase(TestCase):
def test_lookup_symbol_fuzzy(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'TEST@%d' % i,
'company_name': "company%d" % i,
'start_date_nano': as_of.value,
'end_date_nano': as_of.value,
'exchange': uuid.uuid4().hex,
}
for i in range(3)
]
)
finder = AssetFinder(frame, fuzzy_char='@')
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
for i in range(2): # we do it twice to test for caching bugs
self.assertIsNone(finder.lookup_symbol('test', as_of))
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of)
)
# Adding an unnecessary fuzzy shouldn't matter.
self.assertEqual(
asset_1,
finder.lookup_symbol('test@1', as_of, fuzzy=True)
)
# Shouldn't find this with no fuzzy_str passed.
self.assertIsNone(finder.lookup_symbol('test1', as_of))
# Should find exact match.
self.assertEqual(
asset_1,
finder.lookup_symbol('test1', as_of, fuzzy=True),
)
def test_lookup_symbol_resolve_multiple(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'file_name': 'existing',
'company_name': 'existing',
'start_date_nano': date.value,
'end_date_nano': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
finder = AssetFinder(df)
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol_resolve_multiple('non_existing', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol_resolve_multiple('existing', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol_resolve_multiple(
'existing',
date,
)
self.assertEqual(result.symbol, 'existing')
self.assertEqual(result.sid, i)
@parameterized.expand(
build_lookup_generic_cases()
)
def test_lookup_generic(self, finder, symbols, reference_date, expected):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
results, missing = finder.lookup_generic(symbols, reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
{
'sid': 0,
'file_name': 'real',
'company_name': 'real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 1,
'file_name': 'also_real',
'company_name': 'also_real',
'start_date_nano': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'file_name': 'real_but_old',
'company_name': 'real_but_old',
'start_date_nano': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose start_date is **after** our query date. We should
# **not** find it.
{
'sid': 3,
'file_name': 'real_but_in_the_future',
'company_name': 'real_but_in_the_future',
'start_date_nano': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date_nano': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
finder = AssetFinder(data)
results, missing = finder.lookup_generic(
['real', 1, 'fake', 'real_but_old', 'real_but_in_the_future'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'real')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'also_real')
self.assertEqual(results[1].sid, 1)
self.assertEqual(results[2].symbol, 'real_but_old')
self.assertEqual(results[2].sid, 2)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'fake')
self.assertEqual(missing[1], 'real_but_in_the_future')
def test_insert_metadata(self):
finder = AssetFinder()
finder.insert_metadata(0,
asset_type='equity',
start_date='2014-01-01',
end_date='2015-01-01',
symbol="PLAY",
foo_data="FOO",)
# Test proper insertion
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
self.assertEqual(pd.Timestamp('2015-01-01', tz='UTC'),
equity.end_date)
# Test invalid field
self.assertFalse('foo_data' in finder.metadata_cache[0])
def test_consume_metadata(self):
# Test dict consumption
finder = AssetFinder()
dict_to_consume = {0: {'symbol': 'PLAY'},
1: {'symbol': 'MSFT'}}
finder.consume_metadata(dict_to_consume)
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
finder = AssetFinder()
# Test dataframe consumption
df = pd.DataFrame(columns=['asset_name', 'exchange'], index=[0, 1])
df['asset_name'][0] = "Dave'N'Busters"
df['exchange'][0] = "NASDAQ"
df['asset_name'][1] = "Microsoft"
df['exchange'][1] = "NYSE"
finder.consume_metadata(df)
self.assertEqual('NASDAQ', finder.metadata_cache[0]['exchange'])
self.assertEqual('Microsoft', finder.metadata_cache[1]['asset_name'])
def test_consume_asset_as_identifier(self):
# Build some end dates
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
fut_end = pd.Timestamp('2008-01-01', tz='UTC')
# Build some simple Assets
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
future_asset = Future(200, symbol="TESTFUT", end_date=fut_end)
# Consume the Assets
finder = AssetFinder()
finder.consume_identifiers([equity_asset, future_asset])
# Test equality with newly built Assets
self.assertEqual(equity_asset, finder.retrieve_asset(1))
self.assertEqual(future_asset, finder.retrieve_asset(200))
self.assertEqual(eq_end, finder.retrieve_asset(1).end_date)
self.assertEqual(fut_end, finder.retrieve_asset(200).end_date)
def test_sid_assignment(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
today = normalize_date(pd.Timestamp('2015-07-09', tz='UTC'))
# Build a finder that is allowed to assign sids
finder = AssetFinder(metadata=metadata,
allow_sid_assignment=True)
# Verify that Assets were built and different sids were assigned
play = finder.lookup_symbol('PLAY', today)
msft = finder.lookup_symbol('MSFT', today)
self.assertEqual('PLAY', play.symbol)
self.assertIsNotNone(play.sid)
self.assertNotEqual(play.sid, msft.sid)
def test_sid_assignment_failure(self):
# This metadata does not contain SIDs
metadata = {'PLAY': {'symbol': 'PLAY'},
'MSFT': {'symbol': 'MSFT'}}
# Build a finder that is not allowed to assign sids, asserting failure
with self.assertRaises(SidAssignmentError):
AssetFinder(metadata=metadata, allow_sid_assignment=False)
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_lookup_future_chain(self):
metadata = {
# Notice day is today, so not valid
2: {
'symbol': 'ADN15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-05-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
1: {
'symbol': 'ADV15',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-08-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
# Starts trading today, so should be valid.
0: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-05-14', tz='UTC')
},
# Copy of the above future, but starts trading in August,
# so it isn't valid.
3: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'asset_type': 'future',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-08-01', tz='UTC')
},
}
finder = AssetFinder(metadata=metadata)
dt = pd.Timestamp('2015-05-14', tz='UTC')
last_year = pd.Timestamp('2014-01-01', tz='UTC')
first_day = pd.Timestamp('2015-01-01', tz='UTC')
# Check that we get the expected number of contracts, in the
# right order
ad_contracts = finder.lookup_future_chain('AD', dt, dt)
self.assertEqual(len(ad_contracts), 2)
self.assertEqual(ad_contracts[0].sid, 1)
self.assertEqual(ad_contracts[1].sid, 0)
# Check that pd.NaT for knowledge_date uses the value of as_of_date
ad_contracts = finder.lookup_future_chain('AD', dt, pd.NaT)
self.assertEqual(len(ad_contracts), 2)
# Check that we get nothing if our knowledge date is last year
ad_contracts = finder.lookup_future_chain('AD', dt, last_year)
self.assertEqual(len(ad_contracts), 0)
# Check that we get things that start on the knowledge date
ad_contracts = finder.lookup_future_chain('AD', dt, first_day)
self.assertEqual(len(ad_contracts), 1)
# Check that pd.NaT for as_of_date gives the whole chain
ad_contracts = finder.lookup_future_chain('AD', pd.NaT, first_day)
self.assertEqual(len(ad_contracts), 4)
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = AssetFinder()
asset1 = Equity(1, symbol="AAPL")
asset2 = Equity(2, symbol="GOOG")
asset200 = Future(200, symbol="CLK15")
asset201 = Future(201, symbol="CLM15")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
@with_environment()
def test_compute_lifetimes(self, env=None):
num_assets = 4
trading_day = env.trading_day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_asset_info(
num_assets=num_assets,
first_start=first_start,
frequency=env.trading_day,
periods_between_starts=3,
asset_lifetime=5
)
finder = AssetFinder(frame)
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_mask = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
if start <= date <= end:
expected_mask[i, j] = True
# Filter out columns with all-empty columns.
expected_result = pd.DataFrame(
data=expected_mask,
index=dates,
columns=frame.sid.values,
)
actual_result = finder.lifetimes(dates)
assert_frame_equal(actual_result, expected_result)
class TestFutureChain(TestCase):
metadata = {
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
1: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
2: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
3: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'asset_type': 'future',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
}
asset_finder = AssetFinder(metadata=metadata)
def test_len(self):
""" Test the __len__ method of FutureChain.
"""
# None of the contracts have started yet.
cl = FutureChain(self.asset_finder, lambda: '2005-11-30', 'CL')
self.assertEqual(len(cl), 0)
# Sids 0, 1, & 2 have started, 3 has not yet started.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is still valid the day before its notice date.
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(len(cl), 3)
# Sid 0 is now invalid, leaving only Sids 1 & 2 valid.
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(len(cl), 2)
# Sid 3 has started, so 1, 2, & 3 are now valid.
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(len(cl), 3)
# All contracts are no longer valid.
cl = FutureChain(self.asset_finder, lambda: '2006-09-20', 'CL')
self.assertEqual(len(cl), 0)
def test_getitem(self):
""" Test the __getitem__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl[0], 0)
self.assertEqual(cl[1], 1)
self.assertEqual(cl[2], 2)
with self.assertRaises(IndexError):
cl[3]
cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
self.assertEqual(cl[0], 0)
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(cl[0], 1)
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(cl[-1], 3)
def test_root_symbols(self):
""" Test that different variations on root symbols are handled
as expected.
"""
# Make sure this successfully gets the chain for CL.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl.root_symbol, 'CL')
# These root symbols don't exist, so RootSymbolNotFound should
# be raised immediately.
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', 'CLZ')
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', '')
def test_repr(self):
""" Test the __repr__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
cl_feb = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL',
as_of_date='2006-02-01')
# The default chain should not include the as of date.
self.assertEqual(repr(cl), "FutureChain(root_symbol='CL')")
# An explicit as of date should show up in the repr.
self.assertEqual(
repr(cl_feb),
("FutureChain(root_symbol='CL', "
"as_of_date='2006-02-01 00:00:00+00:00')")
)
def test_as_of(self):
""" Test the as_of method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that the as_of_date is set correctly to the future
feb = '2006-02-01'
cl_feb = cl.as_of(feb)
self.assertEqual(
cl_feb.as_of_date,
pd.Timestamp(feb, tz='UTC')
)
# Test that the as_of_date is set correctly to the past, with
# args of str, datetime.datetime, and pd.Timestamp.
feb_prev = '2005-02-01'
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = datetime(year=2005, month=2, day=1)
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = pd.Timestamp('2005-02-01')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
# The chain as of the current dt should always be the same as
        # the default chain. Tests date as str, pd.Timestamp, and
# datetime.datetime.
self.assertEqual(cl[0], cl.as_of('2005-12-01')[0])
self.assertEqual(cl[0], cl.as_of(pd.Timestamp('2005-12-01'))[0])
self.assertEqual(
cl[0],
cl.as_of(datetime(year=2005, month=12, day=1))[0]
)
def test_offset(self):
""" Test the offset method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that an offset forward sets as_of_date as expected
self.assertEqual(
cl.offset('3 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=3)
)
# Test that an offset backward sets as_of_date as expected, with
# time delta given as str, datetime.timedelta, and pd.Timedelta.
self.assertEqual(
cl.offset('-1000 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(timedelta(days=-1000)).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(pd.Timedelta('-1000 days')).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
# An offset of zero should give the original chain.
self.assertEqual(cl[0], cl.offset(0)[0])
self.assertEqual(cl[0], cl.offset("0 days")[0])
# A string that doesn't represent a time delta should raise a
# ValueError.
with self.assertRaises(ValueError):
cl.offset("blah")
| apache-2.0 | 4,139,048,185,345,944,000 | 35.085648 | 79 | 0.541632 | false |
chemelnucfin/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator.py | 16 | 38554 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for combined DNN + GBDT training model.
The combined model trains a DNN first, then trains boosted trees to boost the
logits of the DNN. The input layer of the DNN (including the embeddings learned
over sparse features) can optionally be provided to the boosted trees as
an additional input feature.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.boosted_trees.estimator_batch import model
from tensorflow.contrib.boosted_trees.estimator_batch import distillation_loss
from tensorflow.contrib.boosted_trees.estimator_batch import trainer_hooks
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.feature_column import feature_column_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_DNN_LEARNING_RATE = 0.001
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
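# For example, _get_optimizer(lambda: tf.train.AdagradOptimizer(0.001))
# instantiates the optimizer lazily, while _get_optimizer("Adagrad") simply
# returns the string for downstream resolution.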
def _add_hidden_layer_summary(value, tag):
summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s_activation" % tag, value)
def _dnn_tree_combined_model_fn(
features,
labels,
mode,
head,
dnn_hidden_units,
dnn_feature_columns,
tree_learner_config,
num_trees,
tree_examples_per_layer,
config=None,
dnn_optimizer="Adagrad",
dnn_activation_fn=nn.relu,
dnn_dropout=None,
dnn_input_layer_partitioner=None,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=10000,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
dnn_to_tree_distillation_param=None,
use_core_versions=False,
output_type=model.ModelBuilderOutputType.MODEL_FN_OPS,
override_global_step_value=None):
"""DNN and GBDT combined model_fn.
Args:
features: `dict` of `Tensor` objects.
labels: Labels used to train on.
mode: Mode we are in. (TRAIN/EVAL/INFER)
head: A `Head` instance.
dnn_hidden_units: List of hidden units per layer.
dnn_feature_columns: An iterable containing all the feature columns
used by the model's DNN.
tree_learner_config: A config for the tree learner.
num_trees: Number of trees to grow model to after training DNN.
tree_examples_per_layer: Number of examples to accumulate before
growing the tree a layer. This value has a big impact on model
quality and should be set equal to the number of examples in
training dataset if possible. It can also be a function that computes
the number of examples based on the depth of the layer that's
being built.
config: `RunConfig` of the estimator.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN. If `None`, will use the Adagrad
optimizer with default learning rate of 0.001.
dnn_activation_fn: Activation function applied to each layer of the DNN.
If `None`, will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability to drop out a given
unit in the DNN.
dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
Defaults to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
dnn_input_layer_to_tree: Whether to provide the DNN's input layer
as a feature to the tree.
dnn_steps_to_train: Number of steps to train dnn for before switching
to gbdt.
predict_with_tree_only: Whether to use only the tree model output as the
final prediction.
tree_feature_columns: An iterable containing all the feature columns
used by the model's boosted trees. If dnn_input_layer_to_tree is
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
float defines the weight of the distillation loss, and the loss_fn, for
computing distillation loss, takes dnn_logits, tree_logits and weight
tensor. If the entire tuple is None, no distillation will be applied. If
only the loss_fn is None, we will take the sigmoid/softmax cross entropy
loss be default. When distillation is applied, `predict_with_tree_only`
will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
(new interface).
override_global_step_value: If after the training is done, global step
value must be reset to this value. This is particularly useful for hyper
parameter tuning, which can't recognize early stopping due to the number
of trees. If None, no override of global step will happen.
Returns:
A `ModelFnOps` object.
Raises:
ValueError: if inputs are not valid.
"""
if not isinstance(features, dict):
raise ValueError("features should be a dictionary of `Tensor`s. "
"Given type: {}".format(type(features)))
if not dnn_feature_columns:
raise ValueError("dnn_feature_columns must be specified")
if dnn_to_tree_distillation_param:
if not predict_with_tree_only:
logging.warning("update predict_with_tree_only to True since distillation"
"is specified.")
predict_with_tree_only = True
# Build DNN Logits.
dnn_parent_scope = "dnn"
dnn_partitioner = dnn_input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=config.num_ps_replicas, min_slice_size=64 << 20))
if (output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC and
not use_core_versions):
raise ValueError("You must use core versions with Estimator Spec")
global_step = training_util.get_global_step()
with variable_scope.variable_scope(
dnn_parent_scope,
values=tuple(six.itervalues(features)),
partitioner=dnn_partitioner):
with variable_scope.variable_scope(
"input_from_feature_columns",
values=tuple(six.itervalues(features)),
partitioner=dnn_partitioner) as input_layer_scope:
if use_core_versions:
input_layer = feature_column_lib.input_layer(
features=features,
feature_columns=dnn_feature_columns,
weight_collections=[dnn_parent_scope])
else:
input_layer = layers.input_from_feature_columns(
columns_to_tensors=features,
feature_columns=dnn_feature_columns,
weight_collections=[dnn_parent_scope],
scope=input_layer_scope)
def dnn_logits_fn():
"""Builds the logits from the input layer."""
previous_layer = input_layer
for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
with variable_scope.variable_scope(
"hiddenlayer_%d" % layer_id,
values=(previous_layer,)) as hidden_layer_scope:
net = layers.fully_connected(
previous_layer,
num_hidden_units,
activation_fn=dnn_activation_fn,
variables_collections=[dnn_parent_scope],
scope=hidden_layer_scope)
if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
_add_hidden_layer_summary(net, hidden_layer_scope.name)
previous_layer = net
with variable_scope.variable_scope(
"logits", values=(previous_layer,)) as logits_scope:
dnn_logits = layers.fully_connected(
previous_layer,
head.logits_dimension,
activation_fn=None,
variables_collections=[dnn_parent_scope],
scope=logits_scope)
_add_hidden_layer_summary(dnn_logits, logits_scope.name)
return dnn_logits
if predict_with_tree_only and mode == model_fn.ModeKeys.INFER:
dnn_logits = array_ops.constant(0.0)
dnn_train_op_fn = control_flow_ops.no_op
elif predict_with_tree_only and mode == model_fn.ModeKeys.EVAL:
dnn_logits = control_flow_ops.cond(
global_step > dnn_steps_to_train,
lambda: array_ops.constant(0.0),
dnn_logits_fn)
dnn_train_op_fn = control_flow_ops.no_op
else:
dnn_logits = dnn_logits_fn()
def dnn_train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizers.optimize_loss(
loss=loss,
global_step=training_util.get_global_step(),
learning_rate=_DNN_LEARNING_RATE,
optimizer=_get_optimizer(dnn_optimizer),
name=dnn_parent_scope,
variables=ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES, scope=dnn_parent_scope),
# Empty summaries to prevent optimizers from logging training_loss.
summaries=[])
# Build Tree Logits.
with ops.device(global_step.device):
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config="", # Initialize an empty ensemble.
name="ensemble_model")
tree_features = features.copy()
if dnn_input_layer_to_tree:
tree_features["dnn_input_layer"] = input_layer
tree_feature_columns.append(layers.real_valued_column("dnn_input_layer"))
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=config.is_chief,
num_ps_replicas=config.num_ps_replicas,
ensemble_handle=ensemble_handle,
center_bias=tree_center_bias,
examples_per_layer=tree_examples_per_layer,
learner_config=tree_learner_config,
feature_columns=tree_feature_columns,
logits_dimension=head.logits_dimension,
features=tree_features,
use_core_columns=use_core_versions)
with ops.name_scope("gbdt"):
predictions_dict = gbdt_model.predict(mode)
tree_logits = predictions_dict["predictions"]
def _tree_train_op_fn(loss):
"""Returns the op to optimize the loss."""
if dnn_to_tree_distillation_param:
loss_weight, loss_fn = dnn_to_tree_distillation_param
# pylint: disable=protected-access
if use_core_versions:
weight_tensor = head_lib._weight_tensor(features, head._weight_column)
else:
weight_tensor = head_lib._weight_tensor(
features, head.weight_column_name)
# pylint: enable=protected-access
dnn_logits_fixed = array_ops.stop_gradient(dnn_logits)
if loss_fn is None:
# we create the loss_fn similar to the head loss_fn for
# multi_class_head used previously as the default one.
n_classes = 2 if head.logits_dimension == 1 else head.logits_dimension
loss_fn = distillation_loss.create_dnn_to_tree_cross_entropy_loss_fn(
n_classes)
dnn_to_tree_distillation_loss = loss_weight * loss_fn(
dnn_logits_fixed, tree_logits, weight_tensor)
summary.scalar("dnn_to_tree_distillation_loss",
dnn_to_tree_distillation_loss)
loss += dnn_to_tree_distillation_loss
update_op = gbdt_model.train(loss, predictions_dict, labels)
with ops.control_dependencies(
[update_op]), (ops.colocate_with(global_step)):
update_op = state_ops.assign_add(global_step, 1).op
return update_op
if predict_with_tree_only:
if mode == model_fn.ModeKeys.TRAIN or mode == model_fn.ModeKeys.INFER:
tree_train_logits = tree_logits
else:
tree_train_logits = control_flow_ops.cond(
global_step > dnn_steps_to_train,
lambda: tree_logits,
lambda: dnn_logits)
else:
tree_train_logits = dnn_logits + tree_logits
def _no_train_op_fn(loss):
"""Returns a no-op."""
del loss
return control_flow_ops.no_op()
if tree_center_bias:
num_trees += 1
finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()
if output_type == model.ModelBuilderOutputType.MODEL_FN_OPS:
model_fn_ops = head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_no_train_op_fn,
logits=tree_train_logits)
if mode != model_fn.ModeKeys.TRAIN:
return model_fn_ops
dnn_train_op = head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=dnn_train_op_fn,
logits=dnn_logits).train_op
tree_train_op = head.create_model_fn_ops(
features=tree_features,
mode=mode,
labels=labels,
train_op_fn=_tree_train_op_fn,
logits=tree_train_logits).train_op
# Add the hooks
model_fn_ops.training_hooks.extend([
trainer_hooks.SwitchTrainOp(dnn_train_op, dnn_steps_to_train,
tree_train_op),
trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
finalized_trees,
override_global_step_value)
])
return model_fn_ops
elif output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC:
fusion_spec = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_no_train_op_fn,
logits=tree_train_logits)
if mode != model_fn.ModeKeys.TRAIN:
return fusion_spec
dnn_spec = head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=dnn_train_op_fn,
logits=dnn_logits)
tree_spec = head.create_estimator_spec(
features=tree_features,
mode=mode,
labels=labels,
train_op_fn=_tree_train_op_fn,
logits=tree_train_logits)
training_hooks = [
trainer_hooks.SwitchTrainOp(dnn_spec.train_op, dnn_steps_to_train,
tree_spec.train_op),
trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
finalized_trees,
override_global_step_value)
]
fusion_spec = fusion_spec._replace(training_hooks=training_hooks +
list(fusion_spec.training_hooks))
return fusion_spec
class DNNBoostedTreeCombinedClassifier(estimator.Estimator):
"""A classifier that uses a combined DNN/GBDT model."""
def __init__(self,
dnn_hidden_units,
dnn_feature_columns,
tree_learner_config,
num_trees,
tree_examples_per_layer,
n_classes=2,
weight_column_name=None,
model_dir=None,
config=None,
label_name=None,
label_keys=None,
feature_engineering_fn=None,
dnn_optimizer="Adagrad",
dnn_activation_fn=nn.relu,
dnn_dropout=None,
dnn_input_layer_partitioner=None,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=10000,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
dnn_to_tree_distillation_param=None,
use_core_versions=False,
override_global_step_value=None):
"""Initializes a DNNBoostedTreeCombinedClassifier instance.
Args:
dnn_hidden_units: List of hidden units per layer for DNN.
dnn_feature_columns: An iterable containing all the feature columns
used by the model's DNN.
tree_learner_config: A config for the tree learner.
num_trees: Number of trees to grow model to after training DNN.
tree_examples_per_layer: Number of examples to accumulate before
growing the tree a layer. This value has a big impact on model
quality and should be set equal to the number of examples in
training dataset if possible. It can also be a function that computes
the number of examples based on the depth of the layer that's
being built.
n_classes: The number of label classes.
weight_column_name: The name of weight column.
model_dir: Directory for model exports.
config: `RunConfig` of the estimator.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN. If `None`, will use the Adagrad
optimizer with default learning rate.
dnn_activation_fn: Activation function applied to each layer of the DNN.
If `None`, will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability to drop out a given
unit in the DNN.
dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
Defaults to `min_max_variable_partitioner` with `min_slice_size`
64 << 20.
dnn_input_layer_to_tree: Whether to provide the DNN's input layer
as a feature to the tree.
dnn_steps_to_train: Number of steps to train dnn for before switching
to gbdt.
predict_with_tree_only: Whether to use only the tree model output as the
final prediction.
tree_feature_columns: An iterable containing all the feature columns
used by the model's boosted trees. If dnn_input_layer_to_tree is
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
float defines the weight of the distillation loss, and the loss_fn, for
computing distillation loss, takes dnn_logits, tree_logits and weight
tensor. If the entire tuple is None, no distillation will be applied. If
only the loss_fn is None, we will take the sigmoid/softmax cross entropy
        loss by default. When distillation is applied, `predict_with_tree_only`
will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      override_global_step_value: If set, the global step value is reset to this
        value once training is done. This is particularly useful for
        hyperparameter tuning, which can't recognize early stopping due to the
        number of trees. If None, the global step is not overridden.
"""
head = head_lib.multi_class_head(
n_classes=n_classes,
label_name=label_name,
label_keys=label_keys,
weight_column_name=weight_column_name,
enable_centered_bias=False)
def _model_fn(features, labels, mode, config):
return _dnn_tree_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
dnn_hidden_units=dnn_hidden_units,
dnn_feature_columns=dnn_feature_columns,
tree_learner_config=tree_learner_config,
num_trees=num_trees,
tree_examples_per_layer=tree_examples_per_layer,
config=config,
dnn_optimizer=dnn_optimizer,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
dnn_input_layer_partitioner=dnn_input_layer_partitioner,
dnn_input_layer_to_tree=dnn_input_layer_to_tree,
dnn_steps_to_train=dnn_steps_to_train,
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
use_core_versions=use_core_versions,
override_global_step_value=override_global_step_value)
super(DNNBoostedTreeCombinedClassifier, self).__init__(
model_fn=_model_fn,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
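# A minimal usage sketch (the values, feature column and train_input_fn below
# are assumptions, not part of this module; learner_pb2 is assumed to come from
# tensorflow.contrib.boosted_trees.proto):
#
#   learner_config = learner_pb2.LearnerConfig()
#   learner_config.num_classes = 2
#   est = DNNBoostedTreeCombinedClassifier(
#       dnn_hidden_units=[128, 64],
#       dnn_feature_columns=[layers.real_valued_column("x")],
#       tree_learner_config=learner_config,
#       num_trees=100,
#       tree_examples_per_layer=1000,
#       dnn_steps_to_train=5000)
#   est.fit(input_fn=train_input_fn, steps=10000)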
class DNNBoostedTreeCombinedRegressor(estimator.Estimator):
"""A regressor that uses a combined DNN/GBDT model."""
def __init__(self,
dnn_hidden_units,
dnn_feature_columns,
tree_learner_config,
num_trees,
tree_examples_per_layer,
weight_column_name=None,
model_dir=None,
config=None,
label_name=None,
label_dimension=1,
feature_engineering_fn=None,
dnn_optimizer="Adagrad",
dnn_activation_fn=nn.relu,
dnn_dropout=None,
dnn_input_layer_partitioner=None,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=10000,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
dnn_to_tree_distillation_param=None,
use_core_versions=False,
override_global_step_value=None):
"""Initializes a DNNBoostedTreeCombinedRegressor instance.
Args:
dnn_hidden_units: List of hidden units per layer for DNN.
dnn_feature_columns: An iterable containing all the feature columns
used by the model's DNN.
tree_learner_config: A config for the tree learner.
num_trees: Number of trees to grow model to after training DNN.
tree_examples_per_layer: Number of examples to accumulate before
growing the tree a layer. This value has a big impact on model
quality and should be set equal to the number of examples in
training dataset if possible. It can also be a function that computes
the number of examples based on the depth of the layer that's
being built.
weight_column_name: The name of weight column.
model_dir: Directory for model exports.
config: `RunConfig` of the estimator.
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN. If `None`, will use the Adagrad
optimizer with default learning rate.
dnn_activation_fn: Activation function applied to each layer of the DNN.
If `None`, will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability to drop out a given
unit in the DNN.
dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
Defaults to `min_max_variable_partitioner` with `min_slice_size`
64 << 20.
dnn_input_layer_to_tree: Whether to provide the DNN's input layer
as a feature to the tree.
dnn_steps_to_train: Number of steps to train dnn for before switching
to gbdt.
predict_with_tree_only: Whether to use only the tree model output as the
final prediction.
tree_feature_columns: An iterable containing all the feature columns
used by the model's boosted trees. If dnn_input_layer_to_tree is
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
float defines the weight of the distillation loss, and the loss_fn, for
computing distillation loss, takes dnn_logits, tree_logits and weight
tensor. If the entire tuple is None, no distillation will be applied. If
only the loss_fn is None, we will take the sigmoid/softmax cross entropy
        loss by default. When distillation is applied, `predict_with_tree_only`
will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      override_global_step_value: If set, the global step value is reset to this
        value once training is done. This is particularly useful for
        hyperparameter tuning, which can't recognize early stopping due to the
        number of trees. If None, the global step is not overridden.
"""
head = head_lib.regression_head(
label_name=label_name,
label_dimension=label_dimension,
weight_column_name=weight_column_name,
enable_centered_bias=False)
# num_classes needed for GradientBoostedDecisionTreeModel
if label_dimension == 1:
tree_learner_config.num_classes = 2
else:
tree_learner_config.num_classes = label_dimension
def _model_fn(features, labels, mode, config):
return _dnn_tree_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
dnn_hidden_units=dnn_hidden_units,
dnn_feature_columns=dnn_feature_columns,
tree_learner_config=tree_learner_config,
num_trees=num_trees,
tree_examples_per_layer=tree_examples_per_layer,
config=config,
dnn_optimizer=dnn_optimizer,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
dnn_input_layer_partitioner=dnn_input_layer_partitioner,
dnn_input_layer_to_tree=dnn_input_layer_to_tree,
dnn_steps_to_train=dnn_steps_to_train,
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
use_core_versions=use_core_versions,
override_global_step_value=override_global_step_value)
super(DNNBoostedTreeCombinedRegressor, self).__init__(
model_fn=_model_fn,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
class DNNBoostedTreeCombinedEstimator(estimator.Estimator):
"""An estimator that uses a combined DNN/GBDT model.
Useful for training with user specified `Head`.
"""
def __init__(self,
dnn_hidden_units,
dnn_feature_columns,
tree_learner_config,
num_trees,
tree_examples_per_layer,
head,
model_dir=None,
config=None,
feature_engineering_fn=None,
dnn_optimizer="Adagrad",
dnn_activation_fn=nn.relu,
dnn_dropout=None,
dnn_input_layer_partitioner=None,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=10000,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
dnn_to_tree_distillation_param=None,
use_core_versions=False,
override_global_step_value=None):
"""Initializes a DNNBoostedTreeCombinedEstimator instance.
Args:
dnn_hidden_units: List of hidden units per layer for DNN.
dnn_feature_columns: An iterable containing all the feature columns
used by the model's DNN.
tree_learner_config: A config for the tree learner.
num_trees: Number of trees to grow model to after training DNN.
tree_examples_per_layer: Number of examples to accumulate before
growing the tree a layer. This value has a big impact on model
quality and should be set equal to the number of examples in
training dataset if possible. It can also be a function that computes
the number of examples based on the depth of the layer that's
being built.
head: `Head` instance.
model_dir: Directory for model exports.
config: `RunConfig` of the estimator.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN. If `None`, will use the Adagrad
optimizer with default learning rate.
dnn_activation_fn: Activation function applied to each layer of the DNN.
If `None`, will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability to drop out a given
unit in the DNN.
dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
Defaults to `min_max_variable_partitioner` with `min_slice_size`
64 << 20.
dnn_input_layer_to_tree: Whether to provide the DNN's input layer
as a feature to the tree.
dnn_steps_to_train: Number of steps to train dnn for before switching
to gbdt.
predict_with_tree_only: Whether to use only the tree model output as the
final prediction.
tree_feature_columns: An iterable containing all the feature columns
used by the model's boosted trees. If dnn_input_layer_to_tree is
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
float defines the weight of the distillation loss, and the loss_fn, for
computing distillation loss, takes dnn_logits, tree_logits and weight
tensor. If the entire tuple is None, no distillation will be applied. If
only the loss_fn is None, we will take the sigmoid/softmax cross entropy
        loss by default. When distillation is applied, `predict_with_tree_only`
will be set to True.
use_core_versions: Whether feature columns and loss are from the core (as
opposed to contrib) version of tensorflow.
      override_global_step_value: If set, the global step value is reset to this
        value once training is done. This is particularly useful for
        hyperparameter tuning, which can't recognize early stopping due to the
        number of trees. If None, the global step is not overridden.
"""
def _model_fn(features, labels, mode, config):
return _dnn_tree_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
dnn_hidden_units=dnn_hidden_units,
dnn_feature_columns=dnn_feature_columns,
tree_learner_config=tree_learner_config,
num_trees=num_trees,
tree_examples_per_layer=tree_examples_per_layer,
config=config,
dnn_optimizer=dnn_optimizer,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
dnn_input_layer_partitioner=dnn_input_layer_partitioner,
dnn_input_layer_to_tree=dnn_input_layer_to_tree,
dnn_steps_to_train=dnn_steps_to_train,
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
use_core_versions=use_core_versions,
override_global_step_value=override_global_step_value)
super(DNNBoostedTreeCombinedEstimator, self).__init__(
model_fn=_model_fn,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
class CoreDNNBoostedTreeCombinedEstimator(core_estimator.Estimator):
"""Initializes a core version of DNNBoostedTreeCombinedEstimator.
Args:
dnn_hidden_units: List of hidden units per layer for DNN.
dnn_feature_columns: An iterable containing all the feature columns
used by the model's DNN.
tree_learner_config: A config for the tree learner.
num_trees: Number of trees to grow model to after training DNN.
tree_examples_per_layer: Number of examples to accumulate before
growing the tree a layer. This value has a big impact on model
quality and should be set equal to the number of examples in
training dataset if possible. It can also be a function that computes
the number of examples based on the depth of the layer that's
being built.
head: `Head` instance.
model_dir: Directory for model exports.
config: `RunConfig` of the estimator.
dnn_optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training the DNN. If `None`, will use the Adagrad
optimizer with default learning rate.
dnn_activation_fn: Activation function applied to each layer of the DNN.
If `None`, will use `tf.nn.relu`.
dnn_dropout: When not `None`, the probability to drop out a given
unit in the DNN.
dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
Defaults to `min_max_variable_partitioner` with `min_slice_size`
64 << 20.
dnn_input_layer_to_tree: Whether to provide the DNN's input layer
as a feature to the tree.
dnn_steps_to_train: Number of steps to train dnn for before switching
to gbdt.
predict_with_tree_only: Whether to use only the tree model output as the
final prediction.
tree_feature_columns: An iterable containing all the feature columns
used by the model's boosted trees. If dnn_input_layer_to_tree is
set to True, these features are in addition to dnn_feature_columns.
tree_center_bias: Whether a separate tree should be created for
first fitting the bias.
dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
float defines the weight of the distillation loss, and the loss_fn, for
computing distillation loss, takes dnn_logits, tree_logits and weight
tensor. If the entire tuple is None, no distillation will be applied. If
only the loss_fn is None, we will take the sigmoid/softmax cross entropy
      loss by default. When distillation is applied, `predict_with_tree_only`
will be set to True.
"""
def __init__(self,
dnn_hidden_units,
dnn_feature_columns,
tree_learner_config,
num_trees,
tree_examples_per_layer,
head,
model_dir=None,
config=None,
dnn_optimizer="Adagrad",
dnn_activation_fn=nn.relu,
dnn_dropout=None,
dnn_input_layer_partitioner=None,
dnn_input_layer_to_tree=True,
dnn_steps_to_train=10000,
predict_with_tree_only=False,
tree_feature_columns=None,
tree_center_bias=False,
dnn_to_tree_distillation_param=None):
def _model_fn(features, labels, mode, config):
return _dnn_tree_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
dnn_hidden_units=dnn_hidden_units,
dnn_feature_columns=dnn_feature_columns,
tree_learner_config=tree_learner_config,
num_trees=num_trees,
tree_examples_per_layer=tree_examples_per_layer,
config=config,
dnn_optimizer=dnn_optimizer,
dnn_activation_fn=dnn_activation_fn,
dnn_dropout=dnn_dropout,
dnn_input_layer_partitioner=dnn_input_layer_partitioner,
dnn_input_layer_to_tree=dnn_input_layer_to_tree,
dnn_steps_to_train=dnn_steps_to_train,
predict_with_tree_only=predict_with_tree_only,
tree_feature_columns=tree_feature_columns,
tree_center_bias=tree_center_bias,
dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
output_type=model.ModelBuilderOutputType.ESTIMATOR_SPEC,
use_core_versions=True,
override_global_step_value=None)
super(CoreDNNBoostedTreeCombinedEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
| apache-2.0 | -2,076,465,372,930,137,900 | 43.882421 | 81 | 0.660217 | false |
mind1master/aiohttp | tests/test_web_exceptions.py | 1 | 5041 | import collections
import re
from unittest import mock
import pytest
from multidict import CIMultiDict
from aiohttp import signals, web
from aiohttp.protocol import HttpVersion11, RawRequestMessage
from aiohttp.web import Request
@pytest.fixture
def buf():
return bytearray()
@pytest.fixture
def request(buf):
method = 'GET'
path = '/'
headers = CIMultiDict()
transport = mock.Mock()
payload = mock.Mock()
reader = mock.Mock()
writer = mock.Mock()
writer.drain.return_value = ()
def append(data):
buf.extend(data)
writer.write.side_effect = append
app = mock.Mock()
app._debug = False
app.on_response_prepare = signals.Signal(app)
message = RawRequestMessage(method, path, HttpVersion11, headers, [],
False, False)
req = Request(app, message, payload,
transport, reader, writer)
return req
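# The mocked writer appends every chunk the response writes into ``buf``, so
# the tests below can regex-match the raw HTTP output.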
def test_all_http_exceptions_exported():
assert 'HTTPException' in web.__all__
for name in dir(web):
if name.startswith('_'):
continue
obj = getattr(web, name)
if isinstance(obj, type) and issubclass(obj, web.HTTPException):
assert name in web.__all__
@pytest.mark.run_loop
def test_HTTPOk(buf, request):
resp = web.HTTPOk()
yield from resp.prepare(request)
yield from resp.write_eof()
txt = buf.decode('utf8')
assert re.match(('HTTP/1.1 200 OK\r\n'
'Content-Type: text/plain; charset=utf-8\r\n'
'Content-Length: 7\r\n'
'Date: .+\r\n'
'Server: .+\r\n\r\n'
'200: OK'), txt)
def test_terminal_classes_has_status_code():
terminals = set()
for name in dir(web):
obj = getattr(web, name)
if isinstance(obj, type) and issubclass(obj, web.HTTPException):
terminals.add(obj)
dup = frozenset(terminals)
for cls1 in dup:
for cls2 in dup:
if cls1 in cls2.__bases__:
terminals.discard(cls1)
for cls in terminals:
assert cls.status_code is not None
codes = collections.Counter(cls.status_code for cls in terminals)
assert None not in codes
assert 1 == codes.most_common(1)[0][1]
@pytest.mark.run_loop
def test_HTTPFound(buf, request):
resp = web.HTTPFound(location='/redirect')
assert '/redirect' == resp.location
assert '/redirect' == resp.headers['location']
yield from resp.prepare(request)
yield from resp.write_eof()
txt = buf.decode('utf8')
assert re.match('HTTP/1.1 302 Found\r\n'
'Content-Type: text/plain; charset=utf-8\r\n'
'Content-Length: 10\r\n'
'Location: /redirect\r\n'
'Date: .+\r\n'
'Server: .+\r\n\r\n'
'302: Found', txt)
def test_HTTPFound_empty_location():
with pytest.raises(ValueError):
web.HTTPFound(location='')
with pytest.raises(ValueError):
web.HTTPFound(location=None)
@pytest.mark.run_loop
def test_HTTPMethodNotAllowed(buf, request):
resp = web.HTTPMethodNotAllowed('get', ['POST', 'PUT'])
assert 'GET' == resp.method
assert ['POST', 'PUT'] == resp.allowed_methods
assert 'POST,PUT' == resp.headers['allow']
yield from resp.prepare(request)
yield from resp.write_eof()
txt = buf.decode('utf8')
assert re.match('HTTP/1.1 405 Method Not Allowed\r\n'
'Content-Type: text/plain; charset=utf-8\r\n'
'Content-Length: 23\r\n'
'Allow: POST,PUT\r\n'
'Date: .+\r\n'
'Server: .+\r\n\r\n'
'405: Method Not Allowed', txt)
def test_override_body_with_text():
resp = web.HTTPNotFound(text="Page not found")
assert 404 == resp.status
assert "Page not found".encode('utf-8') == resp.body
assert "Page not found" == resp.text
assert "text/plain" == resp.content_type
assert "utf-8" == resp.charset
def test_override_body_with_binary():
txt = "<html><body>Page not found</body></html>"
resp = web.HTTPNotFound(body=txt.encode('utf-8'),
content_type="text/html")
assert 404 == resp.status
assert txt.encode('utf-8') == resp.body
assert txt == resp.text
assert "text/html" == resp.content_type
assert resp.charset is None
def test_default_body():
resp = web.HTTPOk()
assert b'200: OK' == resp.body
def test_empty_body_204():
resp = web.HTTPNoContent()
assert resp.body is None
def test_empty_body_205():
    resp = web.HTTPResetContent()
    assert resp.body is None
def test_empty_body_304():
    resp = web.HTTPNotModified()
    assert resp.body is None
def test_link_header_451(buf, request):
resp = web.HTTPUnavailableForLegalReasons(link='http://warning.or.kr/')
assert 'http://warning.or.kr/' == resp.link
assert '<http://warning.or.kr/>; rel="blocked-by"' == resp.headers['Link']
| apache-2.0 | 4,417,213,948,842,629,600 | 28.138728 | 78 | 0.596509 | false |
fengbohello/practice/python/project/flasky/p036_wtf_form/hello.py | 1 | 1189 | #!/usr/bin/env python
from flask import Flask, render_template
from flask.ext.bootstrap import Bootstrap
from flask.ext.script import Manager
from flask.ext.moment import Moment
from datetime import datetime
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
class NameForm(Form):
name = StringField('What is your name ?', validators = [Required()])
submit = SubmitField('Submit')
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hello world !'
b = Bootstrap(app)
m = Moment(app)
manager = Manager(app)
@app.route('/', methods = ['GET', 'POST'])
def index():
name = None
form = NameForm()
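    # validate_on_submit() is True only for a POST whose fields (and CSRF token)
    # pass validation; on a plain GET the empty form is simply rendered.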
if form.validate_on_submit():
name = form.name.data
form.name.data = ''
return render_template('index.html', current_time = datetime.utcnow(), form = form, name = name)
@app.route('/user/<name>')
def user(name):
return render_template('user.html', name = name)
@app.errorhandler(404)
def page_not_found(e):
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
    return render_template('500.html'), 500
if "__main__" == __name__:
manager.run()
| lgpl-3.0 | -8,779,289,926,343,582,000 | 23.770833 | 100 | 0.686291 | false |
araichev/kml2geojson | kml2geojson/main.py | 1 | 18553 | import os
import shutil
import xml.dom.minidom as md
import re
from pathlib import Path
import json
#: Atomic KML geometry types supported.
#: MultiGeometry is handled separately.
GEOTYPES = [
'Polygon',
'LineString',
'Point',
'Track',
'gx:Track',
]
#: Supported style types
STYLE_TYPES = [
'svg',
'leaflet',
]
SPACE = re.compile(r'\s+')
# ----------------
# Helper functions
# ----------------
def rm_paths(*paths):
"""
Delete the given file paths/directory paths, if they exists.
"""
for p in paths:
p = Path(p)
if p.exists():
if p.is_file():
p.unlink()
else:
shutil.rmtree(str(p))
def get(node, name):
"""
Given a KML Document Object Model (DOM) node, return a list of its sub-nodes that have the given tag name.
"""
return node.getElementsByTagName(name)
def get1(node, name):
"""
Return the first element of ``get(node, name)``, if it exists.
Otherwise return ``None``.
"""
s = get(node, name)
if s:
return s[0]
else:
return None
def attr(node, name):
"""
Return as a string the value of the given DOM node's attribute named by ``name``, if it exists.
Otherwise, return an empty string.
"""
return node.getAttribute(name)
def val(node):
"""
Normalize the given DOM node and return the value of its first child (the string content of the node) stripped of leading and trailing whitespace.
"""
try:
node.normalize()
return node.firstChild.wholeText.strip() # Handles CDATASection too
except AttributeError:
return ''
def valf(node):
"""
Cast ``val(node)`` as a float.
Return ``None`` if that does not work.
"""
try:
return float(val(node))
except ValueError:
return None
def numarray(a):
"""
Cast the given list into a list of floats.
"""
return [float(aa) for aa in a]
def coords1(s):
"""
Convert the given KML string containing one coordinate tuple into a list of floats.
EXAMPLE::
>>> coords1(' -112.2,36.0,2357 ')
[-112.2, 36.0, 2357.0]
"""
return numarray(re.sub(SPACE, '', s).split(','))
def coords(s):
"""
Convert the given KML string containing multiple coordinate tuples into a list of lists of floats.
EXAMPLE::
>>> coords('''
... -112.0,36.1,0
... -113.0,36.0,0
... ''')
[[-112.0, 36.1, 0.0], [-113.0, 36.0, 0.0]]
"""
    s = s.split()
return [coords1(ss) for ss in s]
def gx_coords1(s):
"""
Convert the given KML string containing one gx coordinate tuple into a list of floats.
EXAMPLE::
>>> gx_coords1('-113.0 36.0 0')
[-113.0, 36.0, 0.0]
"""
return numarray(s.split(' '))
def gx_coords(node):
"""
    Given a KML DOM node, grab its <gx:coord> and <gx:timestamp><when> subnodes, and convert them into a dictionary with the keys and values
- ``'coordinates'``: list of lists of float coordinates
- ``'times'``: list of timestamps corresponding to the coordinates
"""
els = get(node, 'gx:coord')
coordinates = []
times = []
coordinates = [gx_coords1(val(el)) for el in els]
time_els = get(node, 'when')
times = [val(t) for t in time_els]
return {
'coordinates': coordinates,
'times': times,
}
def disambiguate(names, mark='1'):
"""
Given a list of strings ``names``, return a new list of names where repeated names have been disambiguated by repeatedly appending the given mark.
EXAMPLE::
>>> disambiguate(['sing', 'song', 'sing', 'sing'])
['sing', 'song', 'sing1', 'sing11']
"""
names_seen = set()
new_names = []
for name in names:
new_name = name
while new_name in names_seen:
new_name += mark
new_names.append(new_name)
names_seen.add(new_name)
return new_names
def to_filename(s):
"""
Based on `django/utils/text.py <https://github.com/django/django/blob/master/django/utils/text.py>`_.
Return the given string converted to a string that can be used for a clean filename.
Specifically, leading and trailing spaces are removed; other spaces are converted to underscores, and anything that is not a unicode alphanumeric, dash, underscore, or dot, is removed.
EXAMPLE::
>>> to_filename("% A d\sbla'{-+\)(ç? ")
'A_dsbla-ç'
"""
s = re.sub(r'(?u)[^-\w. ]', '', s)
s = s.strip().replace(' ', '_')
return s
# ---------------
# Main functions
# ---------------
def build_rgb_and_opacity(s):
"""
Given a KML color string, return an equivalent RGB hex color string and an opacity float rounded to 2 decimal places.
EXAMPLE::
>>> build_rgb_and_opacity('ee001122')
('#221100', 0.93)
"""
# Set defaults
color = '000000'
opacity = 1
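    # KML colors are "aabbggrr" hex strings: reverse the byte order to get an
    # RGB string and scale the alpha byte to an opacity in [0, 1].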
if s.startswith('#'):
s = s[1:]
if len(s) == 8:
color = s[6:8] + s[4:6] + s[2:4]
opacity = round(int(s[0:2], 16)/256, 2)
elif len(s) == 6:
color = s[4:6] + s[2:4] + s[0:2]
elif len(s) == 3:
color = s[::-1]
return '#' + color, opacity
def build_svg_style(node):
"""
    Given a DOM node, grab its top-level Style nodes, convert every one into an SVG style dictionary, put them in a master dictionary of the form
#style ID -> SVG style dictionary,
and return the result.
The possible keys and values of each SVG style dictionary, the style options, are
- ``iconUrl``: URL of icon
- ``stroke``: stroke color; RGB hex string
- ``stroke-opacity``: stroke opacity
- ``stroke-width``: stroke width in pixels
- ``fill``: fill color; RGB hex string
- ``fill-opacity``: fill opacity
"""
d = {}
for item in get(node, 'Style'):
style_id = '#' + attr(item, 'id')
# Create style properties
props = {}
for x in get(item, 'PolyStyle'):
color = val(get1(x, 'color'))
if color:
rgb, opacity = build_rgb_and_opacity(color)
props['fill'] = rgb
props['fill-opacity'] = opacity
# Set default border style
props['stroke'] = rgb
props['stroke-opacity'] = opacity
props['stroke-width'] = 1
fill = valf(get1(x, 'fill'))
if fill == 0:
props['fill-opacity'] = fill
elif fill == 1 and 'fill-opacity' not in props:
props['fill-opacity'] = fill
outline = valf(get1(x, 'outline'))
if outline == 0:
props['stroke-opacity'] = outline
elif outline == 1 and 'stroke-opacity' not in props:
props['stroke-opacity'] = outline
for x in get(item, 'LineStyle'):
color = val(get1(x, 'color'))
if color:
rgb, opacity = build_rgb_and_opacity(color)
props['stroke'] = rgb
props['stroke-opacity'] = opacity
width = valf(get1(x, 'width'))
if width is not None:
props['stroke-width'] = width
for x in get(item, 'IconStyle'):
icon = get1(x, 'Icon')
if not icon:
continue
# Clear previous style properties
props = {}
props['iconUrl'] = val(get1(icon, 'href'))
d[style_id] = props
return d
def build_leaflet_style(node):
"""
Given a DOM node, grab its top-level Style nodes, convert every one into a Leaflet style dictionary, put them in a master dictionary of the form
#style ID -> Leaflet style dictionary,
and return the result.
    The possible keys and values of each Leaflet style dictionary, the style options, are
- ``iconUrl``: URL of icon
- ``color``: stroke color; RGB hex string
- ``opacity``: stroke opacity
- ``weight``: stroke width in pixels
- ``fillColor``: fill color; RGB hex string
- ``fillOpacity``: fill opacity
"""
d = {}
for item in get(node, 'Style'):
style_id = '#' + attr(item, 'id')
# Create style properties
props = {}
for x in get(item, 'PolyStyle'):
color = val(get1(x, 'color'))
if color:
rgb, opacity = build_rgb_and_opacity(color)
props['fillColor'] = rgb
props['fillOpacity'] = opacity
# Set default border style
props['color'] = rgb
props['opacity'] = opacity
props['weight'] = 1
fill = valf(get1(x, 'fill'))
if fill == 0:
props['fillOpacity'] = fill
elif fill == 1 and 'fillOpacity' not in props:
props['fillOpacity'] = fill
outline = valf(get1(x, 'outline'))
if outline == 0:
props['opacity'] = outline
elif outline == 1 and 'opacity' not in props:
props['opacity'] = outline
for x in get(item, 'LineStyle'):
color = val(get1(x, 'color'))
if color:
rgb, opacity = build_rgb_and_opacity(color)
props['color'] = rgb
props['opacity'] = opacity
width = valf(get1(x, 'width'))
if width is not None:
props['weight'] = width
for x in get(item, 'IconStyle'):
icon = get1(x, 'Icon')
if not icon:
continue
# Clear previous style properties
props = {}
props['iconUrl'] = val(get1(icon, 'href'))
d[style_id] = props
return d
def build_geometry(node):
"""
Return a (decoded) GeoJSON geometry dictionary corresponding to the given KML node.
"""
geoms = []
times = []
if get1(node, 'MultiGeometry'):
return build_geometry(get1(node, 'MultiGeometry'))
if get1(node, 'MultiTrack'):
return build_geometry(get1(node, 'MultiTrack'))
if get1(node, 'gx:MultiTrack'):
return build_geometry(get1(node, 'gx:MultiTrack'))
for geotype in GEOTYPES:
geonodes = get(node, geotype)
if not geonodes:
continue
for geonode in geonodes:
if geotype == 'Point':
geoms.append({
'type': 'Point',
'coordinates': coords1(val(get1(
geonode, 'coordinates')))
})
elif geotype == 'LineString':
geoms.append({
'type': 'LineString',
'coordinates': coords(val(get1(
geonode, 'coordinates')))
})
elif geotype == 'Polygon':
rings = get(geonode, 'LinearRing')
coordinates = [coords(val(get1(ring, 'coordinates')))
for ring in rings]
geoms.append({
'type': 'Polygon',
'coordinates': coordinates,
})
elif geotype in ['Track', 'gx:Track']:
track = gx_coords(geonode)
geoms.append({
'type': 'LineString',
'coordinates': track['coordinates'],
})
if track['times']:
times.append(track['times'])
return {'geoms': geoms, 'times': times}
def build_feature(node):
"""
Build and return a (decoded) GeoJSON Feature corresponding to this KML node (typically a KML Placemark).
Return ``None`` if no Feature can be built.
"""
geoms_and_times = build_geometry(node)
if not geoms_and_times['geoms']:
return None
props = {}
for x in get(node, 'name')[:1]:
name = val(x)
if name:
props['name'] = val(x)
for x in get(node, 'description')[:1]:
desc = val(x)
if desc:
props['description'] = desc
for x in get(node, 'styleUrl')[:1]:
style_url = val(x)
if style_url[0] != '#':
style_url = '#' + style_url
props['styleUrl'] = style_url
for x in get(node, 'PolyStyle')[:1]:
color = val(get1(x, 'color'))
if color:
rgb, opacity = build_rgb_and_opacity(color)
props['fill'] = rgb
props['fill-opacity'] = opacity
# Set default border style
props['stroke'] = rgb
props['stroke-opacity'] = opacity
props['stroke-width'] = 1
fill = valf(get1(x, 'fill'))
if fill == 0:
props['fill-opacity'] = fill
elif fill == 1 and 'fill-opacity' not in props:
props['fill-opacity'] = fill
outline = valf(get1(x, 'outline'))
if outline == 0:
props['stroke-opacity'] = outline
elif outline == 1 and 'stroke-opacity' not in props:
props['stroke-opacity'] = outline
for x in get(node, 'LineStyle')[:1]:
color = val(get1(x, 'color'))
if color:
rgb, opacity = build_rgb_and_opacity(color)
props['stroke'] = rgb
props['stroke-opacity'] = opacity
width = valf(get1(x, 'width'))
if width:
props['stroke-width'] = width
for x in get(node, 'ExtendedData')[:1]:
datas = get(x, 'Data')
for data in datas:
props[attr(data, 'name')] = val(get1(data, 'value'))
simple_datas = get(x, 'SimpleData')
for simple_data in simple_datas:
props[attr(simple_data, 'name')] = val(simple_data)
for x in get(node, 'TimeSpan')[:1]:
begin = val(get1(x, 'begin'))
end = val(get1(x, 'end'))
props['timeSpan'] = {'begin': begin, 'end': end}
if geoms_and_times['times']:
times = geoms_and_times['times']
if len(times) == 1:
props['times'] = times[0]
else:
props['times'] = times
feature = {
'type': 'Feature',
'properties': props,
}
geoms = geoms_and_times['geoms']
if len(geoms) == 1:
feature['geometry'] = geoms[0]
else:
feature['geometry'] = {
'type': 'GeometryCollection',
'geometries': geoms,
}
if attr(node, 'id'):
feature['id'] = attr(node, 'id')
return feature
def build_feature_collection(node, name=None):
"""
Build and return a (decoded) GeoJSON FeatureCollection corresponding to this KML DOM node (typically a KML Folder).
If a name is given, store it in the FeatureCollection's ``'name'`` attribute.
"""
# Initialize
geojson = {
'type': 'FeatureCollection',
'features': [],
}
# Build features
for placemark in get(node, 'Placemark'):
feature = build_feature(placemark)
if feature is not None:
geojson['features'].append(feature)
# Give the collection a name if requested
if name is not None:
geojson['name'] = name
return geojson
def build_layers(node, disambiguate_names=True):
"""
Return a list of GeoJSON FeatureCollections, one for each folder in the given KML DOM node that contains geodata.
Name each FeatureCollection (via a ``'name'`` attribute) according to its corresponding KML folder name.
If ``disambiguate_names == True``, then disambiguate repeated layer names via :func:`disambiguate`.
Warning: this can produce layers with the same geodata in case the KML node has nested folders with geodata.
"""
layers = []
names = []
for i, folder in enumerate(get(node, 'Folder')):
name = val(get1(folder, 'name'))
geojson = build_feature_collection(folder, name)
if geojson['features']:
layers.append(geojson)
names.append(name)
if not layers:
# No folders, so use the root node
name = val(get1(node, 'name'))
geojson = build_feature_collection(node, name)
if geojson['features']:
layers.append(geojson)
names.append(name)
if disambiguate_names:
new_names = disambiguate(names)
new_layers = []
for i, layer in enumerate(layers):
layer['name'] = new_names[i]
new_layers.append(layer)
layers = new_layers
return layers
def convert(kml_path, output_dir, separate_folders=False,
style_type=None, style_filename='style.json'):
"""
Given a path to a KML file, convert it to one or several GeoJSON FeatureCollection files and save the result(s) to the given output directory.
If not ``separate_folders`` (the default), then create one GeoJSON file.
Otherwise, create several GeoJSON files, one for each folder in the KML file that contains geodata or that has a descendant node that contains geodata.
Warning: this can produce GeoJSON files with the same geodata in case the KML file has nested folders with geodata.
If a ``style_type`` is given, then also build a JSON style file of the given style type and save it to the output directory under the name given by ``style_filename``.
"""
# Create absolute paths
kml_path = Path(kml_path).resolve()
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
output_dir = output_dir.resolve()
# Parse KML
with kml_path.open(encoding='utf-8', errors='ignore') as src:
kml_str = src.read()
root = md.parseString(kml_str)
# Build GeoJSON layers
if separate_folders:
layers = build_layers(root)
else:
layers = [build_feature_collection(root, name=kml_path.stem)]
# Create filenames for layers
filenames = disambiguate(
[to_filename(layer['name'])
for layer in layers])
filenames = [name + '.geojson' for name in filenames]
# Write layers to files
for i in range(len(layers)):
path = output_dir/filenames[i]
with path.open('w') as tgt:
json.dump(layers[i], tgt)
# Build and export style file if desired
if style_type is not None:
if style_type not in STYLE_TYPES:
raise ValueError('style type must be one of {!s}'.format(
STYLE_TYPES))
builder_name = 'build_{!s}_style'.format(style_type)
style_dict = globals()[builder_name](root)
path = output_dir/style_filename
with path.open('w') as tgt:
json.dump(style_dict, tgt)
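# A minimal usage sketch (file and directory names are assumptions):
#
#   convert('doc.kml', 'out', separate_folders=True, style_type='leaflet')
#
# writes one GeoJSON file per KML folder that contains geodata, plus
# out/style.json with the corresponding Leaflet styles.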
| mit | 8,119,090,203,167,894,000 | 30.495756 | 188 | 0.554579 | false |
sanja7s/SR_Twitter | src_taxonomy/extract_user_score_for_top_topics.py | 1 | 3078 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
extract top taxons for each user:
movies
music
sex
humor
school
'''
import codecs
from collections import defaultdict, OrderedDict
import json
import glob, os
f_in = "tweets_taxonomy_clean.JSON"
f_in_user_ids = "user_IDs.dat"
IN_DIR = "../../../DATA/taxonomy_stats/"
OUT_DIR = "user_taxons/"
f_out_topics = "user_score_for_top_topics.tab"
##################################################
# read in a map for the twitter username --> id
##################################################
def read_user_IDs():
user_ids = defaultdict(str)
with codecs.open(f_in_user_ids,'r', encoding='utf8') as f:
for line in f:
line = line.split()
user_id = line[0]
user = line[1]
user_ids[user] = user_id
return user_ids
###############################################################################
"""
go through taxon file and extract users scores for top topics
movies
music
sex
humor
school
"""
###############################################################################
def extract_usr_topics_score():
os.chdir(IN_DIR)
# resulting dictionary in which the counts and tfidf relevance are collected
res = defaultdict(int)
# holds all the user ids
user_ids = read_user_IDs()
output_file = codecs.open(OUT_DIR+f_out_topics, 'w', encoding='utf8')
cnt = 0
with codecs.open(f_in,'r', encoding='utf8') as input_file:
for line7s in input_file:
try:
line = json.loads(line7s)
taxonomy_all = line["taxonomy"]
user_name = line["_id"]
user_id = user_ids[user_name]
taxonomy = taxonomy_all["taxonomy"]
docSentiment = taxonomy_all["docSentiment"]
# the user we analyze
user_name = line["_id"]
user_id = user_ids[user_name]
res[user_id] = defaultdict(int)
# procedure for extracting the taxons
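				# Each taxon label is a path such as "/art and entertainment/music/...";
				# split it and add the taxon score to every top topic that appears
				# anywhere in that path.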
for el in taxonomy:
try:
if el["confident"] == "no":
continue
					except KeyError:
						pass
taxonomy_tree = el["label"]
taxonomy_tree = taxonomy_tree.split("/")
taxonomy_tree.pop(0)
levels = len(taxonomy_tree)
score = float(el["score"])
if 'music' in taxonomy_tree:
res[user_id]['music'] += score
elif 'movies' in taxonomy_tree:
res[user_id]['movies'] += score
elif 'sex' in taxonomy_tree:
res[user_id]['sex'] += score
elif 'humor' in taxonomy_tree:
res[user_id]['humor'] += score
elif 'school' in taxonomy_tree:
res[user_id]['school'] += score
output_file.write(str(user_id) + '\t' + str(res[user_id]['music']) + \
'\t' + str(res[user_id]['movies']) + \
'\t' + str(res[user_id]['sex']) + \
'\t' + str(res[user_id]['humor']) + \
'\t' + str(res[user_id]['school']) + '\n')
cnt += 1
except KeyError:
#print line7s
				# we don't print since it is tested; there are some 10% of users for whom
				# the taxonomy was not successfully downloaded and they would be listed here
continue
print "Topics saved for %d users " % (cnt)
###############################################################################
extract_usr_topics_score() | mit | -2,001,876,251,306,540,500 | 27.247706 | 79 | 0.558804 | false |
karmix/blivet | blivet/devicelibs/raid.py | 2 | 23633 | #
# raid.py
# representation of RAID levels
#
# Copyright (C) 2009 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Dave Lehman <[email protected]>
# Anne Mulhern <[email protected]>
#
import abc
from six import add_metaclass
from ..errors import RaidError
from ..size import Size
def div_up(a, b):
"""Rounds up integer division. For example, div_up(3, 2) is 2.
:param int a: the dividend
:param int b: the divisor
"""
return (a + (b - 1))//b
@add_metaclass(abc.ABCMeta)
class RAIDLevel(object):
"""An abstract class which is the parent of all classes which represent
a RAID level.
It ensures that RAIDLevel objects will really be singleton objects
by overriding copy methods.
"""
name = abc.abstractproperty(doc="The canonical name for this level")
names = abc.abstractproperty(doc="List of recognized names for this level.")
min_members = abc.abstractproperty(doc=
"The minimum number of members required to make a fully functioning array.")
@abc.abstractmethod
def has_redundancy(self):
""" Whether this RAID level incorporates inherent redundancy.
Note that for some RAID levels, the notion of redundancy is
meaningless.
:rtype: boolean
:returns: True if this RAID level has inherent redundancy
"""
raise NotImplementedError()
is_uniform = abc.abstractproperty(doc=
"Whether data is uniformly distributed across all devices.")
def __str__(self):
return self.name
def __copy__(self):
return self
def __deepcopy__(self, memo):
# pylint: disable=unused-argument
return self
@add_metaclass(abc.ABCMeta)
class RAIDn(RAIDLevel):
"""An abstract class which is the parent of classes which represent a
numeric RAID level. A better word would be classification, since 'level'
implies an ordering, but level is the canonical word.
The abstract properties of the class are:
- level: A string containing the number that designates this level
- nick: A single nickname for this level, may be None
All methods in this class fall into these categories:
    1) May not be overridden in any subclass.
2) Are private abstract methods.
3) Are special Python methods, e.g., __str__
Note that each subclass in this file is instantiated immediately after
it is defined and using the same name, effectively yielding a
singleton object of the class.
"""
# ABSTRACT PROPERTIES
level = abc.abstractproperty(doc="A code representing the level")
nick = abc.abstractproperty(doc="A nickname for this level")
# PROPERTIES
is_uniform = property(lambda s: True)
number = property(lambda s : int(s.level),
doc="A numeric code for this level")
name = property(lambda s : "raid" + s.level,
doc="The canonical name for this level")
alt_synth_names = property(lambda s : ["RAID" + s.level, s.level, s.number],
doc="names that can be synthesized from level but are not name")
names = property(lambda s :
[n for n in [s.name] + [s.nick] + s.alt_synth_names if n is not None],
doc="all valid names for this level")
# METHODS
def get_max_spares(self, member_count):
"""The maximum number of spares for this level.
:param int member_count: the number of members belonging to the array
:rtype: int
        Raises a RaidError if member_count is fewer than the minimum
number of members required for this level.
"""
if member_count < self.min_members:
raise RaidError("%s requires at least %d disks" % (self.name, self.min_members))
return self._get_max_spares(member_count)
@abc.abstractmethod
def _get_max_spares(self, member_count):
"""Helper function; not to be called directly."""
raise NotImplementedError()
def get_base_member_size(self, size, member_count):
"""The required size for each member of the array for
storing only data.
:param size: size of data to be stored
:type size: :class:`~.size.Size`
:param int member_count: number of members in this array
:rtype: :class:`~.size.Size`
Raises a RaidError if member_count is fewer than the minimum
number of members required for this array or if size is less
than 0.
"""
if member_count < self.min_members:
raise RaidError("%s requires at least %d disks" % (self.name, self.min_members))
if size < 0:
raise RaidError("size is a negative number")
return self._get_base_member_size(size, member_count)
@abc.abstractmethod
def _get_base_member_size(self, size, member_count):
"""Helper function; not to be called directly."""
raise NotImplementedError()
def get_net_array_size(self, member_count, smallest_member_size):
"""Return the space, essentially the number of bits available
for storage. This value is generally a function of the
smallest member size. If the smallest member size represents
the amount of data that can be stored on the smallest member,
then the result will represent the amount of data that can be
stored on the array. If the smallest member size represents
both data and metadata, then the result will represent the
available space in the array for both data and metadata.
:param int member_count: the number of members in the array
:param smallest_member_size: the size of the smallest
member of this array
:type smallest_member_size: :class:`~.size.Size`
:returns: the array size
:rtype: :class:`~.size.Size`
Raises a RaidError if member_count is fewer than the minimum
number of members required for this array or if size is less
than 0.
"""
if member_count < self.min_members:
raise RaidError("%s requires at least %d disks" % (self.name, self.min_members))
if smallest_member_size < Size(0):
raise RaidError("size is a negative number")
return self._get_net_array_size(member_count, smallest_member_size)
@abc.abstractmethod
def _get_net_array_size(self, member_count, smallest_member_size):
"""Helper function; not to be called directly."""
raise NotImplementedError()
@abc.abstractmethod
def _trim(self, size, chunk_size):
"""Helper function; not to be called directly.
Trims size to the largest size that the level allows based on the
chunk_size.
:param size: the size of the array
:type size: :class:`~.size.Size`
:param chunk_size: the smallest unit of size this array allows
:type chunk_size: :class:`~.size.Size`
:rtype: :class:`~.size.Size`
"""
raise NotImplementedError()
@abc.abstractmethod
def _pad(self, size, chunk_size):
"""Helper function; not to be called directly.
Pads size to the smallest size greater than size that is in units
of chunk_size.
:param size: the size of the array
:type size: :class:`~.size.Size`
:param chunk_size: the smallest unit of size this array allows
:type chunk_size: :class:`~.size.Size`
:rtype: :class:`~.size.Size`
"""
raise NotImplementedError()
def get_recommended_stride(self, member_count):
"""Return a recommended stride size in blocks.
Returns None if there is no recommended size.
:param int member_count: the number of members in the array
:rtype: int or None
Raises a RaidError if member_count is fewer than the
minimum number of members required for this level
"""
if member_count < self.min_members:
raise RaidError("%s requires at least %d disks" % (self.name, self.min_members))
return self._get_recommended_stride(member_count)
@abc.abstractmethod
def _get_recommended_stride(self, member_count):
"""Helper function; not to be called directly."""
raise NotImplementedError()
def get_size(self, member_sizes, num_members=None, chunk_size=None, superblock_size_func=None):
"""Estimate the amount of data that can be stored on this array.
:param member_size: a list of the sizes of members of this array
:type member_size: list of :class:`~.size.Size`
:param int num_members: the number of members in the array
:param chunk_size: the smallest unit of size read or written
:type chunk_size: :class:`~.size.Size`
:param superblock_size_func: a function that estimates the
superblock size for this array
:type superblock_size_func: a function from :class:`~.size.Size` to
:class:`~.size.Size`
:returns: an estimate of the amount of data that can be stored on
this array
:rtype: :class:`~.size.Size`
Note that the number of members in the array may not be the same
as the length of member_sizes if the array is still
under construction.
"""
if not member_sizes:
return Size(0)
if num_members is None:
num_members = len(member_sizes)
if chunk_size is None or chunk_size == Size(0):
raise RaidError("chunk_size parameter value %s is not acceptable")
if superblock_size_func is None:
raise RaidError("superblock_size_func value of None is not acceptable")
min_size = min(member_sizes)
superblock_size = superblock_size_func(min_size)
min_data_size = self._trim(min_size - superblock_size, chunk_size)
return self.get_net_array_size(num_members, min_data_size)
def get_space(self, size, num_members, chunk_size=None, superblock_size_func=None):
"""Estimate the amount of memory required by this array, including
memory allocated for metadata.
:param size: the amount of data on this array
:type size: :class:`~.size.Size`
:param int num_members: the number of members in the array
:param chunk_size: the smallest unit of size read or written
:type chunk_size: :class:`~.size.Size`
:param superblock_size_func: a function that estimates the
superblock size for this array
:type superblock_size_func: a function from :class:`~.size.Size` to
:class:`~.size.Size`
:returns: an estimate of the memory required, including metadata
:rtype: :class:`~.size.Size`
"""
if superblock_size_func is None:
raise RaidError("superblock_size_func value of None is not acceptable")
size_per_member = self.get_base_member_size(size, num_members)
size_per_member += superblock_size_func(size)
if chunk_size is not None:
size_per_member = self._pad(size_per_member, chunk_size)
return size_per_member * num_members
class RAIDLevels(object):
"""A class which keeps track of registered RAID levels. This class
    may be extended, overriding the isRaidLevel method to include any
additional properties that a client of this package may require
for its RAID levels.
"""
def __init__(self, levels=None):
"""Add the specified standard levels to the levels in this object.
:param levels: the levels to be added to this object
:type levels: list of valid RAID level descriptors
If levels is True, add all standard levels. Else, levels
must be a list of valid level descriptors of standard levels.
Duplicate descriptors are ignored.
"""
levels = levels or []
self._raid_levels = set()
for level in levels:
matches = [l for l in ALL_LEVELS if level in l.names]
if len(matches) != 1:
raise RaidError("invalid standard RAID level descriptor %s" % level)
else:
self.addRaidLevel(matches[0])
@classmethod
def isRaidLevel(cls, level):
"""Return False if level does not satisfy minimum requirements for
a RAID level, otherwise return True.
:param object level: an object representing a RAID level
There must be at least one element in the names list, or the level
will be impossible to look up by any string.
The name property must be defined; it should be one of the
elements in the names list.
All RAID objects that extend RAIDlevel are guaranteed to pass these
minimum requirements.
This method should not be overridden in any subclass so that it
is so restrictive that a RAIDlevel object does not satisfy it.
"""
return len(level.names) > 0 and level.name in level.names
def raidLevel(self, descriptor):
"""Return RAID object corresponding to descriptor.
:param object descriptor: a RAID level descriptor
Note that descriptor may be any object that identifies a
RAID level, including the RAID object itself.
Raises a RaidError if no RAID object can be found for this
descriptor.
"""
for level in self._raid_levels:
if descriptor in level.names or descriptor is level:
return level
raise RaidError("invalid RAID level descriptor %s" % descriptor)
def addRaidLevel(self, level):
"""Adds level to levels if it is not already there.
:param object level: an object representing a RAID level
Raises a RaidError if level is not valid.
Does not allow duplicate level objects.
"""
if not self.isRaidLevel(level):
raise RaidError("level is not a valid RAID level")
self._raid_levels.add(level)
def __iter__(self):
return iter(self._raid_levels)
ALL_LEVELS = RAIDLevels()
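# Registry of the standard levels; each level singleton defined below is added
# to it via ALL_LEVELS.addRaidLevel().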
class RAID0(RAIDn):
level = property(lambda s: "0")
min_members = property(lambda s: 2)
nick = property(lambda s: "stripe")
def has_redundancy(self):
return False
def _get_max_spares(self, member_count):
return 0
def _get_base_member_size(self, size, member_count):
return div_up(size, member_count)
def _get_net_array_size(self, member_count, smallest_member_size):
return smallest_member_size * member_count
def _trim(self, size, chunk_size):
return size - size % chunk_size
def _pad(self, size, chunk_size):
return size + (chunk_size - (size % chunk_size)) % chunk_size
def _get_recommended_stride(self, member_count):
return member_count * 16
RAID0 = RAID0()
ALL_LEVELS.addRaidLevel(RAID0)
class RAID1(RAIDn):
level = property(lambda s: "1")
min_members = property(lambda s: 2)
nick = property(lambda s: "mirror")
def has_redundancy(self):
return True
def _get_max_spares(self, member_count):
return member_count - self.min_members
def _get_base_member_size(self, size, member_count):
return size
def _get_net_array_size(self, member_count, smallest_member_size):
return smallest_member_size
def _trim(self, size, chunk_size):
return size
def _pad(self, size, chunk_size):
return size
def _get_recommended_stride(self, member_count):
return None
RAID1 = RAID1()
ALL_LEVELS.addRaidLevel(RAID1)
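# Illustrative note (hypothetical sizes, not part of the original source):
# a mirror's usable capacity is just the smallest member, and any member
# beyond the required two can serve as a spare, e.g.
#
#     RAID1._get_net_array_size(3, Size("10 GiB"))     # -> 10 GiB usable
#     RAID1._get_max_spares(3)                         # -> 1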
class RAID4(RAIDn):
level = property(lambda s: "4")
min_members = property(lambda s: 3)
nick = property(lambda s: None)
def has_redundancy(self):
return True
def _get_max_spares(self, member_count):
return member_count - self.min_members
def _get_base_member_size(self, size, member_count):
return div_up(size, member_count - 1)
def _get_net_array_size(self, member_count, smallest_member_size):
return smallest_member_size * (member_count - 1)
def _trim(self, size, chunk_size):
return size - size % chunk_size
def _pad(self, size, chunk_size):
return size + (chunk_size - (size % chunk_size)) % chunk_size
def _get_recommended_stride(self, member_count):
return (member_count - 1) * 16
RAID4 = RAID4()
ALL_LEVELS.addRaidLevel(RAID4)
class RAID5(RAIDn):
level = property(lambda s: "5")
min_members = property(lambda s: 3)
nick = property(lambda s: None)
def has_redundancy(self):
return True
def _get_max_spares(self, member_count):
return member_count - self.min_members
def _get_base_member_size(self, size, member_count):
return div_up(size, (member_count - 1))
def _get_net_array_size(self, member_count, smallest_member_size):
return smallest_member_size * (member_count - 1)
def _trim(self, size, chunk_size):
return size - size % chunk_size
def _pad(self, size, chunk_size):
return size + (chunk_size - (size % chunk_size)) % chunk_size
def _get_recommended_stride(self, member_count):
return (member_count - 1) * 16
RAID5 = RAID5()
ALL_LEVELS.addRaidLevel(RAID5)
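# Illustrative note (hypothetical sizes, not part of the original source):
# RAID5 gives up one member's worth of capacity to parity, so with four
# 10 GiB members the usable size is 3 * 10 GiB, e.g.
#
#     RAID5._get_net_array_size(4, Size("10 GiB"))     # -> 30 GiB usable
#     RAID5._get_max_spares(4)                         # -> 1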
class RAID6(RAIDn):
level = property(lambda s: "6")
min_members = property(lambda s: 4)
nick = property(lambda s: None)
def has_redundancy(self):
return True
def _get_max_spares(self, member_count):
return member_count - self.min_members
def _get_base_member_size(self, size, member_count):
return div_up(size, member_count - 2)
def _get_net_array_size(self, member_count, smallest_member_size):
return smallest_member_size * (member_count - 2)
def _trim(self, size, chunk_size):
return size - size % chunk_size
def _pad(self, size, chunk_size):
return size + (chunk_size - (size % chunk_size)) % chunk_size
def _get_recommended_stride(self, member_count):
return None
RAID6 = RAID6()
ALL_LEVELS.addRaidLevel(RAID6)
class RAID10(RAIDn):
level = property(lambda s: "10")
min_members = property(lambda s: 4)
nick = property(lambda s: None)
def has_redundancy(self):
return True
def _get_max_spares(self, member_count):
return member_count - self.min_members
def _get_base_member_size(self, size, member_count):
return div_up(size, (member_count // 2))
def _get_net_array_size(self, member_count, smallest_member_size):
return smallest_member_size * (member_count // 2)
def _trim(self, size, chunk_size):
return size
def _pad(self, size, chunk_size):
return size + (chunk_size - (size % chunk_size)) % chunk_size
def _get_recommended_stride(self, member_count):
return None
RAID10 = RAID10()
ALL_LEVELS.addRaidLevel(RAID10)
class Container(RAIDLevel):
name = "container"
names = [name]
min_members = 1
is_uniform = property(lambda s: False)
def has_redundancy(self):
raise RaidError("redundancy is not a concept that applies to containers")
def get_max_spares(self, member_count):
# pylint: disable=unused-argument
raise RaidError("get_max_spares is not defined for level container")
def get_space(self, size, num_members, chunk_size=None, superblock_size_func=None):
# pylint: disable=unused-argument
return size
def get_recommended_stride(self, member_count):
# pylint: disable=unused-argument
raise RaidError("get_recommended_stride is not defined for level container")
def get_size(self, member_sizes, num_members=None, chunk_size=None, superblock_size_func=None):
# pylint: disable=unused-argument
return sum(member_sizes, Size(0))
Container = Container()
ALL_LEVELS.addRaidLevel(Container)
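# Illustrative note (hypothetical sizes, not part of the original source):
# the container level only supports the aggregate size queries; the
# redundancy, spares and stride queries raise RaidError by design, e.g.
#
#     Container.get_size([Size("5 GiB"), Size("7 GiB")])   # -> 12 GiB
#     Container.has_redundancy()                           # raises RaidError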
class ErsatzRAID(RAIDLevel):
""" A superclass for a raid level which is not really a raid level at
all, just a bunch of block devices of possibly differing sizes
thrown together. This concept has different names depending on where
it crops up. btrfs's name is single, lvm's is linear. Consequently,
this abstract class implements all the functionality, but there are
distinct subclasses which have different names.
"""
min_members = 1
is_uniform = property(lambda s: False)
def has_redundancy(self):
return False
def get_max_spares(self, member_count):
return member_count - self.min_members
def get_space(self, size, num_members, chunk_size=None, superblock_size_func=None):
# pylint: disable=unused-argument
if superblock_size_func is None:
raise RaidError("superblock_size_func value of None is not acceptable")
return size + num_members * superblock_size_func(size)
def get_recommended_stride(self, member_count):
# pylint: disable=unused-argument
return None
def get_size(self, member_sizes, num_members=None, chunk_size=None, superblock_size_func=None):
# pylint: disable=unused-argument
if not member_sizes:
return Size(0)
if superblock_size_func is None:
raise RaidError("superblock_size_func value of None is not acceptable")
total_space = sum(member_sizes, Size(0))
superblock_size = superblock_size_func(total_space)
return total_space - len(member_sizes) * superblock_size
class Linear(ErsatzRAID):
""" subclass with canonical lvm name """
name = 'linear'
names = [name]
Linear = Linear()
ALL_LEVELS.addRaidLevel(Linear)
class Single(ErsatzRAID):
""" subclass with canonical btrfs name. """
name = 'single'
names = [name]
Single = Single()
ALL_LEVELS.addRaidLevel(Single)
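# Illustrative note (hypothetical sizes and superblock function, not part of
# the original source): for the linear/single "ersatz" levels the member
# sizes simply add up, minus one superblock per member, e.g.
#
#     Linear.get_size([Size("10 GiB"), Size("20 GiB")],
#                     superblock_size_func=lambda size: Size("2 MiB"))
#     # -> 30 GiB minus 2 * 2 MiB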
class Dup(RAIDLevel):
""" A RAID level which expresses one way btrfs metadata may be distributed.
For this RAID level, duplication occurs within a single block device.
"""
name = 'dup'
names = [name]
min_members = 1
is_uniform = property(lambda s: False)
def has_redundancy(self):
return True
Dup = Dup()
ALL_LEVELS.addRaidLevel(Dup)
def getRaidLevel(descriptor):
""" Convenience function to return a RAID level for the descriptor.
:param object descriptor: a RAID level descriptor
:rtype: RAIDLevel
:returns: The RAIDLevel object for this descriptor
Note that descriptor may be any object that identifies a
RAID level, including the RAID object itself.
        Raises a RaidError if there is no RAID object for the descriptor.
"""
return ALL_LEVELS.raidLevel(descriptor)
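# A minimal usage sketch (the exact descriptor strings depend on each level's
# names list, which for the numbered levels is built elsewhere in this module):
#
#     getRaidLevel("linear")     # -> the Linear object registered above
#     getRaidLevel(RAID5)        # a level object is itself a valid descriptor
#     getRaidLevel("bogus")      # raises RaidError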
| gpl-2.0 | -10,154,364,625,650,508 | 33.805596 | 99 | 0.643676 | false |
amyshi188/osf.io | api/base/utils.py | 4 | 5561 | # -*- coding: utf-8 -*-
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from rest_framework.exceptions import NotFound
from rest_framework.reverse import reverse
import furl
from website import util as website_util # noqa
from website import settings as website_settings
from framework.auth import Auth, User
from api.base.authentication.drf import get_session_from_cookie
from api.base.exceptions import Gone
from framework.auth.oauth_scopes import ComposedScopes, normalize_scopes
from framework.auth.cas import CasResponse
# These values are copied from rest_framework.fields.BooleanField
# BooleanField cannot be imported here without raising an
# ImproperlyConfigured error
TRUTHY = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))
FALSY = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))
UPDATE_METHODS = ['PUT', 'PATCH']
def decompose_field(field):
from api.base.serializers import (
HideIfWithdrawal, HideIfRegistration,
HideIfDisabled, AllowMissing
)
WRAPPER_FIELDS = (HideIfWithdrawal, HideIfRegistration, HideIfDisabled, AllowMissing)
while isinstance(field, WRAPPER_FIELDS):
try:
field = getattr(field, 'field')
except AttributeError:
break
return field
def is_bulk_request(request):
"""
Returns True if bulk request. Can be called as early as the parser.
"""
content_type = request.content_type
return 'ext=bulk' in content_type
def is_truthy(value):
return value in TRUTHY
def is_falsy(value):
return value in FALSY
def get_user_auth(request):
"""Given a Django request object, return an ``Auth`` object with the
authenticated user attached to it.
"""
user = request.user
private_key = request.query_params.get('view_only', None)
if user.is_anonymous():
auth = Auth(None, private_key=private_key)
else:
auth = Auth(user, private_key=private_key)
return auth
def absolute_reverse(view_name, query_kwargs=None, args=None, kwargs=None):
"""Like django's `reverse`, except returns an absolute URL. Also add query parameters."""
relative_url = reverse(view_name, kwargs=kwargs)
url = website_util.api_v2_url(relative_url, params=query_kwargs, base_prefix='')
return url
def get_object_or_error(model_cls, query_or_pk, display_name=None, **kwargs):
if isinstance(query_or_pk, basestring):
obj = model_cls.load(query_or_pk)
if obj is None:
raise NotFound
else:
try:
obj = model_cls.find_one(query_or_pk, **kwargs)
except NoResultsFound:
raise NotFound
# For objects that have been disabled (is_active is False), return a 410.
# The User model is an exception because we still want to allow
# users who are unconfirmed or unregistered, but not users who have been
# disabled.
if model_cls is User and obj.is_disabled:
raise Gone(detail='The requested user is no longer available.',
meta={'full_name': obj.fullname, 'family_name': obj.family_name, 'given_name': obj.given_name,
'middle_names': obj.middle_names, 'profile_image': obj.profile_image_url()})
elif model_cls is not User and not getattr(obj, 'is_active', True) or getattr(obj, 'is_deleted', False):
if display_name is None:
raise Gone
else:
raise Gone(detail='The requested {name} is no longer available.'.format(name=display_name))
return obj
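# A hedged usage sketch (the model class and id are illustrative; real callers
# live in the API view modules): a missing object raises NotFound (404), while
# a deleted or deactivated one raises Gone (410).
#
#     node = get_object_or_error(Node, node_id, display_name='node')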
def waterbutler_url_for(request_type, provider, path, node_id, token, obj_args=None, **query):
"""Reverse URL lookup for WaterButler routes
:param str request_type: data or metadata
:param str provider: The name of the requested provider
:param str path: The path of the requested file or folder
:param str node_id: The id of the node being accessed
:param str token: The cookie to be used or None
    :param obj_args: Parsed request arguments; if a 'view_only' key is present,
        its value is forwarded to WaterButler
    :param dict **query: Additional query parameters to be appended
"""
url = furl.furl(website_settings.WATERBUTLER_URL)
url.path.segments.append(request_type)
url.args.update({
'path': path,
'nid': node_id,
'provider': provider,
})
if token is not None:
url.args['cookie'] = token
    if obj_args and 'view_only' in obj_args:
url.args['view_only'] = obj_args['view_only']
url.args.update(query)
return url.url
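# A hedged example of the resulting URL shape (all values are made up):
#
#     waterbutler_url_for('data', 'osfstorage', '/abc123', 'node12', None,
#                         obj_args={}, version=2)
#     # -> <WATERBUTLER_URL>/data?path=%2Fabc123&nid=node12&provider=osfstorage&version=2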
def default_node_list_query():
return (
Q('is_deleted', 'ne', True) &
Q('is_collection', 'ne', True) &
Q('is_registration', 'ne', True)
)
def default_node_permission_query(user):
permission_query = Q('is_public', 'eq', True)
if not user.is_anonymous():
permission_query = (permission_query | Q('contributors', 'eq', user._id))
return permission_query
def extend_querystring_params(url, params):
return furl.furl(url).add(args=params).url
def has_admin_scope(request):
""" Helper function to determine if a request should be treated
as though it has the `osf.admin` scope. This includes both
tokened requests that do, and requests that are made via the
OSF (i.e. have an osf cookie)
"""
cookie = request.COOKIES.get(website_settings.COOKIE_NAME)
if cookie:
return bool(get_session_from_cookie(cookie))
token = request.auth
if token is None or not isinstance(token, CasResponse):
return False
return set(ComposedScopes.ADMIN_LEVEL).issubset(normalize_scopes(token.attributes['accessTokenScope']))
| apache-2.0 | -3,444,579,962,994,643,500 | 33.974843 | 113 | 0.671462 | false |
cfjhallgren/shogun | examples/undocumented/python/preprocessor_prunevarsubmean.py | 5 | 1052 | #!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
parameter_list = [[traindat,testdat,1.5,10],[traindat,testdat,1.5,10]]
def preprocessor_prunevarsubmean (fm_train_real=traindat,fm_test_real=testdat,width=1.4,size_cache=10):
from shogun import Chi2Kernel
from shogun import RealFeatures
from shogun import PruneVarSubMean
feats_train=RealFeatures(fm_train_real)
feats_test=RealFeatures(fm_test_real)
preproc=PruneVarSubMean()
preproc.init(feats_train)
feats_train.add_preprocessor(preproc)
feats_train.apply_preprocessor()
feats_test.add_preprocessor(preproc)
feats_test.apply_preprocessor()
kernel=Chi2Kernel(feats_train, feats_train, width, size_cache)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('PruneVarSubMean')
preprocessor_prunevarsubmean(*parameter_list[0])
| gpl-3.0 | 2,999,298,772,026,677,000 | 29.057143 | 103 | 0.765209 | false |