repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
---|---|---|---|---|---|
mmottahedi/neuralnilm_prototype
|
scripts/e65.py
|
2
|
2721
|
from __future__ import print_function, division
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
"""
Setup:
* in_to_cell init weights are now Normal(1.0)
* output all appliances
* fix bug in RealApplianceSource
* use cross-entropy
* smaller network
* power targets
* trying without first two sigmoid layers.
* updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0
which fixes LSTM bug.
https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0
* Subsampling *bidirectional* LSTM
* Output every sequence in the batch
* Change W_in_to_cell from Normal(1.0) to Uniform(5)
* put back the two sigmoid layers
* use Conv1D to create a hierarchical subsampling LSTM
* Using LSTM (not BLSTM) to speed up training while testing
* Use dimshuffle not reshape
* 2 dense layers back
* back to default init
* conv between LSTMs.
* More data
* BLSTM
* Try just using a 1D convnet on input
* add second Convnet layer (not sure this is correct thing to do?)
* third conv layer
* large inits
Changes
* back to 2 conv layers
Results
"""
# Data source: real appliance power readings from the UK-DALE dataset file.
source = RealApplianceSource(
    '/data/dk3810/ukdale.h5',
    ['fridge freezer', 'hair straighteners', 'television'],
    max_input_power=1000, max_appliance_powers=[300, 500, 200],
    window=("2013-06-01", "2013-07-01"),   # one month of training data
    output_one_appliance=False,            # emit targets for every appliance
    boolean_targets=False,                 # regress real power, not on/off state
    min_on_duration=60,
    input_padding=4                        # NOTE(review): presumably offsets the time-axis
                                           # shrinkage of the two conv layers — confirm
)

# Network: 1D conv feature extractor -> LSTM -> per-timestep sigmoid outputs.
# The DimshuffleLayers swap (batch, time, channels) <-> (batch, channels, time)
# because Conv1DLayer convolves over the last axis.
net = Net(
    experiment_name="e65",
    source=source,
    learning_rate=1e-1,
    save_plot_interval=50,                 # save a plot every 50 iterations
    loss_function=crossentropy,
    layers_config=[
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)           # -> (batch, channels, time) for Conv1D
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 3,
            'stride': 1,
            'nonlinearity': rectify,
            'W': Uniform(10)               # deliberately large init (see notes above)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 3,
            'stride': 1,
            'nonlinearity': rectify,
            'W': Uniform(5)
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)           # back to (batch, time, channels) for the LSTM
        },
        {
            'type': LSTMLayer,
            'num_units': 40,
            'W_in_to_cell': Uniform(5)
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs, # one output channel per appliance
            'nonlinearity': sigmoid
        }
    ]
)
net.print_net()
net.compile()
net.fit()
|
mit
|
aisipos/django
|
django/db/backends/mysql/features.py
|
28
|
2938
|
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
from .base import Database
try:
import pytz
except ImportError:
pytz = None
class DatabaseFeatures(BaseDatabaseFeatures):
    """MySQL-specific feature flags and runtime capability probes.

    Each class attribute overrides a default from BaseDatabaseFeatures; the
    cached properties query the live connection once and memoize the answer.
    """
    empty_fetchmany_value = ()
    update_can_self_select = False
    allows_group_by_pk = True
    related_fields_match_type = True
    allow_sliced_subqueries = False
    has_bulk_insert = True
    has_select_for_update = True
    has_select_for_update_nowait = False
    supports_forward_references = False
    supports_regex_backreferencing = False
    supports_date_lookup_using_string = False
    can_introspect_autofield = True
    can_introspect_binary_field = False
    can_introspect_small_integer_field = True
    supports_timezones = False
    requires_explicit_null_ordering_when_grouping = True
    allows_auto_pk_0 = False
    uses_savepoints = True
    can_release_savepoints = True
    atomic_transactions = False
    supports_column_check_constraints = False
    can_clone_databases = True
    supports_temporal_subtraction = True

    @cached_property
    def _mysql_storage_engine(self):
        "Internal method used in Django tests. Don't rely on this from your code"
        # Ask the server which storage engine is the default (e.g. InnoDB, MyISAM).
        with self.connection.cursor() as cursor:
            cursor.execute("SELECT ENGINE FROM INFORMATION_SCHEMA.ENGINES WHERE SUPPORT = 'DEFAULT'")
            result = cursor.fetchone()
        return result[0]

    @cached_property
    def can_introspect_foreign_keys(self):
        "Confirm support for introspected foreign keys"
        # MyISAM tables silently ignore foreign key constraints.
        return self._mysql_storage_engine != 'MyISAM'

    @cached_property
    def supports_microsecond_precision(self):
        # See https://github.com/farcepest/MySQLdb1/issues/24 for the reason
        # about requiring MySQLdb 1.2.5
        return self.connection.mysql_version >= (5, 6, 4) and Database.version_info >= (1, 2, 5)

    @cached_property
    def has_zoneinfo_database(self):
        # MySQL accepts full time zones names (eg. Africa/Nairobi) but rejects
        # abbreviations (eg. EAT). When pytz isn't installed and the current
        # time zone is LocalTimezone (the only sensible value in this
        # context), the current time zone name will be an abbreviation. As a
        # consequence, MySQL cannot perform time zone conversions reliably.
        if pytz is None:
            return False

        # Test if the time zone definitions are installed.
        with self.connection.cursor() as cursor:
            cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1")
            return cursor.fetchone() is not None

    def introspected_boolean_field_type(self, *args, **kwargs):
        # MySQL stores booleans as TINYINT(1); introspection sees an integer.
        return 'IntegerField'

    @cached_property
    def is_sql_auto_is_null_enabled(self):
        with self.connection.cursor() as cursor:
            cursor.execute('SELECT @@SQL_AUTO_IS_NULL')
            return cursor.fetchone()[0] == 1
|
bsd-3-clause
|
hbohuang/kubernetes
|
cluster/juju/charms/trusty/kubernetes/hooks/kubernetes_installer.py
|
148
|
2518
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys

from path import Path
class KubernetesInstaller():
    """
    This class contains the logic needed to install kubernetes binary files.
    """

    def __init__(self, arch, version, master, output_dir):
        """ Gather the required variables for the install. """
        # The kubernetes charm needs certain commands to be aliased.
        self.aliases = {'kube-proxy': 'proxy',
                        'kubelet': 'kubelet'}
        self.arch = arch
        self.version = version
        self.master = master
        self.output_dir = output_dir

    def download(self):
        """ Download the kubernetes binaries from the kubernetes master. """
        url = 'http://{0}/kubernetes/{1}/local/bin/linux/{2}'.format(
            self.master, self.version, self.arch)
        if not self.output_dir.isdir():
            self.output_dir.makedirs_p()

        for key in self.aliases:
            uri = '{0}/{1}'.format(url, key)
            destination = self.output_dir / key
            wget = 'wget -nv {0} -O {1}'.format(uri, destination)
            print(wget)
            output = subprocess.check_output(wget.split())
            print(output)
            # Downloaded binaries must be executable.
            destination.chmod(0o755)

    def install(self, install_dir=Path('/usr/local/bin')):
        """ Create links to the binary files to the install directory. """
        if not install_dir.isdir():
            install_dir.makedirs_p()

        # Create the symbolic links to the real kubernetes binaries.
        # NOTE: dict.items() (not the Python 2-only iteritems()) keeps this
        # working on both Python 2 and Python 3.
        for key, value in self.aliases.items():
            target = self.output_dir / key
            if target.exists():
                link = install_dir / value
                if link.exists():
                    link.remove()
                target.symlink(link)
            else:
                print('Error target file {0} does not exist.'.format(target))
                # sys.exit is explicit and works even when the site module's
                # exit() helper is unavailable.
                sys.exit(1)
|
apache-2.0
|
simongoffin/my_odoo_tutorial
|
addons/resource/faces/plocale.py
|
433
|
1910
|
############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import gettext
import os.path
import locale
import sys
def _get_translation():
try:
return gettext.translation("faces")
except:
try:
if sys.frozen:
path = os.path.dirname(sys.argv[0])
path = os.path.join(path, "resources", "faces", "locale")
else:
path = os.path.split(__file__)[0]
path = os.path.join(path, "locale")
return gettext.translation("faces", path)
except Exception, e:
return None
def get_gettext():
    """Return the "faces" translation function, or an identity fallback."""
    translation = _get_translation()
    return translation.ugettext if translation else (lambda msg: msg)
def get_encoding():
    """Return the charset of the "faces" catalogue, or the locale default."""
    translation = _get_translation()
    return translation.charset() if translation else locale.getpreferredencoding()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ganeshgore/myremolab
|
server/launch/sample_balanced2_concurrent_experiments/main_machine/lab_and_experiment1/experiment76/server_config.py
|
242
|
1525
|
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
# Device type and port number exposed by this Xilinx experiment server.
weblab_xilinx_experiment_xilinx_device = 'FPGA'
weblab_xilinx_experiment_port_number = 1

# This should be something like this:
# import os as _os
# xilinx_home = _os.getenv('XILINX_HOME')
# if xilinx_home == None:
#     if _os.name == 'nt':
#         xilinx_home = r'C:\Program Files\Xilinx'
#     elif _os.name == 'posix':
#         xilinx_home = r"/home/nctrun/Xilinx"
#
# if _os.name == 'nt':
#     xilinx_impact_full_path = [xilinx_home + r'\bin\nt\impact']
# elif _os.name == 'posix':
#     xilinx_impact_full_path = [xilinx_home + r'/bin/lin/impact']

# But for testing we are going to fake it:
xilinx_home = "."
xilinx_impact_full_path = ["python","./tests/unit/weblab/experiment/devices/xilinx_impact/fake_impact.py" ]

# Which device drivers handle programming and command transport.
xilinx_device_to_program = 'XilinxImpact' # 'JTagBlazer', 'DigilentAdept'
xilinx_device_to_send_commands = 'SerialPort' # 'HttpDevice'

# Fake Digilent Adept used for testing; $FILE is substituted at run time.
digilent_adept_full_path = ["python","./test/unit/weblab/experiment/devices/digilent_adept/fake_digilent_adept.py" ]
digilent_adept_batch_content = """something with the variable $FILE"""

# HTTP endpoint of the FPGA board.
xilinx_http_device_ip_FPGA = "192.168.50.138"
xilinx_http_device_port_FPGA = 80
xilinx_http_device_app_FPGA = ""

# iMPACT batch script; $FILE is replaced with the bitstream path at run time.
xilinx_batch_content_FPGA = """setMode -bs
setCable -port auto
addDevice -position 1 -file $FILE
Program -p 1
exit
"""

# Though it is not really a FPGA, the webcam url var name depends on the device,
# specified above.
fpga_webcam_url = '''https://www.weblab.deusto.es/webcam/fpga0/image.jpg'''
|
bsd-2-clause
|
nicoTrombon/DjangoPolls
|
env/Lib/site-packages/django/db/models/sql/aggregates.py
|
174
|
4843
|
"""
Classes to represent the default SQL aggregate functions
"""
import copy
import warnings
from django.db.models.fields import FloatField, IntegerField
from django.db.models.lookups import RegisterLookupMixin
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
__all__ = ['Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance']

# Importing this module is itself deprecated; warn at import time so callers
# migrate to django.db.models.aggregates before Django 1.10.
warnings.warn(
    "django.db.models.sql.aggregates is deprecated. Use "
    "django.db.models.aggregates instead.",
    RemovedInDjango110Warning, stacklevel=2)
class Aggregate(RegisterLookupMixin):
    """
    Default SQL Aggregate.
    """
    is_ordinal = False    # True when the aggregate always yields an integer
    is_computed = False   # True when the aggregate always yields a float
    sql_template = '%(function)s(%(field)s)'

    def __init__(self, col, source=None, is_summary=False, **extra):
        """Instantiate an SQL aggregate
        * col is a column reference describing the subject field
        of the aggregate. It can be an alias, or a tuple describing
        a table and column name.
        * source is the underlying field or aggregate definition for
        the column reference. If the aggregate is not an ordinal or
        computed type, this reference is used to determine the coerced
        output type of the aggregate.
        * extra is a dictionary of additional data to provide for the
        aggregate definition
        Also utilizes the class variables:
        * sql_function, the name of the SQL function that implements the
        aggregate.
        * sql_template, a template string that is used to render the
        aggregate into SQL.
        * is_ordinal, a boolean indicating if the output of this aggregate
        is an integer (e.g., a count)
        * is_computed, a boolean indicating if this output of this aggregate
        is a computed float (e.g., an average), regardless of the input
        type.
        """
        self.col = col
        self.source = source
        self.is_summary = is_summary
        self.extra = extra

        # Follow the chain of aggregate sources back until you find an
        # actual field, or an aggregate that forces a particular output
        # type. This type of this field will be used to coerce values
        # retrieved from the database.
        tmp = self
        while tmp and isinstance(tmp, Aggregate):
            if getattr(tmp, 'is_ordinal', False):
                tmp = self._ordinal_aggregate_field
            elif getattr(tmp, 'is_computed', False):
                tmp = self._computed_aggregate_field
            else:
                tmp = tmp.source
        self.field = tmp

    # Two fake fields used to identify aggregate types in data-conversion operations.
    @cached_property
    def _ordinal_aggregate_field(self):
        return IntegerField()

    @cached_property
    def _computed_aggregate_field(self):
        return FloatField()

    def relabeled_clone(self, change_map):
        # Shallow-copy and remap the table alias (first element of a
        # (table, column) tuple) according to change_map.
        clone = copy.copy(self)
        if isinstance(self.col, (list, tuple)):
            clone.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
        return clone

    def as_sql(self, compiler, connection):
        "Return the aggregate, rendered as SQL with parameters."
        params = []

        if hasattr(self.col, 'as_sql'):
            field_name, params = self.col.as_sql(compiler, connection)
        elif isinstance(self.col, (list, tuple)):
            # (table, column) pair: quote each part and join with a dot.
            field_name = '.'.join(compiler(c) for c in self.col)
        else:
            field_name = compiler(self.col)

        substitutions = {
            'function': self.sql_function,
            'field': field_name
        }
        substitutions.update(self.extra)

        return self.sql_template % substitutions, params

    def get_group_by_cols(self):
        # Aggregates never contribute columns to GROUP BY.
        return []

    @property
    def output_field(self):
        return self.field
class Avg(Aggregate):
    # AVG always yields a float regardless of the input field type.
    is_computed = True
    sql_function = 'AVG'
class Count(Aggregate):
    """COUNT aggregate; always ordinal (integer) and optionally DISTINCT."""
    is_ordinal = True
    sql_function = 'COUNT'
    sql_template = '%(function)s(%(distinct)s%(field)s)'

    def __init__(self, col, distinct=False, **extra):
        # The template expects the DISTINCT keyword (or nothing) as a string.
        distinct_sql = 'DISTINCT ' if distinct else ''
        super(Count, self).__init__(col, distinct=distinct_sql, **extra)
class Max(Aggregate):
    # Output type follows the aggregated field (neither ordinal nor computed).
    sql_function = 'MAX'
class Min(Aggregate):
    # Output type follows the aggregated field (neither ordinal nor computed).
    sql_function = 'MIN'
class StdDev(Aggregate):
    """Standard deviation; population form by default, sample on request."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        super(StdDev, self).__init__(col, **extra)
        if sample:
            self.sql_function = 'STDDEV_SAMP'
        else:
            self.sql_function = 'STDDEV_POP'
class Sum(Aggregate):
    # Output type follows the aggregated field (neither ordinal nor computed).
    sql_function = 'SUM'
class Variance(Aggregate):
    """Variance; population form by default, sample on request."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        super(Variance, self).__init__(col, **extra)
        if sample:
            self.sql_function = 'VAR_SAMP'
        else:
            self.sql_function = 'VAR_POP'
|
bsd-3-clause
|
nkcr/WebIndex
|
app/venv/lib/python3.5/site-packages/pip/_vendor/colorama/ansi.py
|
640
|
2524
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
# ANSI escape-sequence prefixes: Control Sequence Introducer, Operating
# System Command, and the terminal bell character.
CSI = '\033['
OSC = '\033]'
BEL = '\007'


def code_to_chars(code):
    """Wrap an SGR *code* number in a CSI ... 'm' escape sequence."""
    return '{0}{1}m'.format(CSI, code)


def set_title(title):
    """Build the OSC sequence that sets the terminal window title."""
    return '{0}2;{1}{2}'.format(OSC, title, BEL)


def clear_screen(mode=2):
    """Build the CSI 'J' (erase display) sequence for *mode*."""
    return '{0}{1}J'.format(CSI, mode)


def clear_line(mode=2):
    """Build the CSI 'K' (erase line) sequence for *mode*."""
    return '{0}{1}K'.format(CSI, mode)
class AnsiCodes(object):
    """Base class that turns numeric class attributes into escape strings."""

    def __init__(self):
        # The subclasses declare class attributes which are plain numbers.
        # On instantiation, shadow each of them with an instance attribute
        # holding the equivalent ANSI escape sequence.
        for attr_name in dir(self):
            if attr_name.startswith('_'):
                continue
            number = getattr(self, attr_name)
            setattr(self, attr_name, code_to_chars(number))
class AnsiCursor(object):
    """Factories for cursor-movement ANSI escape sequences."""

    def UP(self, n=1):
        """Move the cursor up *n* rows."""
        return '{0}{1}A'.format(CSI, n)

    def DOWN(self, n=1):
        """Move the cursor down *n* rows."""
        return '{0}{1}B'.format(CSI, n)

    def FORWARD(self, n=1):
        """Move the cursor right *n* columns."""
        return '{0}{1}C'.format(CSI, n)

    def BACK(self, n=1):
        """Move the cursor left *n* columns."""
        return '{0}{1}D'.format(CSI, n)

    def POS(self, x=1, y=1):
        """Move the cursor to column *x*, row *y* (both 1-based)."""
        return '{0}{1};{2}H'.format(CSI, y, x)
class AnsiFore(AnsiCodes):
    # Standard ANSI foreground colour codes (SGR 30-37; 39 resets to default).
    BLACK = 30
    RED = 31
    GREEN = 32
    YELLOW = 33
    BLUE = 34
    MAGENTA = 35
    CYAN = 36
    WHITE = 37
    RESET = 39

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX = 90
    LIGHTRED_EX = 91
    LIGHTGREEN_EX = 92
    LIGHTYELLOW_EX = 93
    LIGHTBLUE_EX = 94
    LIGHTMAGENTA_EX = 95
    LIGHTCYAN_EX = 96
    LIGHTWHITE_EX = 97
class AnsiBack(AnsiCodes):
    # Standard ANSI background colour codes (SGR 40-47; 49 resets to default).
    BLACK = 40
    RED = 41
    GREEN = 42
    YELLOW = 43
    BLUE = 44
    MAGENTA = 45
    CYAN = 46
    WHITE = 47
    RESET = 49

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX = 100
    LIGHTRED_EX = 101
    LIGHTGREEN_EX = 102
    LIGHTYELLOW_EX = 103
    LIGHTBLUE_EX = 104
    LIGHTMAGENTA_EX = 105
    LIGHTCYAN_EX = 106
    LIGHTWHITE_EX = 107
class AnsiStyle(AnsiCodes):
    # SGR intensity/style codes; RESET_ALL (0) clears colours and styles.
    BRIGHT = 1
    DIM = 2
    NORMAL = 22
    RESET_ALL = 0
# Ready-to-use singletons; instantiation converts the numeric codes declared
# on the classes above into their escape-sequence strings.
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor()
|
mit
|
alexlo03/ansible
|
lib/ansible/module_utils/oneview.py
|
23
|
18876
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
import abc
import collections
import json
import os
import traceback
try:
from hpOneView.oneview_client import OneViewClient
HAS_HPE_ONEVIEW = True
except ImportError:
HAS_HPE_ONEVIEW = False
from ansible.module_utils import six
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.common._collections_compat import Mapping
def transform_list_to_dict(list_):
    """
    Transform a list into a dictionary whose keys are the list values.

    Mapping entries are merged in as-is; scalar entries become keys mapped
    to True.

    :arg list list_: List of values
    :return: dict: dictionary built
    """
    result = {}
    for entry in list_ or []:
        if isinstance(entry, Mapping):
            result.update(entry)
        else:
            result[to_native(entry, errors='surrogate_or_strict')] = True
    return result
def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None):
    """
    Merge two lists of dicts, matching entries by *key*.

    Entries only in updated_list are added; entries only in original_list are
    dropped; for entries present in both, the updated values overwrite the
    original ones.  Result order follows updated_list.

    :arg list original_list: original list.
    :arg list updated_list: list with changes.
    :arg str key: unique identifier.
    :arg list ignore_when_null: keys of the updated items that are skipped in
        the merge when their values are null.
    :return: list: Lists merged.
    """
    null_exempt = ignore_when_null or []
    if not original_list:
        return updated_list

    originals = collections.OrderedDict(
        (entry[key], entry.copy()) for entry in original_list)
    merged = collections.OrderedDict()

    for update in updated_list:
        identifier = update[key]
        if identifier not in originals:
            merged[identifier] = update
            continue
        # Drop null values the caller asked us to ignore before merging.
        for exempt_key in null_exempt:
            if exempt_key in update and update[exempt_key] is None:
                update.pop(exempt_key)
        combined = originals[identifier]
        combined.update(update)
        merged[identifier] = combined

    return list(merged.values())
def _str_sorted(obj):
if isinstance(obj, Mapping):
return json.dumps(obj, sort_keys=True)
else:
return str(obj)
def _standardize_value(value):
"""
Convert value to string to enhance the comparison.
:arg value: Any object type.
:return: str: Converted value.
"""
if isinstance(value, float) and value.is_integer():
# Workaround to avoid erroneous comparison between int and float
# Removes zero from integer floats
value = int(value)
return str(value)
class OneViewModuleException(Exception):
    """
    OneView base Exception.

    Attributes:
        msg (str): Exception message.
        oneview_response (dict): OneView rest response.
    """

    def __init__(self, data):
        if isinstance(data, six.string_types):
            # Plain message, no REST payload.
            self.msg = data
            self.oneview_response = None
        else:
            self.oneview_response = data
            self.msg = data.get('message') if (data and isinstance(data, dict)) else None

        if self.oneview_response:
            Exception.__init__(self, self.msg, self.oneview_response)
        else:
            Exception.__init__(self, self.msg)
class OneViewModuleTaskError(OneViewModuleException):
    """
    OneView Task Error Exception.

    Attributes:
        msg (str): Exception message.
        error_code (str): A code which uniquely identifies the specific error.
    """

    def __init__(self, msg, error_code=None):
        # Record the task error code, then defer message handling to the base.
        self.error_code = error_code
        super(OneViewModuleTaskError, self).__init__(msg)
class OneViewModuleValueError(OneViewModuleException):
    """
    OneView Value Error.
    The exception is raised when the data contains an inappropriate value.

    Attributes:
        msg (str): Exception message.
    """
    pass
class OneViewModuleResourceNotFound(OneViewModuleException):
    """
    OneView Resource Not Found Exception.
    The exception is raised when an associated resource was not found.

    Attributes:
        msg (str): Exception message.
    """
    pass
@six.add_metaclass(abc.ABCMeta)
class OneViewModuleBase(object):
MSG_CREATED = 'Resource created successfully.'
MSG_UPDATED = 'Resource updated successfully.'
MSG_DELETED = 'Resource deleted successfully.'
MSG_ALREADY_PRESENT = 'Resource is already present.'
MSG_ALREADY_ABSENT = 'Resource is already absent.'
MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
HPE_ONEVIEW_SDK_REQUIRED = 'HPE OneView Python SDK is required for this module.'
ONEVIEW_COMMON_ARGS = dict(
config=dict(type='path'),
hostname=dict(type='str'),
username=dict(type='str'),
password=dict(type='str', no_log=True),
api_version=dict(type='int'),
image_streamer_hostname=dict(type='str')
)
ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))
resource_client = None
def __init__(self, additional_arg_spec=None, validate_etag_support=False):
"""
OneViewModuleBase constructor.
:arg dict additional_arg_spec: Additional argument spec definition.
:arg bool validate_etag_support: Enables support to eTag validation.
"""
argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
self._check_hpe_oneview_sdk()
self._create_oneview_client()
self.state = self.module.params.get('state')
self.data = self.module.params.get('data')
# Preload params for get_all - used by facts
self.facts_params = self.module.params.get('params') or {}
# Preload options as dict - used by facts
self.options = transform_list_to_dict(self.module.params.get('options'))
self.validate_etag_support = validate_etag_support
def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
merged_arg_spec = dict()
merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
if validate_etag_support:
merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)
if additional_arg_spec:
merged_arg_spec.update(additional_arg_spec)
return merged_arg_spec
def _check_hpe_oneview_sdk(self):
if not HAS_HPE_ONEVIEW:
self.module.fail_json(msg=self.HPE_ONEVIEW_SDK_REQUIRED)
def _create_oneview_client(self):
if self.module.params.get('hostname'):
config = dict(ip=self.module.params['hostname'],
credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
api_version=self.module.params['api_version'],
image_streamer_ip=self.module.params['image_streamer_hostname'])
self.oneview_client = OneViewClient(config)
elif not self.module.params['config']:
self.oneview_client = OneViewClient.from_environment_variables()
else:
self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
@abc.abstractmethod
def execute_module(self):
"""
Abstract method, must be implemented by the inheritor.
This method is called from the run method. It should contains the module logic
:return: dict: It must return a dictionary with the attributes for the module result,
such as ansible_facts, msg and changed.
"""
pass
def run(self):
"""
Common implementation of the OneView run modules.
It calls the inheritor 'execute_module' function and sends the return to the Ansible.
It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message.
"""
try:
if self.validate_etag_support:
if not self.module.params.get('validate_etag'):
self.oneview_client.connection.disable_etag_validation()
result = self.execute_module()
if "changed" not in result:
result['changed'] = False
self.module.exit_json(**result)
except OneViewModuleException as exception:
error_msg = '; '.join(to_native(e) for e in exception.args)
self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
def resource_absent(self, resource, method='delete'):
"""
Generic implementation of the absent state for the OneView resources.
It checks if the resource needs to be removed.
:arg dict resource: Resource to delete.
:arg str method: Function of the OneView client that will be called for resource deletion.
Usually delete or remove.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
if resource:
getattr(self.resource_client, method)(resource)
return {"changed": True, "msg": self.MSG_DELETED}
else:
return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}
def get_by_name(self, name):
"""
Generic get by name implementation.
:arg str name: Resource name to search for.
:return: The resource found or None.
"""
result = self.resource_client.get_by('name', name)
return result[0] if result else None
def resource_present(self, resource, fact_name, create_method='create'):
"""
Generic implementation of the present state for the OneView resources.
It checks if the resource needs to be created or updated.
:arg dict resource: Resource to create or update.
:arg str fact_name: Name of the fact returned to the Ansible.
:arg str create_method: Function of the OneView client that will be called for resource creation.
Usually create or add.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
changed = False
if "newName" in self.data:
self.data["name"] = self.data.pop("newName")
if not resource:
resource = getattr(self.resource_client, create_method)(self.data)
msg = self.MSG_CREATED
changed = True
else:
merged_data = resource.copy()
merged_data.update(self.data)
if self.compare(resource, merged_data):
msg = self.MSG_ALREADY_PRESENT
else:
resource = self.resource_client.update(merged_data)
changed = True
msg = self.MSG_UPDATED
return dict(
msg=msg,
changed=changed,
ansible_facts={fact_name: resource}
)
def resource_scopes_set(self, state, fact_name, scope_uris):
"""
Generic implementation of the scopes update PATCH for the OneView resources.
It checks if the resource needs to be updated with the current scopes.
This method is meant to be run after ensuring the present state.
:arg dict state: Dict containing the data from the last state results in the resource.
It needs to have the 'msg', 'changed', and 'ansible_facts' entries.
:arg str fact_name: Name of the fact returned to the Ansible.
:arg list scope_uris: List with all the scope URIs to be added to the resource.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
if scope_uris is None:
scope_uris = []
resource = state['ansible_facts'][fact_name]
operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
state['changed'] = True
state['msg'] = self.MSG_UPDATED
return state
def compare(self, first_resource, second_resource):
"""
Recursively compares dictionary contents equivalence, ignoring types and elements order.
Particularities of the comparison:
- Inexistent key = None
- These values are considered equal: None, empty, False
- Lists are compared value by value after a sort, if they have same size.
- Each element is converted to str before the comparison.
:arg dict first_resource: first dictionary
:arg dict second_resource: second dictionary
:return: bool: True when equal, False when different.
"""
resource1 = first_resource
resource2 = second_resource
debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
# The first resource is True / Not Null and the second resource is False / Null
if resource1 and not resource2:
self.module.log("resource1 and not resource2. " + debug_resources)
return False
# Checks all keys in first dict against the second dict
for key in resource1:
if key not in resource2:
if resource1[key] is not None:
# Inexistent key is equivalent to exist with value None
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
# If both values are null, empty or False it will be considered equal.
elif not resource1[key] and not resource2[key]:
continue
elif isinstance(resource1[key], Mapping):
# recursive call
if not self.compare(resource1[key], resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
elif isinstance(resource1[key], list):
# change comparison function to compare_list
if not self.compare_list(resource1[key], resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
# Checks all keys in the second dict, looking for missing elements
for key in resource2.keys():
if key not in resource1:
if resource2[key] is not None:
# Inexistent key is equivalent to exist with value None
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
return True
def compare_list(self, first_resource, second_resource):
    """
    Recursively check two lists for equivalence, ignoring types and
    element order.  Lists of equal length are sorted with a common
    string key and compared pairwise; scalar elements are converted
    to str before comparison.

    :arg list first_resource: first list
    :arg list second_resource: second list
    :return: True when equal; False when different.
    """
    list_a = first_resource
    list_b = second_resource
    debug_resources = "resource1 = {0}, resource2 = {1}".format(list_a, list_b)
    # A null / empty / False second list never matches.
    if not list_b:
        self.module.log("resource 2 is null. " + debug_resources)
        return False
    if len(list_a) != len(list_b):
        self.module.log("resources have different length. " + debug_resources)
        return False
    # Order-insensitive comparison: sort both sides with the same key.
    pairs = zip(sorted(list_a, key=_str_sorted), sorted(list_b, key=_str_sorted))
    for item_a, item_b in pairs:
        if isinstance(item_a, Mapping):
            # Dictionaries are delegated to the dict comparator.
            if not self.compare(item_a, item_b):
                self.module.log("resources are different. " + debug_resources)
                return False
        elif isinstance(item_a, list):
            # Nested lists recurse into this same routine.
            if not self.compare_list(item_a, item_b):
                self.module.log("lists are different. " + debug_resources)
                return False
        elif _standardize_value(item_a) != _standardize_value(item_b):
            self.module.log("values are different. " + debug_resources)
            return False
    # no differences found
    return True
|
gpl-3.0
|
mcalmer/spacewalk
|
client/tools/rhncfg/config_management/rhncfg_diff_revisions.py
|
10
|
2765
|
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
from config_common import handler_base, cfg_exceptions
from config_common.rhn_log import log_debug, die
class Handler(handler_base.HandlerBase):
    """Diff two revisions of one config file, optionally across two channels."""
    _usage_options = "[options] file"
    _options_table = [
        handler_base.HandlerBase._option_class(
            '-c', '--channel', action="append",
            help="Use this config channel",
        ),
        handler_base.HandlerBase._option_class(
            '-r', '--revision', action="append",
            help="Use this revision",
        ),
    ]

    def run(self):
        """Validate CLI arguments and print the revision diff to stdout.

        Exits via die() on bad usage (code 3), missing channels (code 4),
        missing files (code 2) or binary-file diffs (code 3).
        """
        log_debug(2)
        repo = self.repository
        # Exactly one file path is expected as a positional argument.
        if len(self.args) != 1:
            die(3, "One file needs to be specified")
        path = self.args[0]
        channel_dst = None
        channels = self.options.channel or []
        if not channels:
            die(3, "At least one config channel has to be specified")
        channel_src = channels[0]
        if len(channels) > 2:
            die(3, "At most two config channels can be specified")
        if not repo.config_channel_exists(channel_src):
            die(4, "Source config channel %s does not exist" % channel_src)
        if len(channels) == 2:
            channel_dst = channels[1]
            if not repo.config_channel_exists(channel_dst):
                die(4, "Config channel %s does not exist" % channel_dst)
        revision_dst = None
        revisions = self.options.revision or []
        if not revisions:
            die(3, "At least one revision has to be specified")
        revision_src = revisions[0]
        if len(revisions) > 2:
            die(3, "At most two revisions can be specified")
        if len(revisions) == 2:
            revision_dst = revisions[1]
        try:
            result = repo.diff_file_revisions(path, channel_src,
                                              revision_src, channel_dst,
                                              revision_dst)
        except cfg_exceptions.RepositoryFileMissingError:
            # sys.exc_info() form kept for Python 2 compatibility.
            e = sys.exc_info()[1]
            die(2, e[0])
        except cfg_exceptions.BinaryFileDiffError:
            e = sys.exc_info()[1]
            die(3, e[0])
        sys.stdout.write(result)
|
gpl-2.0
|
sebrandon1/neutron
|
neutron/conf/wsgi.py
|
9
|
1262
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_service import wsgi
from neutron._i18n import _
# WSGI listen-socket tuning options for the API server; registered on a
# ConfigOpts object by register_socket_opts() below.
socket_opts = [
    cfg.IntOpt('backlog',
               default=4096,
               help=_("Number of backlog requests to configure "
                      "the socket with")),
    cfg.IntOpt('retry_until_window',
               default=30,
               help=_("Number of seconds to keep retrying to listen")),
    cfg.BoolOpt('use_ssl',
                default=False,
                help=_('Enable SSL on the API server')),
]
def register_socket_opts(cfg=cfg.CONF):
    """Register the socket options and oslo.service WSGI options on *cfg*.

    :param cfg: a ConfigOpts instance; defaults to the global CONF.
        NOTE(review): the parameter name deliberately shadows the
        module-level ``cfg`` module — the default ``cfg.CONF`` is
        resolved against the module at import time.
    """
    cfg.register_opts(socket_opts)
    wsgi.register_opts(cfg)
|
apache-2.0
|
zarboz/xbmc
|
lib/libUPnP/Neptune/Build/Tools/SCons/gcc-generic.py
|
21
|
1153
|
import os
def generate(env, gcc_cross_prefix=None, gcc_strict=True, gcc_stop_on_warning=None):
    """Configure a SCons construction environment for gcc/g++ builds.

    :param env: the SCons Environment to mutate; must carry the
        'stop_on_warning' and 'build_config' keys.
    :param gcc_cross_prefix: toolchain prefix (e.g. 'arm-linux') used to
        derive the AR/RANLIB/CC/CXX/LINK tool names; None means native tools.
    :param gcc_strict: when True, enable the pedantic warning set.
    :param gcc_stop_on_warning: treat warnings as errors; when None, fall
        back to the environment's 'stop_on_warning' setting.
    """
    # 'is None', not '== None': an explicit False must NOT trigger the fallback.
    if gcc_stop_on_warning is None: gcc_stop_on_warning = env['stop_on_warning']

    ### compiler flags
    if gcc_strict:
        env.AppendUnique(CCFLAGS = ['-pedantic', '-Wall', '-W', '-Wundef', '-Wno-long-long'])
        env.AppendUnique(CFLAGS  = ['-Wmissing-prototypes', '-Wmissing-declarations'])
    else:
        env.AppendUnique(CCFLAGS = ['-Wall'])

    compiler_defines = ['-D_REENTRANT']
    env.AppendUnique(CCFLAGS = compiler_defines)
    env.AppendUnique(CPPFLAGS = compiler_defines)

    # Debug builds get symbols; everything else gets full optimization.
    if env['build_config'] == 'Debug':
        env.AppendUnique(CCFLAGS = '-g')
    else:
        env.AppendUnique(CCFLAGS = '-O3')

    if gcc_stop_on_warning:
        env.AppendUnique(CCFLAGS = ['-Werror'])

    if gcc_cross_prefix:
        # NOTE(review): appends os.environ['PATH'] with no os.pathsep
        # separator — looks suspicious, but preserved as-is; confirm
        # against the build before changing.
        env['ENV']['PATH'] += os.environ['PATH']
        env['AR']     = gcc_cross_prefix+'-ar'
        env['RANLIB'] = gcc_cross_prefix+'-ranlib'
        env['CC']     = gcc_cross_prefix+'-gcc'
        env['CXX']    = gcc_cross_prefix+'-g++'
        env['LINK']   = gcc_cross_prefix+'-g++'
|
gpl-2.0
|
acourtney2015/boto
|
boto/iam/connection.py
|
75
|
62193
|
# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
import boto.jsonresponse
from boto.compat import json, six
from boto.resultset import ResultSet
from boto.iam.summarymap import SummaryMap
from boto.connection import AWSQueryConnection
# Canned trust policies letting EC2 instances assume a role via STS.
# Presumably keyed by endpoint domain suffix (the '.cn' entry carries the
# China-partition EC2 service principal) — confirm against the caller
# that selects among these keys.
DEFAULT_POLICY_DOCUMENTS = {
    'default': {
        'Statement': [
            {
                'Principal': {
                    'Service': ['ec2.amazonaws.com']
                },
                'Effect': 'Allow',
                'Action': ['sts:AssumeRole']
            }
        ]
    },
    'amazonaws.com.cn': {
        'Statement': [
            {
                'Principal': {
                    'Service': ['ec2.amazonaws.com.cn']
                },
                'Effect': 'Allow',
                'Action': ['sts:AssumeRole']
            }
        ]
    },
}
# For backward-compatibility, we'll preserve this here.
# JSON-serialized form of the 'default' trust policy above.
ASSUME_ROLE_POLICY_DOCUMENT = json.dumps(DEFAULT_POLICY_DOCUMENTS['default'])
class IAMConnection(AWSQueryConnection):
APIVersion = '2010-05-08'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
             is_secure=True, port=None, proxy=None, proxy_port=None,
             proxy_user=None, proxy_pass=None, host='iam.amazonaws.com',
             debug=0, https_connection_factory=None, path='/',
             security_token=None, validate_certs=True, profile_name=None):
    """Open a query connection to the (regionless) IAM endpoint.

    All arguments are connection plumbing forwarded unchanged to
    AWSQueryConnection; only the default host is IAM-specific.
    """
    super(IAMConnection, self).__init__(aws_access_key_id,
                                        aws_secret_access_key,
                                        is_secure, port, proxy,
                                        proxy_port, proxy_user, proxy_pass,
                                        host, debug, https_connection_factory,
                                        path, security_token,
                                        validate_certs=validate_certs,
                                        profile_name=profile_name)
def _required_auth_capability(self):
    # IAM requests are signed with AWS Signature Version 4.
    return ['hmac-v4']
def get_response(self, action, params, path='/', parent=None,
                 verb='POST', list_marker='Set'):
    """
    Issue one IAM query request and parse the XML response.

    Returns a parsed jsonresponse Element on HTTP 200 (or an empty dict
    for bodyless 200 responses); raises self.ResponseError otherwise.
    """
    parent = parent or self
    response = self.make_request(action, params, path, verb)
    body = response.read()
    boto.log.debug(body)
    # Non-200: log status and payload, then surface as ResponseError.
    if response.status != 200:
        boto.log.error('%s %s' % (response.status, response.reason))
        boto.log.error('%s' % body)
        raise self.ResponseError(response.status, response.reason, body)
    # Support empty responses, e.g. deleting a SAML provider
    # according to the official documentation.
    if not body:
        return {}
    element = boto.jsonresponse.Element(list_marker=list_marker,
                                        pythonize_name=True)
    handler = boto.jsonresponse.XmlHandler(element, parent)
    handler.parse(body)
    return element
#
# Group methods
#
def get_all_groups(self, path_prefix='/', marker=None, max_items=None):
    """List the groups whose paths start with ``path_prefix``.

    :type path_prefix: string
    :param path_prefix: only groups under this path prefix are returned.
    :type marker: string
    :param marker: pagination cursor (Marker from a truncated response).
    :type max_items: int
    :param max_items: maximum number of groups per response page.
    """
    query = {}
    # Only truthy values are sent on the wire.
    for name, value in (('PathPrefix', path_prefix),
                        ('Marker', marker),
                        ('MaxItems', max_items)):
        if value:
            query[name] = value
    return self.get_response('ListGroups', query, list_marker='Groups')
def get_group(self, group_name, marker=None, max_items=None):
    """Return the users that belong to ``group_name``.

    :type group_name: string
    :param group_name: the group to describe.
    :type marker: string
    :param marker: pagination cursor (Marker from a truncated response).
    :type max_items: int
    :param max_items: maximum number of entries per response page.
    """
    query = {'GroupName': group_name}
    for name, value in (('Marker', marker), ('MaxItems', max_items)):
        if value:
            query[name] = value
    return self.get_response('GetGroup', query, list_marker='Users')
def create_group(self, group_name, path='/'):
    """Create a new IAM group.

    :type group_name: string
    :param group_name: name of the new group.
    :type path: string
    :param path: optional path for the group; defaults to '/'.
    """
    return self.get_response('CreateGroup',
                             {'GroupName': group_name, 'Path': path})
def delete_group(self, group_name):
    """Delete a group; it must hold no users and no attached policies.

    :type group_name: string
    :param group_name: name of the group to delete.
    """
    return self.get_response('DeleteGroup', {'GroupName': group_name})
def update_group(self, group_name, new_group_name=None, new_path=None):
    """Rename and/or move the specified group.

    :type group_name: string
    :param group_name: current name of the group.
    :type new_group_name: string
    :param new_group_name: new name for the group, if renaming.
    :type new_path: string
    :param new_path: new path for the group, if moving.
    """
    query = {'GroupName': group_name}
    for name, value in (('NewGroupName', new_group_name),
                        ('NewPath', new_path)):
        if value:
            query[name] = value
    return self.get_response('UpdateGroup', query)
def add_user_to_group(self, group_name, user_name):
    """Add ``user_name`` to the group ``group_name``.

    :type group_name: string
    :param group_name: the target group.
    :type user_name: string
    :param user_name: the user to add.
    """
    return self.get_response('AddUserToGroup',
                             {'GroupName': group_name,
                              'UserName': user_name})
def remove_user_from_group(self, group_name, user_name):
    """Remove ``user_name`` from the group ``group_name``.

    :type group_name: string
    :param group_name: the group to modify.
    :type user_name: string
    :param user_name: the user to remove.
    """
    return self.get_response('RemoveUserFromGroup',
                             {'GroupName': group_name,
                              'UserName': user_name})
def put_group_policy(self, group_name, policy_name, policy_json):
    """Attach or replace an inline policy document on a group.

    :type group_name: string
    :param group_name: the group the policy is attached to.
    :type policy_name: string
    :param policy_name: name under which the document is stored.
    :type policy_json: string
    :param policy_json: the policy document (JSON text).
    """
    return self.get_response('PutGroupPolicy',
                             {'GroupName': group_name,
                              'PolicyName': policy_name,
                              'PolicyDocument': policy_json},
                             verb='POST')
def get_all_group_policies(self, group_name, marker=None, max_items=None):
    """List the inline policy names attached to ``group_name``.

    :type group_name: string
    :param group_name: the group whose policies are listed.
    :type marker: string
    :param marker: pagination cursor (Marker from a truncated response).
    :type max_items: int
    :param max_items: maximum number of names per response page.
    """
    query = {'GroupName': group_name}
    for name, value in (('Marker', marker), ('MaxItems', max_items)):
        if value:
            query[name] = value
    return self.get_response('ListGroupPolicies', query,
                             list_marker='PolicyNames')
def get_group_policy(self, group_name, policy_name):
    """Fetch one inline policy document attached to a group.

    :type group_name: string
    :param group_name: the group the policy belongs to.
    :type policy_name: string
    :param policy_name: name of the policy document to fetch.
    """
    return self.get_response('GetGroupPolicy',
                             {'GroupName': group_name,
                              'PolicyName': policy_name},
                             verb='POST')
def delete_group_policy(self, group_name, policy_name):
    """Delete one inline policy document attached to a group.

    :type group_name: string
    :param group_name: the group the policy belongs to.
    :type policy_name: string
    :param policy_name: name of the policy document to delete.
    """
    return self.get_response('DeleteGroupPolicy',
                             {'GroupName': group_name,
                              'PolicyName': policy_name},
                             verb='POST')
def get_all_users(self, path_prefix='/', marker=None, max_items=None):
    """List the users whose paths start with ``path_prefix``.

    :type path_prefix: string
    :param path_prefix: only users under this path prefix are returned.
    :type marker: string
    :param marker: pagination cursor (Marker from a truncated response).
    :type max_items: int
    :param max_items: maximum number of users per response page.
    """
    # PathPrefix is always sent (matches the original wire behavior).
    query = {'PathPrefix': path_prefix}
    for name, value in (('Marker', marker), ('MaxItems', max_items)):
        if value:
            query[name] = value
    return self.get_response('ListUsers', query, list_marker='Users')
#
# User methods
#
def create_user(self, user_name, path='/'):
    """Create a new IAM user.

    :type user_name: string
    :param user_name: name of the new user.
    :type path: string
    :param path: path in which the user will be created; defaults to '/'.
    """
    return self.get_response('CreateUser',
                             {'UserName': user_name, 'Path': path})
def delete_user(self, user_name):
    """Delete the specified user, including path, GUID and ARN.

    :type user_name: string
    :param user_name: name of the user to delete.
    """
    return self.get_response('DeleteUser', {'UserName': user_name})
def get_user(self, user_name=None):
    """Describe a user; with no name, the requesting user is described.

    :type user_name: string
    :param user_name: user to describe; defaults to the caller's
        identity (inferred from the signing credentials).
    """
    query = {'UserName': user_name} if user_name else {}
    return self.get_response('GetUser', query)
def update_user(self, user_name, new_user_name=None, new_path=None):
    """Rename and/or move the specified user.

    :type user_name: string
    :param user_name: current name of the user.
    :type new_user_name: string
    :param new_user_name: new username, if renaming.
    :type new_path: string
    :param new_path: new path, if moving.
    """
    query = {'UserName': user_name}
    for name, value in (('NewUserName', new_user_name),
                        ('NewPath', new_path)):
        if value:
            query[name] = value
    return self.get_response('UpdateUser', query)
def get_all_user_policies(self, user_name, marker=None, max_items=None):
    """List the inline policy names attached to ``user_name``.

    :type user_name: string
    :param user_name: the user whose policies are listed.
    :type marker: string
    :param marker: pagination cursor (Marker from a truncated response).
    :type max_items: int
    :param max_items: maximum number of names per response page.
    """
    query = {'UserName': user_name}
    for name, value in (('Marker', marker), ('MaxItems', max_items)):
        if value:
            query[name] = value
    return self.get_response('ListUserPolicies', query,
                             list_marker='PolicyNames')
def put_user_policy(self, user_name, policy_name, policy_json):
    """Attach or replace an inline policy document on a user.

    :type user_name: string
    :param user_name: the user the policy is attached to.
    :type policy_name: string
    :param policy_name: name under which the document is stored.
    :type policy_json: string
    :param policy_json: the policy document (JSON text).
    """
    return self.get_response('PutUserPolicy',
                             {'UserName': user_name,
                              'PolicyName': policy_name,
                              'PolicyDocument': policy_json},
                             verb='POST')
def get_user_policy(self, user_name, policy_name):
    """Fetch one inline policy document attached to a user.

    :type user_name: string
    :param user_name: the user the policy belongs to.
    :type policy_name: string
    :param policy_name: name of the policy document to fetch.
    """
    return self.get_response('GetUserPolicy',
                             {'UserName': user_name,
                              'PolicyName': policy_name},
                             verb='POST')
def delete_user_policy(self, user_name, policy_name):
    """Delete one inline policy document attached to a user.

    :type user_name: string
    :param user_name: the user the policy belongs to.
    :type policy_name: string
    :param policy_name: name of the policy document to delete.
    """
    return self.get_response('DeleteUserPolicy',
                             {'UserName': user_name,
                              'PolicyName': policy_name},
                             verb='POST')
def get_groups_for_user(self, user_name, marker=None, max_items=None):
    """List the groups that ``user_name`` belongs to.

    :type user_name: string
    :param user_name: the user whose group memberships are listed.
    :type marker: string
    :param marker: pagination cursor (Marker from a truncated response).
    :type max_items: int
    :param max_items: maximum number of groups per response page.
    """
    query = {'UserName': user_name}
    for name, value in (('Marker', marker), ('MaxItems', max_items)):
        if value:
            query[name] = value
    return self.get_response('ListGroupsForUser', query,
                             list_marker='Groups')
#
# Access Keys
#
def get_all_access_keys(self, user_name, marker=None, max_items=None):
    """List the access keys belonging to ``user_name``.

    :type user_name: string
    :param user_name: the user whose keys are listed.
    :type marker: string
    :param marker: pagination cursor (Marker from a truncated response).
    :type max_items: int
    :param max_items: maximum number of keys per response page.
    """
    query = {'UserName': user_name}
    for name, value in (('Marker', marker), ('MaxItems', max_items)):
        if value:
            query[name] = value
    return self.get_response('ListAccessKeys', query,
                             list_marker='AccessKeyMetadata')
def create_access_key(self, user_name=None):
    """Create a new AWS access key pair for a user.

    New keys are created in the Active state.  When ``user_name`` is
    omitted, the user is inferred from the credentials signing the
    request.

    :type user_name: string
    :param user_name: The username of the user (optional).
    """
    params = {}
    if user_name:
        # Only send UserName when actually provided.  Previously a
        # None value was placed in the request unconditionally, unlike
        # every sibling method (update/delete_access_key), which would
        # serialize a bogus UserName instead of letting IAM infer it.
        params['UserName'] = user_name
    return self.get_response('CreateAccessKey', params)
def update_access_key(self, access_key_id, status, user_name=None):
    """Flip an access key between Active and Inactive.

    Useful for disabling a key during key-rotation.  When ``user_name``
    is omitted, it is inferred from the signing credentials.

    :type access_key_id: string
    :param access_key_id: the ID of the access key.
    :type status: string
    :param status: either 'Active' or 'Inactive'.
    :type user_name: string
    :param user_name: the key owner's username (optional).
    """
    query = {'AccessKeyId': access_key_id, 'Status': status}
    if user_name:
        query['UserName'] = user_name
    return self.get_response('UpdateAccessKey', query)
def delete_access_key(self, access_key_id, user_name=None):
    """Delete an access key.

    When ``user_name`` is omitted, it is inferred from the signing
    credentials.

    :type access_key_id: string
    :param access_key_id: the ID of the access key to delete.
    :type user_name: string
    :param user_name: the key owner's username (optional).
    """
    query = {'AccessKeyId': access_key_id}
    if user_name:
        query['UserName'] = user_name
    return self.get_response('DeleteAccessKey', query)
#
# Signing Certificates
#
def get_all_signing_certs(self, marker=None, max_items=None,
                          user_name=None):
    """List the signing certificates for a user.

    When ``user_name`` is omitted, it is inferred from the signing
    credentials.

    :type marker: string
    :param marker: pagination cursor (Marker from a truncated response).
    :type max_items: int
    :param max_items: maximum number of certificates per response page.
    :type user_name: string
    :param user_name: the certificate owner's username (optional).
    """
    query = {}
    for name, value in (('Marker', marker),
                        ('MaxItems', max_items),
                        ('UserName', user_name)):
        if value:
            query[name] = value
    return self.get_response('ListSigningCertificates',
                             query, list_marker='Certificates')
def update_signing_cert(self, cert_id, status, user_name=None):
    """Flip a signing certificate between Active and Inactive.

    When ``user_name`` is omitted, it is inferred from the signing
    credentials.

    :type cert_id: string
    :param cert_id: the ID of the signing certificate.
    :type status: string
    :param status: either 'Active' or 'Inactive'.
    :type user_name: string
    :param user_name: the certificate owner's username (optional).
    """
    query = {'CertificateId': cert_id, 'Status': status}
    if user_name:
        query['UserName'] = user_name
    return self.get_response('UpdateSigningCertificate', query)
def upload_signing_cert(self, cert_body, user_name=None):
    """Upload an X.509 signing certificate and bind it to a user.

    When ``user_name`` is omitted, it is inferred from the signing
    credentials.

    :type cert_body: string
    :param cert_body: body of the signing certificate.
    :type user_name: string
    :param user_name: the certificate owner's username (optional).
    """
    query = {'CertificateBody': cert_body}
    if user_name:
        query['UserName'] = user_name
    return self.get_response('UploadSigningCertificate', query,
                             verb='POST')
def delete_signing_cert(self, cert_id, user_name=None):
    """Delete a user's signing certificate.

    When ``user_name`` is omitted, it is inferred from the signing
    credentials.

    :type cert_id: string
    :param cert_id: the ID of the certificate to delete.
    :type user_name: string
    :param user_name: the certificate owner's username (optional).
    """
    query = {'CertificateId': cert_id}
    if user_name:
        query['UserName'] = user_name
    return self.get_response('DeleteSigningCertificate', query)
#
# Server Certificates
#
def list_server_certs(self, path_prefix='/',
                      marker=None, max_items=None):
    """List server certificates under ``path_prefix`` (empty list if none).

    :type path_prefix: string
    :param path_prefix: only certificates under this path are returned.
    :type marker: string
    :param marker: pagination cursor (Marker from a truncated response).
    :type max_items: int
    :param max_items: maximum number of certificates per response page.
    """
    query = {}
    for name, value in (('PathPrefix', path_prefix),
                        ('Marker', marker),
                        ('MaxItems', max_items)):
        if value:
            query[name] = value
    return self.get_response('ListServerCertificates',
                             query,
                             list_marker='ServerCertificateMetadataList')

# Preserves backwards compatibility.
# TODO: Look into deprecating this eventually?
get_all_server_certs = list_server_certs
def update_server_cert(self, cert_name, new_cert_name=None,
                       new_path=None):
    """Rename and/or move the specified server certificate.

    :type cert_name: string
    :param cert_name: current name of the server certificate.
    :type new_cert_name: string
    :param new_cert_name: new name, if renaming.
    :type new_path: string
    :param new_path: new path, if moving.
    """
    query = {'ServerCertificateName': cert_name}
    for name, value in (('NewServerCertificateName', new_cert_name),
                        ('NewPath', new_path)):
        if value:
            query[name] = value
    return self.get_response('UpdateServerCertificate', query)
def upload_server_cert(self, cert_name, cert_body, private_key,
                       cert_chain=None, path=None):
    """Upload a PEM-encoded server certificate entity for the account.

    The entity bundles a public key certificate, a private key, and an
    optional certificate chain.

    :type cert_name: string
    :param cert_name: certificate name, without any path component.
    :type cert_body: string
    :param cert_body: PEM-encoded public key certificate.
    :type private_key: string
    :param private_key: PEM-encoded private key.
    :type cert_chain: string
    :param cert_chain: optional concatenated PEM chain certificates.
    :type path: string
    :param path: optional path for the certificate.
    """
    query = {'ServerCertificateName': cert_name,
             'CertificateBody': cert_body,
             'PrivateKey': private_key}
    for name, value in (('CertificateChain', cert_chain),
                        ('Path', path)):
        if value:
            query[name] = value
    return self.get_response('UploadServerCertificate', query,
                             verb='POST')
def get_server_certificate(self, cert_name):
    """Describe the specified server certificate.

    :type cert_name: string
    :param cert_name: name of the server certificate to describe.
    """
    return self.get_response('GetServerCertificate',
                             {'ServerCertificateName': cert_name})
def delete_server_cert(self, cert_name):
    """Delete the specified server certificate.

    :type cert_name: string
    :param cert_name: name of the server certificate to delete.
    """
    return self.get_response('DeleteServerCertificate',
                             {'ServerCertificateName': cert_name})
#
# MFA Devices
#
def get_all_mfa_devices(self, user_name, marker=None, max_items=None):
    """List the MFA devices assigned to ``user_name``.

    :type user_name: string
    :param user_name: the user whose devices are listed.
    :type marker: string
    :param marker: pagination cursor (Marker from a truncated response).
    :type max_items: int
    :param max_items: maximum number of devices per response page.
    """
    query = {'UserName': user_name}
    for name, value in (('Marker', marker), ('MaxItems', max_items)):
        if value:
            query[name] = value
    return self.get_response('ListMFADevices',
                             query, list_marker='MFADevices')
def enable_mfa_device(self, user_name, serial_number,
                      auth_code_1, auth_code_2):
    """Enable an MFA device and associate it with a user.

    :type user_name: string
    :param user_name: the user to associate the device with.
    :type serial_number: string
    :param serial_number: serial number uniquely identifying the device.
    :type auth_code_1: string
    :param auth_code_1: an authentication code emitted by the device.
    :type auth_code_2: string
    :param auth_code_2: a subsequent authentication code from the device.
    """
    return self.get_response('EnableMFADevice',
                             {'UserName': user_name,
                              'SerialNumber': serial_number,
                              'AuthenticationCode1': auth_code_1,
                              'AuthenticationCode2': auth_code_2})
def deactivate_mfa_device(self, user_name, serial_number):
    """Deactivate an MFA device and detach it from its user.

    :type user_name: string
    :param user_name: the user the device is attached to.
    :type serial_number: string
    :param serial_number: serial number uniquely identifying the device.
    """
    return self.get_response('DeactivateMFADevice',
                             {'UserName': user_name,
                              'SerialNumber': serial_number})
def resync_mfa_device(self, user_name, serial_number,
auth_code_1, auth_code_2):
"""
Syncronizes the specified MFA device with the AWS servers.
:type user_name: string
:param user_name: The username of the user
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
the MFA device.
:type auth_code_1: string
:param auth_code_1: An authentication code emitted by the device.
:type auth_code_2: string
:param auth_code_2: A subsequent authentication code emitted
by the device.
"""
params = {'UserName': user_name,
'SerialNumber': serial_number,
'AuthenticationCode1': auth_code_1,
'AuthenticationCode2': auth_code_2}
return self.get_response('ResyncMFADevice', params)
#
# Login Profiles
#
def get_login_profiles(self, user_name):
"""
Retrieves the login profile for the specified user.
:type user_name: string
:param user_name: The username of the user
"""
params = {'UserName': user_name}
return self.get_response('GetLoginProfile', params)
def create_login_profile(self, user_name, password):
"""
Creates a login profile for the specified user, give the user the
ability to access AWS services and the AWS Management Console.
:type user_name: string
:param user_name: The name of the user
:type password: string
:param password: The new password for the user
"""
params = {'UserName': user_name,
'Password': password}
return self.get_response('CreateLoginProfile', params)
def delete_login_profile(self, user_name):
"""
Deletes the login profile associated with the specified user.
:type user_name: string
:param user_name: The name of the user to delete.
"""
params = {'UserName': user_name}
return self.get_response('DeleteLoginProfile', params)
def update_login_profile(self, user_name, password):
"""
Resets the password associated with the user's login profile.
:type user_name: string
:param user_name: The name of the user
:type password: string
:param password: The new password for the user
"""
params = {'UserName': user_name,
'Password': password}
return self.get_response('UpdateLoginProfile', params)
def create_account_alias(self, alias):
"""
Creates a new alias for the AWS account.
For more information on account id aliases, please see
http://goo.gl/ToB7G
:type alias: string
:param alias: The alias to attach to the account.
"""
params = {'AccountAlias': alias}
return self.get_response('CreateAccountAlias', params)
def delete_account_alias(self, alias):
"""
Deletes an alias for the AWS account.
For more information on account id aliases, please see
http://goo.gl/ToB7G
:type alias: string
:param alias: The alias to remove from the account.
"""
params = {'AccountAlias': alias}
return self.get_response('DeleteAccountAlias', params)
def get_account_alias(self):
"""
Get the alias for the current account.
This is referred to in the docs as list_account_aliases,
but it seems you can only have one account alias currently.
For more information on account id aliases, please see
http://goo.gl/ToB7G
"""
return self.get_response('ListAccountAliases', {},
list_marker='AccountAliases')
    def get_signin_url(self, service='ec2'):
        """
        Get the URL where IAM users can use their login profile to sign in
        to this account's console.

        :type service: string
        :param service: Default service to go to in the console.

        :raises Exception: If no account alias has been configured yet.
        """
        # The sign-in URL is keyed off the account alias, so one must exist
        # already (see create_account_alias).
        alias = self.get_account_alias()
        if not alias:
            raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
        # get_account_alias() returns the raw ListAccountAliases response;
        # unwrap the nested structure to reach the alias list itself.
        resp = alias.get('list_account_aliases_response', {})
        result = resp.get('list_account_aliases_result', {})
        aliases = result.get('account_aliases', [])
        if not len(aliases):
            raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
        # We'll just use the first one we find.
        alias = aliases[0]
        # GovCloud and China partitions use distinct sign-in domains.
        if self.host == 'iam.us-gov.amazonaws.com':
            return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (
                alias,
                service
            )
        elif self.host.endswith('amazonaws.com.cn'):
            return "https://%s.signin.amazonaws.cn/console/%s" % (
                alias,
                service
            )
        else:
            return "https://%s.signin.aws.amazon.com/console/%s" % (
                alias,
                service
            )
    def get_account_summary(self):
        """
        Get a summary of IAM entity usage and quotas for the current
        account, returned as a ``SummaryMap``.

        NOTE(review): the previous docstring was copy-pasted from
        ``get_account_alias`` and described the wrong operation; the code
        has always called ``GetAccountSummary``.
        """
        return self.get_object('GetAccountSummary', {}, SummaryMap)
#
# IAM Roles
#
def add_role_to_instance_profile(self, instance_profile_name, role_name):
"""
Adds the specified role to the specified instance profile.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to update.
:type role_name: string
:param role_name: Name of the role to add.
"""
return self.get_response('AddRoleToInstanceProfile',
{'InstanceProfileName': instance_profile_name,
'RoleName': role_name})
def create_instance_profile(self, instance_profile_name, path=None):
"""
Creates a new instance profile.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to create.
:type path: string
:param path: The path to the instance profile.
"""
params = {'InstanceProfileName': instance_profile_name}
if path is not None:
params['Path'] = path
return self.get_response('CreateInstanceProfile', params)
def _build_policy(self, assume_role_policy_document=None):
if assume_role_policy_document is not None:
if isinstance(assume_role_policy_document, six.string_types):
# Historically, they had to pass a string. If it's a string,
# assume the user has already handled it.
return assume_role_policy_document
else:
for tld, policy in DEFAULT_POLICY_DOCUMENTS.items():
if tld is 'default':
# Skip the default. We'll fall back to it if we don't find
# anything.
continue
if self.host and self.host.endswith(tld):
assume_role_policy_document = policy
break
if not assume_role_policy_document:
assume_role_policy_document = DEFAULT_POLICY_DOCUMENTS['default']
# Dump the policy (either user-supplied ``dict`` or one of the defaults)
return json.dumps(assume_role_policy_document)
def create_role(self, role_name, assume_role_policy_document=None, path=None):
"""
Creates a new role for your AWS account.
The policy grants permission to an EC2 instance to assume the role.
The policy is URL-encoded according to RFC 3986. Currently, only EC2
instances can assume roles.
:type role_name: string
:param role_name: Name of the role to create.
:type assume_role_policy_document: ``string`` or ``dict``
:param assume_role_policy_document: The policy that grants an entity
permission to assume the role.
:type path: string
:param path: The path to the role.
"""
params = {
'RoleName': role_name,
'AssumeRolePolicyDocument': self._build_policy(
assume_role_policy_document
),
}
if path is not None:
params['Path'] = path
return self.get_response('CreateRole', params)
def delete_instance_profile(self, instance_profile_name):
"""
Deletes the specified instance profile. The instance profile must not
have an associated role.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to delete.
"""
return self.get_response(
'DeleteInstanceProfile',
{'InstanceProfileName': instance_profile_name})
def delete_role(self, role_name):
"""
Deletes the specified role. The role must not have any policies
attached.
:type role_name: string
:param role_name: Name of the role to delete.
"""
return self.get_response('DeleteRole', {'RoleName': role_name})
def delete_role_policy(self, role_name, policy_name):
"""
Deletes the specified policy associated with the specified role.
:type role_name: string
:param role_name: Name of the role associated with the policy.
:type policy_name: string
:param policy_name: Name of the policy to delete.
"""
return self.get_response(
'DeleteRolePolicy',
{'RoleName': role_name, 'PolicyName': policy_name})
def get_instance_profile(self, instance_profile_name):
"""
Retrieves information about the specified instance profile, including
the instance profile's path, GUID, ARN, and role.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to get
information about.
"""
return self.get_response('GetInstanceProfile',
{'InstanceProfileName': instance_profile_name})
def get_role(self, role_name):
"""
Retrieves information about the specified role, including the role's
path, GUID, ARN, and the policy granting permission to EC2 to assume
the role.
:type role_name: string
:param role_name: Name of the role associated with the policy.
"""
return self.get_response('GetRole', {'RoleName': role_name})
def get_role_policy(self, role_name, policy_name):
"""
Retrieves the specified policy document for the specified role.
:type role_name: string
:param role_name: Name of the role associated with the policy.
:type policy_name: string
:param policy_name: Name of the policy to get.
"""
return self.get_response('GetRolePolicy',
{'RoleName': role_name,
'PolicyName': policy_name})
def list_instance_profiles(self, path_prefix=None, marker=None,
max_items=None):
"""
Lists the instance profiles that have the specified path prefix. If
there are none, the action returns an empty list.
:type path_prefix: string
:param path_prefix: The path prefix for filtering the results. For
example: /application_abc/component_xyz/, which would get all
instance profiles whose path starts with
/application_abc/component_xyz/.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
Marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {}
if path_prefix is not None:
params['PathPrefix'] = path_prefix
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListInstanceProfiles', params,
list_marker='InstanceProfiles')
def list_instance_profiles_for_role(self, role_name, marker=None,
max_items=None):
"""
Lists the instance profiles that have the specified associated role. If
there are none, the action returns an empty list.
:type role_name: string
:param role_name: The name of the role to list instance profiles for.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
Marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {'RoleName': role_name}
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListInstanceProfilesForRole', params,
list_marker='InstanceProfiles')
def list_role_policies(self, role_name, marker=None, max_items=None):
"""
Lists the names of the policies associated with the specified role. If
there are none, the action returns an empty list.
:type role_name: string
:param role_name: The name of the role to list policies for.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {'RoleName': role_name}
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListRolePolicies', params,
list_marker='PolicyNames')
def list_roles(self, path_prefix=None, marker=None, max_items=None):
"""
Lists the roles that have the specified path prefix. If there are none,
the action returns an empty list.
:type path_prefix: string
:param path_prefix: The path prefix for filtering the results.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {}
if path_prefix is not None:
params['PathPrefix'] = path_prefix
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListRoles', params, list_marker='Roles')
def put_role_policy(self, role_name, policy_name, policy_document):
"""
Adds (or updates) a policy document associated with the specified role.
:type role_name: string
:param role_name: Name of the role to associate the policy with.
:type policy_name: string
:param policy_name: Name of the policy document.
:type policy_document: string
:param policy_document: The policy document.
"""
return self.get_response('PutRolePolicy',
{'RoleName': role_name,
'PolicyName': policy_name,
'PolicyDocument': policy_document})
def remove_role_from_instance_profile(self, instance_profile_name,
role_name):
"""
Removes the specified role from the specified instance profile.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to update.
:type role_name: string
:param role_name: Name of the role to remove.
"""
return self.get_response('RemoveRoleFromInstanceProfile',
{'InstanceProfileName': instance_profile_name,
'RoleName': role_name})
def update_assume_role_policy(self, role_name, policy_document):
"""
Updates the policy that grants an entity permission to assume a role.
Currently, only an Amazon EC2 instance can assume a role.
:type role_name: string
:param role_name: Name of the role to update.
:type policy_document: string
:param policy_document: The policy that grants an entity permission to
assume the role.
"""
return self.get_response('UpdateAssumeRolePolicy',
{'RoleName': role_name,
'PolicyDocument': policy_document})
def create_saml_provider(self, saml_metadata_document, name):
"""
Creates an IAM entity to describe an identity provider (IdP)
that supports SAML 2.0.
The SAML provider that you create with this operation can be
used as a principal in a role's trust policy to establish a
trust relationship between AWS and a SAML identity provider.
You can create an IAM role that supports Web-based single
sign-on (SSO) to the AWS Management Console or one that
supports API access to AWS.
When you create the SAML provider, you upload an a SAML
metadata document that you get from your IdP and that includes
the issuer's name, expiration information, and keys that can
be used to validate the SAML authentication response
(assertions) that are received from the IdP. You must generate
the metadata document using the identity management software
that is used as your organization's IdP.
This operation requires `Signature Version 4`_.
For more information, see `Giving Console Access Using SAML`_
and `Creating Temporary Security Credentials for SAML
Federation`_ in the Using Temporary Credentials guide.
:type saml_metadata_document: string
:param saml_metadata_document: An XML document generated by an identity
provider (IdP) that supports SAML 2.0. The document includes the
issuer's name, expiration information, and keys that can be used to
validate the SAML authentication response (assertions) that are
received from the IdP. You must generate the metadata document
using the identity management software that is used as your
organization's IdP.
For more information, see `Creating Temporary Security Credentials for
SAML Federation`_ in the Using Temporary Security Credentials
guide.
:type name: string
:param name: The name of the provider to create.
"""
params = {
'SAMLMetadataDocument': saml_metadata_document,
'Name': name,
}
return self.get_response('CreateSAMLProvider', params)
def list_saml_providers(self):
"""
Lists the SAML providers in the account.
This operation requires `Signature Version 4`_.
"""
return self.get_response('ListSAMLProviders', {}, list_marker='SAMLProviderList')
def get_saml_provider(self, saml_provider_arn):
"""
Returns the SAML provider metadocument that was uploaded when
the provider was created or updated.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to get information about.
"""
params = {'SAMLProviderArn': saml_provider_arn}
return self.get_response('GetSAMLProvider', params)
def update_saml_provider(self, saml_provider_arn, saml_metadata_document):
"""
Updates the metadata document for an existing SAML provider.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to update.
:type saml_metadata_document: string
:param saml_metadata_document: An XML document generated by an identity
provider (IdP) that supports SAML 2.0. The document includes the
issuer's name, expiration information, and keys that can be used to
validate the SAML authentication response (assertions) that are
received from the IdP. You must generate the metadata document
using the identity management software that is used as your
organization's IdP.
"""
params = {
'SAMLMetadataDocument': saml_metadata_document,
'SAMLProviderArn': saml_provider_arn,
}
return self.get_response('UpdateSAMLProvider', params)
def delete_saml_provider(self, saml_provider_arn):
"""
Deletes a SAML provider.
Deleting the provider does not update any roles that reference
the SAML provider as a principal in their trust policies. Any
attempt to assume a role that references a SAML provider that
has been deleted will fail.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to delete.
"""
params = {'SAMLProviderArn': saml_provider_arn}
return self.get_response('DeleteSAMLProvider', params)
#
# IAM Reports
#
def generate_credential_report(self):
"""
Generates a credential report for an account
A new credential report can only be generated every 4 hours. If one
hasn't been generated in the last 4 hours then get_credential_report
will error when called
"""
params = {}
return self.get_response('GenerateCredentialReport', params)
def get_credential_report(self):
"""
Retrieves a credential report for an account
A report must have been generated in the last 4 hours to succeed.
The report is returned as a base64 encoded blob within the response.
"""
params = {}
return self.get_response('GetCredentialReport', params)
def create_virtual_mfa_device(self, path, device_name):
"""
Creates a new virtual MFA device for the AWS account.
After creating the virtual MFA, use enable-mfa-device to
attach the MFA device to an IAM user.
:type path: string
:param path: The path for the virtual MFA device.
:type device_name: string
:param device_name: The name of the virtual MFA device.
Used with path to uniquely identify a virtual MFA device.
"""
params = {
'Path': path,
'VirtualMFADeviceName': device_name
}
return self.get_response('CreateVirtualMFADevice', params)
#
# IAM password policy
#
def get_account_password_policy(self):
"""
Returns the password policy for the AWS account.
"""
params = {}
return self.get_response('GetAccountPasswordPolicy', params)
def delete_account_password_policy(self):
"""
Delete the password policy currently set for the AWS account.
"""
params = {}
return self.get_response('DeleteAccountPasswordPolicy', params)
def update_account_password_policy(self, allow_users_to_change_password=None,
hard_expiry=None, max_password_age=None ,
minimum_password_length=None ,
password_reuse_prevention=None,
require_lowercase_characters=None,
require_numbers=None, require_symbols=None ,
require_uppercase_characters=None):
"""
Update the password policy for the AWS account.
Notes: unset parameters will be reset to Amazon default settings!
Most of the password policy settings are enforced the next time your users
change their passwords. When you set minimum length and character type
requirements, they are enforced the next time your users change their
passwords - users are not forced to change their existing passwords, even
if the pre-existing passwords do not adhere to the updated password
policy. When you set a password expiration period, the expiration period
is enforced immediately.
:type allow_users_to_change_password: bool
:param allow_users_to_change_password: Allows all IAM users in your account
to use the AWS Management Console to change their own passwords.
:type hard_expiry: bool
:param hard_expiry: Prevents IAM users from setting a new password after
their password has expired.
:type max_password_age: int
:param max_password_age: The number of days that an IAM user password is valid.
:type minimum_password_length: int
:param minimum_password_length: The minimum number of characters allowed in
an IAM user password.
:type password_reuse_prevention: int
:param password_reuse_prevention: Specifies the number of previous passwords
that IAM users are prevented from reusing.
:type require_lowercase_characters: bool
:param require_lowercase_characters: Specifies whether IAM user passwords
must contain at least one lowercase character from the ISO basic Latin
alphabet (``a`` to ``z``).
:type require_numbers: bool
:param require_numbers: Specifies whether IAM user passwords must contain at
least one numeric character (``0`` to ``9``).
:type require_symbols: bool
:param require_symbols: Specifies whether IAM user passwords must contain at
least one of the following non-alphanumeric characters:
``! @ # $ % ^ & * ( ) _ + - = [ ] { } | '``
:type require_uppercase_characters: bool
:param require_uppercase_characters: Specifies whether IAM user passwords
must contain at least one uppercase character from the ISO basic Latin
alphabet (``A`` to ``Z``).
"""
params = {}
if allow_users_to_change_password is not None and type(allow_users_to_change_password) is bool:
params['AllowUsersToChangePassword'] = str(allow_users_to_change_password).lower()
if hard_expiry is not None and type(allow_users_to_change_password) is bool:
params['HardExpiry'] = str(hard_expiry).lower()
if max_password_age is not None:
params['MaxPasswordAge'] = max_password_age
if minimum_password_length is not None:
params['MinimumPasswordLength'] = minimum_password_length
if password_reuse_prevention is not None:
params['PasswordReusePrevention'] = password_reuse_prevention
if require_lowercase_characters is not None and type(allow_users_to_change_password) is bool:
params['RequireLowercaseCharacters'] = str(require_lowercase_characters).lower()
if require_numbers is not None and type(allow_users_to_change_password) is bool:
params['RequireNumbers'] = str(require_numbers).lower()
if require_symbols is not None and type(allow_users_to_change_password) is bool:
params['RequireSymbols'] = str(require_symbols).lower()
if require_uppercase_characters is not None and type(allow_users_to_change_password) is bool:
params['RequireUppercaseCharacters'] = str(require_uppercase_characters).lower()
return self.get_response('UpdateAccountPasswordPolicy', params)
|
mit
|
hep-gc/glint-horizon
|
openstack_dashboard/dashboards/admin/hypervisors/tests.py
|
7
|
2096
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 B1 Systems GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class HypervisorViewTest(test.BaseAdminViewTests):
    # Tests for the admin hypervisors index view.
    @test.create_stubs({api.nova: ('hypervisor_list',
                                   'hypervisor_stats')})
    def test_index(self):
        """Index view renders its template with the nova hypervisor list."""
        hypervisors = self.hypervisors.list()
        stats = self.hypervisors.stats
        # Record the expected nova API calls; mox verifies them in order
        # once ReplayAll() switches from record to replay mode.
        api.nova.hypervisor_list(IsA(http.HttpRequest)).AndReturn(hypervisors)
        api.nova.hypervisor_stats(IsA(http.HttpRequest)).AndReturn(stats)
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:admin:hypervisors:index'))
        self.assertTemplateUsed(res, 'admin/hypervisors/index.html')
        self.assertItemsEqual(res.context['table'].data, hypervisors)
class HypervisorDetailViewTest(test.BaseAdminViewTests):
    # Tests for the per-hypervisor detail view.
    @test.create_stubs({api.nova: ('hypervisor_search',)})
    def test_index(self):
        """Detail view renders even when the search returns no instances."""
        hypervisor = self.hypervisors.list().pop().hypervisor_hostname
        # Stub the nova search for this hostname to return an empty result.
        api.nova.hypervisor_search(
            IsA(http.HttpRequest), hypervisor).AndReturn([])
        self.mox.ReplayAll()

        url = reverse('horizon:admin:hypervisors:detail', args=[hypervisor])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'admin/hypervisors/detail.html')
        self.assertItemsEqual(res.context['table'].data, [])
|
apache-2.0
|
vlvkobal/netdata
|
collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
|
4
|
12093
|
# SPDX-License-Identifier: MIT
"""
Low-level helpers for the SecureTransport bindings.
These are Python functions that are not directly related to the high-level APIs
but are necessary to get them to work. They include a whole bunch of low-level
CoreFoundation messing about and memory management. The concerns in this module
are almost entirely about trying to avoid memory leaks and providing
appropriate and useful assistance to the higher-level code.
"""
import base64
import ctypes
import itertools
import re
import os
import ssl
import tempfile
from .bindings import Security, CoreFoundation, CFConst
# This regular expression is used to grab PEM data out of a PEM bundle.
# DOTALL lets ``.`` span newlines so multi-line base64 bodies match, and the
# non-greedy group stops each match at the first END marker.
_PEM_CERTS_RE = re.compile(
    b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
)
def _cf_data_from_bytes(bytestring):
    """
    Wrap *bytestring* in a freshly allocated CFData object.

    The caller owns the returned object and must CFRelease it.
    """
    allocator = CoreFoundation.kCFAllocatorDefault
    return CoreFoundation.CFDataCreate(allocator, bytestring, len(bytestring))
def _cf_dictionary_from_tuples(tuples):
    """
    Build a CFDictionary whose entries come from ``(key, value)`` *tuples*.
    """
    size = len(tuples)
    # Keys and values must be laid out in matching order in two C arrays.
    cf_keys = (CoreFoundation.CFTypeRef * size)(
        *(pair[0] for pair in tuples)
    )
    cf_values = (CoreFoundation.CFTypeRef * size)(
        *(pair[1] for pair in tuples)
    )
    return CoreFoundation.CFDictionaryCreate(
        CoreFoundation.kCFAllocatorDefault,
        cf_keys,
        cf_values,
        size,
        CoreFoundation.kCFTypeDictionaryKeyCallBacks,
        CoreFoundation.kCFTypeDictionaryValueCallBacks,
    )
def _cf_string_to_unicode(value):
    """
    Creates a Unicode string from a CFString object. Used entirely for error
    reporting.

    Yes, it annoys me quite a lot that this function is this complex.

    :param value: a CFStringRef-like object.
    :returns: a Python unicode string, or ``None`` for a NULL CFString.
    :raises OSError: if copying the C string out of the CFString fails.
    """
    value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))

    # Fast path: CFStringGetCStringPtr may hand back an internal pointer
    # without copying, but is allowed to fail and return NULL.
    string = CoreFoundation.CFStringGetCStringPtr(
        value_as_void_p,
        CFConst.kCFStringEncodingUTF8
    )
    if string is None:
        # Slow path: copy the string into a buffer we own.
        buffer = ctypes.create_string_buffer(1024)
        result = CoreFoundation.CFStringGetCString(
            value_as_void_p,
            buffer,
            1024,
            CFConst.kCFStringEncodingUTF8
        )
        if not result:
            raise OSError('Error copying C string from CFStringRef')
        string = buffer.value
    if string is not None:
        string = string.decode('utf-8')
    return string
def _assert_no_error(error, exception_class=None):
    """
    Raise if *error* (an OSStatus code) indicates failure; no-op on 0.

    :param error: OSStatus return code from a Security framework call.
    :param exception_class: Exception type to raise; ``ssl.SSLError`` by
        default.
    """
    if error == 0:
        return

    # Translate the OSStatus into a human-readable message.
    cf_message = Security.SecCopyErrorMessageString(error, None)
    message = _cf_string_to_unicode(cf_message)
    CoreFoundation.CFRelease(cf_message)

    if message is None or message == u'':
        message = u'OSStatus %s' % error

    if exception_class is None:
        exception_class = ssl.SSLError

    raise exception_class(message)
def _cert_array_from_pem(pem_bundle):
    """
    Given a bundle of certs in PEM format, turns them into a CFArray of certs
    that can be used to validate a cert chain.

    The caller owns the returned CFArray and must CFRelease it.

    :raises ssl.SSLError: if the bundle contains no certificates, if memory
        allocation fails, or if any certificate cannot be parsed.
    """
    der_certs = [
        base64.b64decode(match.group(1))
        for match in _PEM_CERTS_RE.finditer(pem_bundle)
    ]
    if not der_certs:
        raise ssl.SSLError("No root certificates specified")

    cert_array = CoreFoundation.CFArrayCreateMutable(
        CoreFoundation.kCFAllocatorDefault,
        0,
        ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
    )
    if not cert_array:
        raise ssl.SSLError("Unable to allocate memory!")

    try:
        for der_bytes in der_certs:
            certdata = _cf_data_from_bytes(der_bytes)
            if not certdata:
                raise ssl.SSLError("Unable to allocate memory!")
            cert = Security.SecCertificateCreateWithData(
                CoreFoundation.kCFAllocatorDefault, certdata
            )
            CoreFoundation.CFRelease(certdata)
            if not cert:
                raise ssl.SSLError("Unable to build cert object!")
            CoreFoundation.CFArrayAppendValue(cert_array, cert)
            CoreFoundation.CFRelease(cert)
    except Exception:
        # We need to free the array before the exception bubbles further.
        # We only want to do that if an error occurs: otherwise, the caller
        # should free.
        CoreFoundation.CFRelease(cert_array)
        # BUG FIX: re-raise so the failure actually propagates.  Previously
        # the exception was swallowed and the already-released array was
        # returned to the caller (a use-after-free waiting to happen).
        raise

    return cert_array
def _is_cert(item):
    """
    Tell whether *item* (a CFTypeRef) is a SecCertificate.
    """
    return CoreFoundation.CFGetTypeID(item) == Security.SecCertificateGetTypeID()
def _is_identity(item):
    """
    Tell whether *item* (a CFTypeRef) is a SecIdentity.
    """
    return CoreFoundation.CFGetTypeID(item) == Security.SecIdentityGetTypeID()
def _temporary_keychain():
    """
    Create a throwaway macOS keychain for working with credentials.

    The keychain lives in a freshly created temporary directory and is
    protected by a random one-time password.  We expect one keychain per
    socket.  The caller must free the returned SecKeychainRef, including
    calling SecKeychainDelete, and remove the directory.

    :returns: ``(SecKeychainRef, tempdir_path)``
    """
    # SecKeychainCreate demands an on-disk path, which rules out mkstemp.
    # Instead we make a temp directory and invent a filename inside it:
    # 8 random bytes (base64-encoded) name the file, and the remaining
    # 32 random bytes become the keychain password.
    rand = os.urandom(40)
    kc_name = base64.b64encode(rand[:8]).decode('utf-8')
    kc_password = base64.b64encode(rand[8:])  # Must be valid UTF-8
    kc_dir = tempfile.mkdtemp()
    kc_path = os.path.join(kc_dir, kc_name).encode('utf-8')

    # Create the keychain itself and hand it back to the caller.
    kc_ref = Security.SecKeychainRef()
    status = Security.SecKeychainCreate(
        kc_path,
        len(kc_password),
        kc_password,
        False,
        None,
        ctypes.byref(kc_ref)
    )
    _assert_no_error(status)
    return kc_ref, kc_dir
def _load_items_from_file(keychain, path):
    """
    Given a single file, loads all the trust objects from it into arrays and
    the keychain.

    :param keychain: SecKeychainRef that receives any imported private keys.
    :param path: Filesystem path of the PEM/DER file to import.
    Returns a tuple of lists: the first list is a list of identities, the
    second a list of certs. Every returned ref has been CFRetain'd and must
    be released by the caller.
    """
    certificates = []
    identities = []
    result_array = None
    # Pre-initialise so the finally-block below cannot hit a NameError
    # (masking the original exception) when CFDataCreate itself raises.
    filedata = None

    with open(path, 'rb') as f:
        raw_filedata = f.read()

    try:
        filedata = CoreFoundation.CFDataCreate(
            CoreFoundation.kCFAllocatorDefault,
            raw_filedata,
            len(raw_filedata)
        )
        result_array = CoreFoundation.CFArrayRef()
        result = Security.SecItemImport(
            filedata,  # cert data
            None,  # Filename, leaving it out for now
            None,  # What the type of the file is, we don't care
            None,  # what's in the file, we don't care
            0,  # import flags
            None,  # key params, can include passphrase in the future
            keychain,  # The keychain to insert into
            ctypes.byref(result_array)  # Results
        )
        _assert_no_error(result)

        # A CFArray is not very useful to us as an intermediary
        # representation, so we are going to extract the objects we want
        # and then free the array. We don't need to keep hold of keys: the
        # keychain already has them!
        result_count = CoreFoundation.CFArrayGetCount(result_array)
        for index in range(result_count):
            item = CoreFoundation.CFArrayGetValueAtIndex(
                result_array, index
            )
            item = ctypes.cast(item, CoreFoundation.CFTypeRef)
            if _is_cert(item):
                CoreFoundation.CFRetain(item)
                certificates.append(item)
            elif _is_identity(item):
                CoreFoundation.CFRetain(item)
                identities.append(item)
    finally:
        if result_array:
            CoreFoundation.CFRelease(result_array)
        # Only release the data object when it was actually created;
        # releasing a NULL ref is an error in CoreFoundation.
        if filedata:
            CoreFoundation.CFRelease(filedata)
    return (identities, certificates)
def _load_client_cert_chain(keychain, *paths):
    """
    Load certificates and maybe keys from a number of files. Has the end goal
    of returning a CFArray containing one SecIdentityRef, and then zero or more
    SecCertificateRef objects, suitable for use as a client certificate trust
    chain.

    :param keychain: SecKeychainRef used to associate imported keys with
        certificates (see strategy notes below).
    :param paths: Zero or more file paths; falsy entries are skipped.
    :returns: A CFArray that the CALLER must release; it must stay alive for
        the whole connection.
    :raises: whatever _assert_no_error raises on a Security framework error.
    """
    # Ok, the strategy.
    #
    # This relies on knowing that macOS will not give you a SecIdentityRef
    # unless you have imported a key into a keychain. This is a somewhat
    # artificial limitation of macOS (for example, it doesn't necessarily
    # affect iOS), but there is nothing inside Security.framework that lets you
    # get a SecIdentityRef without having a key in a keychain.
    #
    # So the policy here is we take all the files and iterate them in order.
    # Each one will use SecItemImport to have one or more objects loaded from
    # it. We will also point at a keychain that macOS can use to work with the
    # private key.
    #
    # Once we have all the objects, we'll check what we actually have. If we
    # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
    # we'll take the first certificate (which we assume to be our leaf) and
    # ask the keychain to give us a SecIdentityRef with that cert's associated
    # key.
    #
    # We'll then return a CFArray containing the trust chain: one
    # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
    # responsibility for freeing this CFArray will be with the caller. This
    # CFArray must remain alive for the entire connection, so in practice it
    # will be stored with a single SSLSocket, along with the reference to the
    # keychain.
    certificates = []
    identities = []

    # Filter out bad paths.
    paths = (path for path in paths if path)

    try:
        for file_path in paths:
            new_identities, new_certs = _load_items_from_file(
                keychain, file_path
            )
            identities.extend(new_identities)
            certificates.extend(new_certs)

        # Ok, we have everything. The question is: do we have an identity? If
        # not, we want to grab one from the first cert we have.
        if not identities:
            new_identity = Security.SecIdentityRef()
            status = Security.SecIdentityCreateWithCertificate(
                keychain,
                certificates[0],
                ctypes.byref(new_identity)
            )
            _assert_no_error(status)
            identities.append(new_identity)

            # We now want to release the original certificate, as we no longer
            # need it.
            CoreFoundation.CFRelease(certificates.pop(0))

        # We now need to build a new CFArray that holds the trust chain.
        trust_chain = CoreFoundation.CFArrayCreateMutable(
            CoreFoundation.kCFAllocatorDefault,
            0,
            ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
        )
        for item in itertools.chain(identities, certificates):
            # ArrayAppendValue does a CFRetain on the item. That's fine,
            # because the finally block will release our other refs to them.
            CoreFoundation.CFArrayAppendValue(trust_chain, item)

        return trust_chain
    finally:
        # Drop our own references; the trust_chain (if built) holds its own.
        for obj in itertools.chain(identities, certificates):
            CoreFoundation.CFRelease(obj)
|
gpl-3.0
|
egoid/baytree
|
lib/python2.7/site-packages/environ/environ.py
|
3
|
26771
|
"""
Django-environ allows you to utilize 12factor inspired environment
variables to configure your Django application.
"""
import json
import logging
import os
import re
import sys
import warnings
try:
from django.core.exceptions import ImproperlyConfigured
except ImportError:
class ImproperlyConfigured(Exception):
pass
from six.moves import urllib
from six import string_types
# Module-level logger; handlers and levels are left to the host application.
logger = logging.getLogger(__name__)
VERSION = '0.4.1'
__author__ = 'joke2k'
# NOTE: a tuple of *strings*, e.g. ('0', '4', '1'), not integers.
__version__ = tuple(VERSION.split('.'))
# return int if possible
def _cast_int(v):
return int(v) if hasattr(v, 'isdigit') and v.isdigit() else v
def _cast_urlstr(v):
return urllib.parse.unquote_plus(v) if isinstance(v, str) else v
# back compatibility with redis_cache package
DJANGO_REDIS_DRIVER = 'django_redis.cache.RedisCache'
DJANGO_REDIS_CACHE_DRIVER = 'redis_cache.RedisCache'

# Prefer django-redis; fall back to the legacy redis_cache backend only when
# that package is importable. Catch ImportError specifically: the previous
# bare ``except:`` silently swallowed *every* exception (including
# SystemExit/KeyboardInterrupt), hiding real errors raised during import.
REDIS_DRIVER = DJANGO_REDIS_DRIVER
try:
    import redis_cache
    REDIS_DRIVER = DJANGO_REDIS_CACHE_DRIVER
except ImportError:
    pass
class NoValue(object):
    """Sentinel type marking "no default was provided"."""

    def __repr__(self):
        return '<{0}>'.format(type(self).__name__)
class Env(object):
    """Provide scheme-based lookups of environment variables so that each
    caller doesn't have to pass in `cast` and `default` parameters.
    Usage:::
        env = Env(MAIL_ENABLED=bool, SMTP_LOGIN=(str, 'DEFAULT'))
        if env('MAIL_ENABLED'):
            ...
    """
    # Source mapping for lookups; a class attribute so it can be swapped in tests.
    ENVIRON = os.environ
    # Sentinel meaning "caller supplied no default" (distinct from None).
    NOTSET = NoValue()
    # Lower-cased string values treated as True when casting to bool.
    BOOLEAN_TRUE_STRINGS = ('true', 'on', 'ok', 'y', 'yes', '1')
    URL_CLASS = urllib.parse.ParseResult
    DEFAULT_DATABASE_ENV = 'DATABASE_URL'
    # URL scheme -> Django database ENGINE dotted path.
    DB_SCHEMES = {
        'postgres': 'django.db.backends.postgresql_psycopg2',
        'postgresql': 'django.db.backends.postgresql_psycopg2',
        'psql': 'django.db.backends.postgresql_psycopg2',
        'pgsql': 'django.db.backends.postgresql_psycopg2',
        'postgis': 'django.contrib.gis.db.backends.postgis',
        'mysql': 'django.db.backends.mysql',
        'mysql2': 'django.db.backends.mysql',
        'mysqlgis': 'django.contrib.gis.db.backends.mysql',
        'oracle': 'django.db.backends.oracle',
        'spatialite': 'django.contrib.gis.db.backends.spatialite',
        'sqlite': 'django.db.backends.sqlite3',
        'ldap': 'ldapdb.backends.ldap',
    }
    # Query-string keys promoted to top-level config keys (vs. OPTIONS).
    _DB_BASE_OPTIONS = ['CONN_MAX_AGE', 'ATOMIC_REQUESTS', 'AUTOCOMMIT']
    DEFAULT_CACHE_ENV = 'CACHE_URL'
    # URL scheme -> Django cache BACKEND dotted path.
    CACHE_SCHEMES = {
        'dbcache': 'django.core.cache.backends.db.DatabaseCache',
        'dummycache': 'django.core.cache.backends.dummy.DummyCache',
        'filecache': 'django.core.cache.backends.filebased.FileBasedCache',
        'locmemcache': 'django.core.cache.backends.locmem.LocMemCache',
        'memcache': 'django.core.cache.backends.memcached.MemcachedCache',
        'pymemcache': 'django.core.cache.backends.memcached.PyLibMCCache',
        'rediscache': REDIS_DRIVER,
        'redis': REDIS_DRIVER,
    }
    _CACHE_BASE_OPTIONS = ['TIMEOUT', 'KEY_PREFIX', 'VERSION', 'KEY_FUNCTION', 'BINARY']
    DEFAULT_EMAIL_ENV = 'EMAIL_URL'
    # URL scheme -> Django EMAIL_BACKEND dotted path.
    EMAIL_SCHEMES = {
        'smtp': 'django.core.mail.backends.smtp.EmailBackend',
        'smtps': 'django.core.mail.backends.smtp.EmailBackend',
        'smtp+tls': 'django.core.mail.backends.smtp.EmailBackend',
        'smtp+ssl': 'django.core.mail.backends.smtp.EmailBackend',
        'consolemail': 'django.core.mail.backends.console.EmailBackend',
        'filemail': 'django.core.mail.backends.filebased.EmailBackend',
        'memorymail': 'django.core.mail.backends.locmem.EmailBackend',
        'dummymail': 'django.core.mail.backends.dummy.EmailBackend'
    }
    _EMAIL_BASE_OPTIONS = ['EMAIL_USE_TLS', 'EMAIL_USE_SSL']
    DEFAULT_SEARCH_ENV = 'SEARCH_URL'
    # URL scheme -> Haystack search ENGINE dotted path.
    SEARCH_SCHEMES = {
        "elasticsearch": "haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine",
        "solr": "haystack.backends.solr_backend.SolrEngine",
        "whoosh": "haystack.backends.whoosh_backend.WhooshEngine",
        "xapian": "haystack.backends.xapian_backend.XapianEngine",
        "simple": "haystack.backends.simple_backend.SimpleEngine",
    }
    def __init__(self, **scheme):
        # scheme maps VAR_NAME -> cast, or VAR_NAME -> (cast, default).
        self.scheme = scheme
    def __call__(self, var, cast=None, default=NOTSET, parse_default=False):
        # Calling the instance is shorthand for get_value().
        return self.get_value(var, cast=cast, default=default, parse_default=parse_default)
    # Shortcuts
    # NOTE: several of these intentionally shadow builtins (str, bool, int,
    # float, list, tuple, dict) -- they are methods, accessed via the instance.
    def str(self, var, default=NOTSET):
        """
        :rtype: str
        """
        return self.get_value(var, default=default)
    def unicode(self, var, default=NOTSET):
        """Helper for python2
        :rtype: unicode
        """
        return self.get_value(var, cast=str, default=default)
    def bool(self, var, default=NOTSET):
        """
        :rtype: bool
        """
        return self.get_value(var, cast=bool, default=default)
    def int(self, var, default=NOTSET):
        """
        :rtype: int
        """
        return self.get_value(var, cast=int, default=default)
    def float(self, var, default=NOTSET):
        """
        :rtype: float
        """
        return self.get_value(var, cast=float, default=default)
    def json(self, var, default=NOTSET):
        """
        :returns: Json parsed
        """
        return self.get_value(var, cast=json.loads, default=default)
    def list(self, var, cast=None, default=NOTSET):
        """
        :rtype: list
        """
        # A one-element list as cast means "list of <cast>" (see parse_value).
        return self.get_value(var, cast=list if not cast else [cast], default=default)
    def tuple(self, var, cast=None, default=NOTSET):
        """
        :rtype: tuple
        """
        # A one-element tuple as cast means "tuple of <cast>" (see parse_value).
        return self.get_value(var, cast=tuple if not cast else (cast,), default=default)
    def dict(self, var, cast=dict, default=NOTSET):
        """
        :rtype: dict
        """
        return self.get_value(var, cast=cast, default=default)
    def url(self, var, default=NOTSET):
        """
        :rtype: urlparse.ParseResult
        """
        return self.get_value(var, cast=urllib.parse.urlparse, default=default, parse_default=True)
    def db_url(self, var=DEFAULT_DATABASE_ENV, default=NOTSET, engine=None):
        """Returns a config dictionary, defaulting to DATABASE_URL.
        :rtype: dict
        """
        return self.db_url_config(self.get_value(var, default=default), engine=engine)
    db = db_url
    def cache_url(self, var=DEFAULT_CACHE_ENV, default=NOTSET, backend=None):
        """Returns a config dictionary, defaulting to CACHE_URL.
        :rtype: dict
        """
        return self.cache_url_config(self.url(var, default=default), backend=backend)
    cache = cache_url
    def email_url(self, var=DEFAULT_EMAIL_ENV, default=NOTSET, backend=None):
        """Returns a config dictionary, defaulting to EMAIL_URL.
        :rtype: dict
        """
        return self.email_url_config(self.url(var, default=default), backend=backend)
    email = email_url
    def search_url(self, var=DEFAULT_SEARCH_ENV, default=NOTSET, engine=None):
        """Returns a config dictionary, defaulting to SEARCH_URL.
        :rtype: dict
        """
        return self.search_url_config(self.url(var, default=default), engine=engine)
    def path(self, var, default=NOTSET, **kwargs):
        """
        :rtype: Path
        """
        return Path(self.get_value(var, default=default), **kwargs)
    def get_value(self, var, cast=None, default=NOTSET, parse_default=False):
        """Return value for given environment variable.
        :param var: Name of variable.
        :param cast: Type to cast return value as.
        :param default: If var not present in environ, return this instead.
        :param parse_default: force to parse default..
        :returns: Value from environment or default (if set)
        :raises ImproperlyConfigured: if var is unset and no default exists.
        """
        logger.debug("get '{0}' casted as '{1}' with default '{2}'".format(
            var, cast, default
        ))
        # Fill in cast/default from the scheme passed to __init__, when present.
        if var in self.scheme:
            var_info = self.scheme[var]
            # A 2-length value is treated as (cast, default); anything without
            # len() (e.g. a bare type) is treated as just a cast.
            try:
                has_default = len(var_info) == 2
            except TypeError:
                has_default = False
            if has_default:
                if not cast:
                    cast = var_info[0]
                if default is self.NOTSET:
                    try:
                        default = var_info[1]
                    except IndexError:
                        pass
            else:
                if not cast:
                    cast = var_info
        try:
            value = self.ENVIRON[var]
        except KeyError:
            if default is self.NOTSET:
                error_msg = "Set the {0} environment variable".format(var)
                raise ImproperlyConfigured(error_msg)
            value = default
        # Resolve any proxied values
        # A value like "$OTHER_VAR" is looked up recursively. Note that
        # lstrip('$') strips *all* leading dollar signs, not just one.
        if hasattr(value, 'startswith') and value.startswith('$'):
            value = value.lstrip('$')
            value = self.get_value(value, cast=cast, default=default)
        # Defaults are returned uncast unless parse_default is truthy.
        if value != default or (parse_default and value):
            value = self.parse_value(value, cast)
        return value
    # Class and static methods
    @classmethod
    def parse_value(cls, value, cast):
        """Parse and cast provided value
        :param value: Stringed value.
        :param cast: Type to cast return value as. May be a type, or a
            container ([cast], (cast,), {..}) describing element casts.
        :returns: Casted value
        """
        if cast is None:
            return value
        elif cast is bool:
            # Numeric strings are truthy when non-zero; otherwise compare
            # against the accepted "true" words.
            try:
                value = int(value) != 0
            except ValueError:
                value = value.lower() in cls.BOOLEAN_TRUE_STRINGS
        elif isinstance(cast, list):
            # [cast] -> comma-separated list with each element cast.
            value = list(map(cast[0], [x for x in value.split(',') if x]))
        elif isinstance(cast, tuple):
            # (cast,) -> optionally parenthesised, comma-separated tuple.
            val = value.strip('(').strip(')').split(',')
            value = tuple(map(cast[0], [x for x in val if x]))
        elif isinstance(cast, dict):
            # dict cast spec: 'key'/'value' give global casts, 'cast' gives
            # per-key overrides. Input format: "k1=v1;k2=v2".
            key_cast = cast.get('key', str)
            value_cast = cast.get('value', str)
            value_cast_by_key = cast.get('cast', dict())
            value = dict(map(
                lambda kv: (
                    key_cast(kv[0]),
                    cls.parse_value(kv[1], value_cast_by_key.get(kv[0], value_cast))
                ),
                [val.split('=') for val in value.split(';') if val]
            ))
        elif cast is dict:
            # Bare dict type: "k1=v1,k2=v2" with string keys/values.
            value = dict([val.split('=') for val in value.split(',') if val])
        elif cast is list:
            value = [x for x in value.split(',') if x]
        elif cast is tuple:
            val = value.strip('(').strip(')').split(',')
            value = tuple([x for x in val if x])
        elif cast is float:
            # clean string
            float_str = re.sub(r'[^\d,\.]', '', value)
            # split for avoid thousand separator and different locale comma/dot symbol
            parts = re.split(r'[,\.]', float_str)
            if len(parts) == 1:
                float_str = parts[0]
            else:
                float_str = "{0}.{1}".format(''.join(parts[0:-1]), parts[-1])
            value = float(float_str)
        else:
            value = cast(value)
        return value
    @classmethod
    def db_url_config(cls, url, engine=None):
        """Pulled from DJ-Database-URL, parse an arbitrary Database URL.
        Support currently exists for PostgreSQL, PostGIS, MySQL, Oracle and SQLite.
        SQLite connects to file based databases. The same URL format is used, omitting the hostname,
        and using the "file" portion as the filename of the database.
        This has the effect of four slashes being present for an absolute file path:
        >>> from environ import Env
        >>> Env.db_url_config('sqlite:////full/path/to/your/file.sqlite')
        {'ENGINE': 'django.db.backends.sqlite3', 'HOST': '', 'NAME': '/full/path/to/your/file.sqlite', 'PASSWORD': '', 'PORT': '', 'USER': ''}
        >>> Env.db_url_config('postgres://uf07k1i6d8ia0v:[email protected]:5431/d8r82722r2kuvn')
        {'ENGINE': 'django.db.backends.postgresql_psycopg2', 'HOST': 'ec2-107-21-253-135.compute-1.amazonaws.com', 'NAME': 'd8r82722r2kuvn', 'PASSWORD': 'wegauwhgeuioweg', 'PORT': 5431, 'USER': 'uf07k1i6d8ia0v'}
        """
        if not isinstance(url, cls.URL_CLASS):
            if url == 'sqlite://:memory:':
                # this is a special case, because if we pass this URL into
                # urlparse, urlparse will choke trying to interpret "memory"
                # as a port number
                return {
                    'ENGINE': cls.DB_SCHEMES['sqlite'],
                    'NAME': ':memory:'
                }
                # note: no other settings are required for sqlite
            url = urllib.parse.urlparse(url)
        config = {}
        # Remove query strings.
        path = url.path[1:]
        path = urllib.parse.unquote_plus(path.split('?', 2)[0])
        # if we are using sqlite and we have no path, then assume we
        # want an in-memory database (this is the behaviour of sqlalchemy)
        if url.scheme == 'sqlite' and path == '':
            path = ':memory:'
        if url.scheme == 'ldap':
            # ldap NAME keeps the full scheme://host[:port] form.
            path = '{scheme}://{hostname}'.format(scheme=url.scheme, hostname=url.hostname)
            if url.port:
                path += ':{port}'.format(port=url.port)
        # Update with environment configuration.
        config.update({
            'NAME': path or '',
            'USER': _cast_urlstr(url.username) or '',
            'PASSWORD': _cast_urlstr(url.password) or '',
            'HOST': url.hostname or '',
            'PORT': _cast_int(url.port) or '',
        })
        if url.scheme == 'oracle' and path == '':
            # Oracle TNS names come through as the "host" part of the URL.
            config['NAME'] = config['HOST']
            config['HOST'] = ''
        if url.scheme == 'oracle':
            # Django oracle/base.py strips port and fails on non-string value
            if not config['PORT']:
                del(config['PORT'])
            else:
                config['PORT'] = str(config['PORT'])
        if url.query:
            config_options = {}
            for k, v in urllib.parse.parse_qs(url.query).items():
                if k.upper() in cls._DB_BASE_OPTIONS:
                    config.update({k.upper(): _cast_int(v[0])})
                else:
                    config_options.update({k: _cast_int(v[0])})
            config['OPTIONS'] = config_options
        if engine:
            config['ENGINE'] = engine
        if url.scheme in Env.DB_SCHEMES:
            # NOTE: a recognized scheme overrides an explicit engine= argument.
            config['ENGINE'] = Env.DB_SCHEMES[url.scheme]
        if not config.get('ENGINE', False):
            warnings.warn("Engine not recognized from url: {0}".format(config))
            return {}
        return config
    @classmethod
    def cache_url_config(cls, url, backend=None):
        """Pulled from DJ-Cache-URL, parse an arbitrary Cache URL.
        :param url: Cache URL string or already-parsed ParseResult.
        :param backend: Optional backend dotted path overriding the scheme.
        :return: dict suitable for Django's CACHES setting.
        :raises KeyError: if the scheme is not in CACHE_SCHEMES.
        """
        url = urllib.parse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
        # Multiple comma-separated hosts become a list; a single host a string.
        location = url.netloc.split(',')
        if len(location) == 1:
            location = location[0]
        config = {
            'BACKEND': cls.CACHE_SCHEMES[url.scheme],
            'LOCATION': location,
        }
        # Add the drive to LOCATION
        if url.scheme == 'filecache':
            config.update({
                'LOCATION': url.netloc + url.path,
            })
        if url.path and url.scheme in ['memcache', 'pymemcache']:
            # A path component means a unix-socket memcached address.
            config.update({
                'LOCATION': 'unix:' + url.path,
            })
        elif url.scheme.startswith('redis'):
            # No hostname means a unix-socket redis address.
            if url.hostname:
                scheme = url.scheme.replace('cache', '')
            else:
                scheme = 'unix'
            config['LOCATION'] = scheme + '://' + url.netloc + url.path
        if url.query:
            config_options = {}
            for k, v in urllib.parse.parse_qs(url.query).items():
                opt = {k.upper(): _cast_int(v[0])}
                if k.upper() in cls._CACHE_BASE_OPTIONS:
                    config.update(opt)
                else:
                    config_options.update(opt)
            config['OPTIONS'] = config_options
        if backend:
            config['BACKEND'] = backend
        return config
    @classmethod
    def email_url_config(cls, url, backend=None):
        """Parses an email URL.
        :param url: Email URL string or already-parsed ParseResult.
        :param backend: Optional EMAIL_BACKEND dotted path override.
        :raises ImproperlyConfigured: for an unknown scheme with no backend.
        """
        config = {}
        url = urllib.parse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
        # Remove query strings
        path = url.path[1:]
        path = urllib.parse.unquote_plus(path.split('?', 2)[0])
        # Update with environment configuration
        config.update({
            'EMAIL_FILE_PATH': path,
            'EMAIL_HOST_USER': _cast_urlstr(url.username),
            'EMAIL_HOST_PASSWORD': _cast_urlstr(url.password),
            'EMAIL_HOST': url.hostname,
            'EMAIL_PORT': _cast_int(url.port),
        })
        if backend:
            config['EMAIL_BACKEND'] = backend
        elif url.scheme not in cls.EMAIL_SCHEMES:
            raise ImproperlyConfigured('Invalid email schema %s' % url.scheme)
        elif url.scheme in cls.EMAIL_SCHEMES:
            config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme]
        if url.scheme in ('smtps', 'smtp+tls'):
            config['EMAIL_USE_TLS'] = True
        elif url.scheme == 'smtp+ssl':
            config['EMAIL_USE_SSL'] = True
        if url.query:
            config_options = {}
            for k, v in urllib.parse.parse_qs(url.query).items():
                opt = {k.upper(): _cast_int(v[0])}
                if k.upper() in cls._EMAIL_BASE_OPTIONS:
                    config.update(opt)
                else:
                    config_options.update(opt)
            config['OPTIONS'] = config_options
        return config
    @classmethod
    def search_url_config(cls, url, engine=None):
        # Parse a Haystack search URL into a connection config dict.
        # :raises ImproperlyConfigured: for an unknown scheme.
        config = {}
        url = urllib.parse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
        # Remove query strings.
        path = url.path[1:]
        path = urllib.parse.unquote_plus(path.split('?', 2)[0])
        if url.scheme not in cls.SEARCH_SCHEMES:
            raise ImproperlyConfigured('Invalid search schema %s' % url.scheme)
        config["ENGINE"] = cls.SEARCH_SCHEMES[url.scheme]
        # check commons params
        params = {}
        if url.query:
            params = urllib.parse.parse_qs(url.query)
            if 'EXCLUDED_INDEXES' in params.keys():
                config['EXCLUDED_INDEXES'] = params['EXCLUDED_INDEXES'][0].split(',')
            if 'INCLUDE_SPELLING' in params.keys():
                config['INCLUDE_SPELLING'] = cls.parse_value(params['INCLUDE_SPELLING'][0], bool)
            if 'BATCH_SIZE' in params.keys():
                config['BATCH_SIZE'] = cls.parse_value(params['BATCH_SIZE'][0], int)
        if url.scheme == 'simple':
            return config
        elif url.scheme in ['solr', 'elasticsearch']:
            if 'KWARGS' in params.keys():
                config['KWARGS'] = params['KWARGS'][0]
        # remove trailing slash
        if path.endswith("/"):
            path = path[:-1]
        if url.scheme == 'solr':
            config['URL'] = urllib.parse.urlunparse(('http',) + url[1:2] + (path,) + ('', '', ''))
            if 'TIMEOUT' in params.keys():
                config['TIMEOUT'] = cls.parse_value(params['TIMEOUT'][0], int)
            return config
        if url.scheme == 'elasticsearch':
            # The last path component is the index name; the rest is the URL path.
            split = path.rsplit("/", 1)
            if len(split) > 1:
                path = "/".join(split[:-1])
                index = split[-1]
            else:
                path = ""
                index = split[0]
            config['URL'] = urllib.parse.urlunparse(('http',) + url[1:2] + (path,) + ('', '', ''))
            if 'TIMEOUT' in params.keys():
                config['TIMEOUT'] = cls.parse_value(params['TIMEOUT'][0], int)
            config['INDEX_NAME'] = index
            return config
        config['PATH'] = '/' + path
        if url.scheme == 'whoosh':
            if 'STORAGE' in params.keys():
                config['STORAGE'] = params['STORAGE'][0]
            if 'POST_LIMIT' in params.keys():
                config['POST_LIMIT'] = cls.parse_value(params['POST_LIMIT'][0], int)
        elif url.scheme == 'xapian':
            if 'FLAGS' in params.keys():
                config['FLAGS'] = params['FLAGS'][0]
        if engine:
            config['ENGINE'] = engine
        return config
    @classmethod
    def read_env(cls, env_file=None, **overrides):
        """Read a .env file into os.environ.
        If not given a path to a dotenv path, does filthy magic stack backtracking
        to find manage.py and then find the dotenv.
        http://www.wellfireinteractive.com/blog/easier-12-factor-django/
        https://gist.github.com/bennylope/2999704
        """
        if env_file is None:
            # Guess the .env location from the *caller's* file location.
            frame = sys._getframe()
            env_file = os.path.join(os.path.dirname(frame.f_back.f_code.co_filename), '.env')
            if not os.path.exists(env_file):
                warnings.warn(
                    "%s doesn't exist - if you're not configuring your "
                    "environment separately, create one." % env_file)
                return
        try:
            # env_file may be a path or an already-open file-like object.
            with open(env_file) if isinstance(env_file, string_types) else env_file as f:
                content = f.read()
        except IOError:
            warnings.warn(
                "Error reading %s - if you're not configuring your "
                "environment separately, check this." % env_file)
            return
        logger.debug('Read environment variables from: {0}'.format(env_file))
        for line in content.splitlines():
            # KEY=value lines only; everything else is silently ignored.
            m1 = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', line)
            if m1:
                key, val = m1.group(1), m1.group(2)
                # Strip single quotes, or double quotes with backslash-unescaping.
                m2 = re.match(r"\A'(.*)'\Z", val)
                if m2:
                    val = m2.group(1)
                m3 = re.match(r'\A"(.*)"\Z', val)
                if m3:
                    val = re.sub(r'\\(.)', r'\1', m3.group(1))
                # setdefault: existing environment variables always win.
                cls.ENVIRON.setdefault(key, str(val))
        # set defaults
        for key, value in overrides.items():
            cls.ENVIRON.setdefault(key, value)
class Path(object):
    """Inspired to Django Two-scoops, handling File Paths in Settings.
    >>> from environ import Path
    >>> root = Path('/home')
    >>> root, root(), root('dev')
    (<Path:/home>, '/home', '/home/dev')
    >>> root == Path('/home')
    True
    >>> root in Path('/'), root not in Path('/other/path')
    (True, True)
    >>> root('dev', 'not_existing_dir', required=True)
    Traceback (most recent call last):
    environ.environ.ImproperlyConfigured: Create required path: /home/not_existing_dir
    >>> public = root.path('public')
    >>> public, public.root, public('styles')
    (<Path:/home/public>, '/home/public', '/home/public/styles')
    >>> assets, scripts = public.path('assets'), public.path('assets', 'scripts')
    >>> assets.root, scripts.root
    ('/home/public/assets', '/home/public/assets/scripts')
    >>> assets + 'styles', str(assets + 'styles'), ~assets
    (<Path:/home/public/assets/styles>, '/home/public/assets/styles', <Path:/home/public>)
    """
    def path(self, *paths, **kwargs):
        """Create new Path based on self.root and provided paths.
        :param paths: List of sub paths
        :param kwargs: required=False
        :rtype: Path
        """
        return self.__class__(self.__root__, *paths, **kwargs)
    def file(self, name, *args, **kwargs):
        """Open a file.
        :param name: Filename appended to self.root
        :param args: passed to open()
        :param kwargs: passed to open()
        :rtype: file
        """
        return open(self(name), *args, **kwargs)
    @property
    def root(self):
        """Current directory for this Path"""
        return self.__root__
    def __init__(self, start='', *paths, **kwargs):
        super(Path, self).__init__()
        # is_file=True means *start* is a file: use its directory instead.
        if kwargs.get('is_file', False):
            start = os.path.dirname(start)
        self.__root__ = self._absolute_join(start, *paths, **kwargs)
    def __call__(self, *paths, **kwargs):
        """Retrieve the absolute path, with appended paths
        :param paths: List of sub path of self.root
        :param kwargs: required=False
        """
        return self._absolute_join(self.__root__, *paths, **kwargs)
    def __eq__(self, other):
        return self.__root__ == other.__root__
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # __eq__ without __hash__ made instances unhashable on Python 3;
        # hashing the same key __eq__ compares keeps the two consistent.
        return hash(self.__root__)
    def __add__(self, other):
        return Path(self.__root__, other if not isinstance(other, Path) else other.__root__)
    def __sub__(self, other):
        if isinstance(other, int):
            # Subtracting an int climbs that many directories up.
            return self.path('../' * other)
        elif isinstance(other, string_types):
            # BUGFIX: str.rstrip() treats its argument as a *set of
            # characters*, so the previous ``rstrip(other)`` could strip too
            # much (e.g. '/assets' - 'ts' would also eat the trailing 's'
            # and 't' run). Remove *other* only when it is an exact suffix.
            if other and self.__root__.endswith(other):
                return Path(self.__root__[:-len(other)])
            return Path(self.__root__)
        raise TypeError(
            "unsupported operand type(s) for -: '{0}' and '{1}'".format(self, type(other)))
    def __invert__(self):
        # ~path is shorthand for the parent directory.
        return self.path('..')
    def __contains__(self, item):
        base_path = self.__root__
        if len(base_path) > 1:
            base_path = os.path.join(base_path, '')
        return item.__root__.startswith(base_path)
    def __repr__(self):
        return "<Path:{0}>".format(self.__root__)
    def __str__(self):
        return self.__root__
    def __unicode__(self):
        return self.__str__()
    def __getitem__(self, *args, **kwargs):
        return self.__str__().__getitem__(*args, **kwargs)
    def rfind(self, *args, **kwargs):
        return self.__str__().rfind(*args, **kwargs)
    def find(self, *args, **kwargs):
        return self.__str__().find(*args, **kwargs)
    @staticmethod
    def _absolute_join(base, *paths, **kwargs):
        # Join and normalise; with required=True the path must already exist.
        absolute_path = os.path.abspath(os.path.join(base, *paths))
        if kwargs.get('required', False) and not os.path.exists(absolute_path):
            raise ImproperlyConfigured(
                "Create required path: {0}".format(absolute_path))
        return absolute_path
def register_scheme(scheme):
    """Teach urllib.parse to treat *scheme* like a standard URL scheme.

    Appends *scheme* to every ``uses_*`` registry list so that netloc,
    relative resolution, params, query and fragment splitting all work.
    """
    registries = (
        getattr(urllib.parse, attr)
        for attr in dir(urllib.parse)
        if attr.startswith('uses_')
    )
    for registry in registries:
        registry.append(scheme)
def register_schemes(schemes):
    """Register every scheme in the iterable *schemes* with urllib.parse."""
    for entry in schemes:
        register_scheme(entry)
# Register database and cache schemes in URLs.
# Without this, urllib.parse would not split netloc/query correctly for
# custom schemes such as "postgres://" or "rediscache://" (module import
# side effect, mutating urllib.parse's global scheme lists).
register_schemes(Env.DB_SCHEMES.keys())
register_schemes(Env.CACHE_SCHEMES.keys())
register_schemes(Env.SEARCH_SCHEMES.keys())
register_schemes(Env.EMAIL_SCHEMES.keys())
|
mit
|
TintypeMolly/Yuzuki
|
resource/board.py
|
2
|
1155
|
# -*- coding: utf-8 -*-
from config.config import ARTICLE_PER_PAGE
from exception import Unauthorized
from helper.model_control import get_board, get_article_page
from helper.permission import is_anybody, can_write
from helper.resource import YuzukiResource
from helper.template import render_template
class Board(YuzukiResource):
    """Board index page: renders one page of articles for a named board."""

    def render_GET(self, request):
        """Render the requested page of the board.

        Only the "notice" board is visible to anonymous users; every other
        board requires a logged-in user.

        :raises Unauthorized: when an anonymous user requests a
            non-"notice" board.
        """
        name = request.get_argument("name")
        if not (name == "notice" or is_anybody(request)):
            raise Unauthorized()
        page = request.get_argument_int("page", 1)
        board = get_board(request, name)
        articles = get_article_page(request, board, page)
        total_article_count = board.article_count
        # Ceiling division with divmod. The original used "/", which is
        # true division on Python 3 and would yield a float page count.
        page_total, remainder = divmod(total_article_count, ARTICLE_PER_PAGE)
        if remainder:
            page_total += 1
        context = {
            "items": articles,
            "board": board,
            "page": page,
            "page_total": page_total,
            "can_write": can_write(request, board),
        }
        return render_template("board.html", request, context)
|
mit
|
mschilli87/cdshelf
|
messages.py
|
1
|
8350
|
# cdshelf message definitions
# Copyright (C) 2017 Marcel Schilling
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#######################
# general information #
#######################
# file: messages.py
# created: 2017-03-26
# last update: 2017-12-25
# author: Marcel Schilling <[email protected]>
# license: GNU Affero General Public License Version 3 (GNU AGPL v3)
# purpose: define messages for cdshelf Audio CD backup & conversion tool
######################################
# change log (reverse chronological) #
######################################
# 2017-12-25: added get_metadata command & release ID lookup messages
# 2017-10-15: added pretend_image command & correspondig message
# added open_submission_url parameter
# 2017-08-30: added submission URL to Disc ID lookup error message
# removed ambiguous artist credit error message
# added ambiguous artist credit error message
# fixed typo in function name
# 2017-08-29: added tmpdir_prefix & tmpdir_suffix parameters
# 2017-04-23: re-factored parameter-related message definition (functions
# instead of copy/paste code)
# corrected capitalization of 'Disc ID'
# 2017-03-28: added image & directory commands & directory parameter to usage
# message / added metadata-, directory- & image-related messages
# 2017-03-26: added discid command to usage message / added messages related to
# disc data reading & Disc ID extraction
# added device command & parameter to usage message / added
# device-related messages
# initial version (help, usage & license)
###########
# imports #
###########
import defaults
#######################
# message definitions #
#######################
# define usage message
# NOTE: the string interleaves runtime defaults from the ``defaults`` module.
usage="""\
usage: cdshelf <command> [<command> ...] [--config <parameter>=<value> [<parameter>=<value>]]
The following commands are currently supported:
help print help message
pretend_image pretend to create CD image (dry-run)
image create CD image
get_metadata fetch missing metadata for shelved releases from MusicBrainz
usage show usage
license print license
discid print Disc ID of Audio CD in CD device
device print CD device to be used for reading Audio CDs
directory print cdshelf base directory
The following parameters are currently supported:
device device to read Audio CDs from (default: detect default device)
directory cdshelf base directory (default: '""" + \
defaults.directory + """')
open_submission_url choice on wether ("yes") or not ("no" [or any other value])
to (attempt to) open the submission URL of an unknown Disc
ID in the (default) web browser (default: '""" + \
defaults.open_submission_url + """')
tmpdir_prefix cdshelf temporary directory prefix (default: '""" + \
defaults.tmpdir_prefix + """')
tmpdir_suffix cdshelf temporary directory suffix (default: '""" + \
defaults.tmpdir_suffix + """')
"""
# define help message
# NOTE: intentionally shadows the ``help`` builtin at module level.
help="""\
cdshelf
Audio CD backup & conversion tool
""" + usage
# define license message
# NOTE: intentionally shadows the ``license`` builtin at module level.
license="""\
Copyright (C) 2017 Marcel Schilling
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
##############################
# parameter-related messages #
##############################
# verbose text description to use in messages instead of parameter name
# (keys mirror the --config parameters accepted on the command line)
param_label = {
    "device": "CD device",
    "directory": "shelf directory",
    "open_submission_url": "choice on wether ('yes') or not ('no' [or any " + \
                           "other value]) to (attempt to) open the " + \
                           "submission URL of an unknown Disc ID in the " + \
                           "(default) web browser",
    "tmpdir_prefix": "cdshelf temporary directory prefix",
    "tmpdir_suffix": "cdshelf temporary directory suffix",
}
# message indicating user specified parameter
def user_param(parameter):
    """Return the note that *parameter* was supplied by the user."""
    return "{0} specified by user...".format(param_label[parameter])
# text description of how default parameter is obtained
# (parameters missing from this mapping fall back to a generic description
# in default_param below)
param_default_action = {
    "device": "detecting default device",
}
# message indicating default parameter usage
def default_param(parameter):
    """Return the multi-line notice that the default for *parameter* is used."""
    label = param_label[parameter]
    # Prefer the parameter-specific description of the fallback behaviour;
    # otherwise fall back to a generic "using default <label>" phrase.
    default_action = param_default_action.get(parameter,
                                              "using default " + label)
    return ("\nno " + label + " specified by user; " + default_action +
            "...\n" + "overwrite by setting --config " + parameter + "=<" +
            parameter + ">\n")
# message indicating selected value for parameter
def selected_param(parameter, selected_value):
    """Return the confirmation of the value chosen for *parameter*."""
    return "using {0} '{1}'".format(param_label[parameter], selected_value)
#########################
# disc-related messages #
#########################
def read_disc(device):
    """Return the message announcing that *device* is being read."""
    return "reading disc in device '{0}'...".format(device)


def disc_error(device):
    """Return the error message for an unreadable disc in *device*."""
    return "ERROR: Cannot read disc in device '{0}'!".format(device)


def disc_id(disc_id):
    """Return the message reporting the Disc ID that was read."""
    return "read Disc ID '{0}'".format(disc_id)
#############################
# metadata-related messages #
#############################
def lookup_disc_id(disc_id):
    """Return the message announcing a MusicBrainz Disc ID lookup."""
    return "fetching metadata for Disc ID '{0}' from MusicBrainz...".format(disc_id)


def disc_id_unknown(disc_id, submission_url):
    """Return the error message for a Disc ID unknown to MusicBrainz."""
    lines = [
        "ERROR: Disc ID '{0}' is not associated to any release on MusicBrainz.".format(disc_id),
        "Please submit it using the following URL: '{0}'.".format(submission_url),
    ]
    return "\n".join(lines)


def disc_id_ambiguous(disc_id):
    """Return the error message for a Disc ID matching several releases."""
    return "ERROR: Disc ID '{0}' is associated to several releases on MusicBrainz".format(disc_id)


def lookup_release_id(release_id):
    """Return the message announcing a MusicBrainz release ID lookup."""
    return "fetching metadata for release ID '{0}' from MusicBrainz...".format(release_id)


def release_id_unknown(release_id):
    """Return the error message for a release ID unknown to MusicBrainz."""
    return ("ERROR: Release ID '{0}' is not associated to any release "
            "on MusicBrainz.".format(release_id))
##########################
# image-related messages #
##########################
def image_target(device, directory, basename):
    """Return the description of where a CD image would be created."""
    return (" an image of CD in device '{0}' in directory '{1}' "
            "using basename '{2}'".format(device, directory, basename))


def pretend_image(device, directory, basename):
    """Return the dry-run message for image creation."""
    return "cdshelf image would create" + image_target(device, directory, basename)


def create_image(device, directory, basename):
    """Return the message announcing actual image creation."""
    return "creating" + image_target(device, directory, basename)
|
agpl-3.0
|
jswope00/griffinx
|
common/lib/xmodule/xmodule/tests/test_graders.py
|
102
|
12238
|
"""Grading tests"""
import unittest
from xmodule import graders
from xmodule.graders import Score, aggregate_scores
class GradesheetTest(unittest.TestCase):
    '''Tests the aggregate_scores method'''
    def test_weighted_grading(self):
        scores = []
        # NOTE(review): patches a __sub__ onto the shared Score class so that
        # assertAlmostEqual can diff two Score tuples numerically; this
        # mutation is visible to every other test in the process -- confirm
        # it is intentional.
        Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)
        # No scores at all: both totals are zero.
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(all_total, Score(earned=0, possible=0, graded=False, section="summary"))
        self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary"))
        # One ungraded score: counted in all_total, not in graded_total.
        scores.append(Score(earned=0, possible=5, graded=False, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(all_total, Score(earned=0, possible=5, graded=False, section="summary"))
        self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary"))
        # A graded score contributes to both totals.
        scores.append(Score(earned=3, possible=5, graded=True, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(all_total, Score(earned=3, possible=10, graded=False, section="summary"))
        self.assertAlmostEqual(graded_total, Score(earned=3, possible=5, graded=True, section="summary"))
        scores.append(Score(earned=2, possible=5, graded=True, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(all_total, Score(earned=5, possible=15, graded=False, section="summary"))
        self.assertAlmostEqual(graded_total, Score(earned=5, possible=10, graded=True, section="summary"))
class GraderTest(unittest.TestCase):
    '''Tests grader implementations'''
    # A gradesheet with no assignment categories at all.
    empty_gradesheet = {
    }
    # Categories exist but contain no scores yet.
    incomplete_gradesheet = {
        'Homework': [],
        'Lab': [],
        'Midterm': [],
    }
    # A populated gradesheet shared by all tests below.
    test_gradesheet = {
        'Homework': [Score(earned=2, possible=20.0, graded=True, section='hw1'),
                     Score(earned=16, possible=16.0, graded=True, section='hw2')],
        # The dropped scores should be from the assignments that don't exist yet
        'Lab': [Score(earned=1, possible=2.0, graded=True, section='lab1'),  # Dropped
                Score(earned=1, possible=1.0, graded=True, section='lab2'),
                Score(earned=1, possible=1.0, graded=True, section='lab3'),
                Score(earned=5, possible=25.0, graded=True, section='lab4'),  # Dropped
                Score(earned=3, possible=4.0, graded=True, section='lab5'),  # Dropped
                Score(earned=6, possible=7.0, graded=True, section='lab6'),
                Score(earned=5, possible=6.0, graded=True, section='lab7')],
        'Midterm': [Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"), ],
    }
    def test_single_section_grader(self):
        midterm_grader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
        lab4_grader = graders.SingleSectionGrader("Lab", "lab4")
        bad_lab_grader = graders.SingleSectionGrader("Lab", "lab42")
        # A missing or empty section grades to zero but still yields one
        # breakdown entry.
        for graded in [midterm_grader.grade(self.empty_gradesheet),
                       midterm_grader.grade(self.incomplete_gradesheet),
                       bad_lab_grader.grade(self.test_gradesheet)]:
            self.assertEqual(len(graded['section_breakdown']), 1)
            self.assertEqual(graded['percent'], 0.0)
        graded = midterm_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.505)
        self.assertEqual(len(graded['section_breakdown']), 1)
        graded = lab4_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.2)
        self.assertEqual(len(graded['section_breakdown']), 1)
    def test_assignment_format_grader(self):
        homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
        no_drop_grader = graders.AssignmentFormatGrader("Homework", 12, 0)
        # Even though the minimum number is 3, this should grade correctly when 7 assignments are found
        overflow_grader = graders.AssignmentFormatGrader("Lab", 3, 2)
        lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)
        # Test the grading of an empty gradesheet
        for graded in [homework_grader.grade(self.empty_gradesheet),
                       no_drop_grader.grade(self.empty_gradesheet),
                       homework_grader.grade(self.incomplete_gradesheet),
                       no_drop_grader.grade(self.incomplete_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            # Make sure the breakdown includes 12 sections, plus one summary
            self.assertEqual(len(graded['section_breakdown']), 12 + 1)
        graded = homework_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.11)  # 100% + 10% / 10 assignments
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)
        graded = no_drop_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0916666666666666)  # 100% + 10% / 12 assignments
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)
        graded = overflow_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.8880952380952382)  # 100% + 10% / 5 assignments
        self.assertEqual(len(graded['section_breakdown']), 7 + 1)
        graded = lab_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.9226190476190477)
        self.assertEqual(len(graded['section_breakdown']), 7 + 1)
    def test_assignment_format_grader_on_single_section_entry(self):
        midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)
        # Test the grading on a section with one item:
        for graded in [midterm_grader.grade(self.empty_gradesheet),
                       midterm_grader.grade(self.incomplete_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            # Make sure the breakdown includes just the one summary
            self.assertEqual(len(graded['section_breakdown']), 0 + 1)
            self.assertEqual(graded['section_breakdown'][0]['label'], 'Midterm')
        graded = midterm_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.505)
        self.assertEqual(len(graded['section_breakdown']), 0 + 1)
    def test_weighted_subsections_grader(self):
        # First, a few sub graders
        homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
        lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)
        # phasing out the use of SingleSectionGraders, and instead using AssignmentFormatGraders that
        # will act like SingleSectionGraders on single sections.
        midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)
        weighted_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.25),
                                                             (lab_grader, lab_grader.category, 0.25),
                                                             (midterm_grader, midterm_grader.category, 0.5)])
        over_one_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.5),
                                                                     (lab_grader, lab_grader.category, 0.5),
                                                                     (midterm_grader, midterm_grader.category, 0.5)])
        # The midterm should have all weight on this one
        zero_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.0),
                                                                 (lab_grader, lab_grader.category, 0.0),
                                                                 (midterm_grader, midterm_grader.category, 0.5)])
        # This should always have a final percent of zero
        all_zero_weights_grader = graders.WeightedSubsectionsGrader([(homework_grader, homework_grader.category, 0.0),
                                                                     (lab_grader, lab_grader.category, 0.0),
                                                                     (midterm_grader, midterm_grader.category, 0.0)])
        empty_grader = graders.WeightedSubsectionsGrader([])
        graded = weighted_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)
        graded = over_one_weights_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.7688095238095238)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)
        graded = zero_weights_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.2525)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)
        graded = all_zero_weights_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)
        # Empty or incomplete gradesheets grade to zero but keep the full
        # breakdown structure.
        for graded in [weighted_grader.grade(self.empty_gradesheet),
                       weighted_grader.grade(self.incomplete_gradesheet),
                       zero_weights_grader.grade(self.empty_gradesheet),
                       all_zero_weights_grader.grade(self.empty_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
            self.assertEqual(len(graded['grade_breakdown']), 3)
        graded = empty_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), 0)
        self.assertEqual(len(graded['grade_breakdown']), 0)
    def test_grader_from_conf(self):
        # Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test
        # in test_graders.WeightedSubsectionsGrader, but generate the graders with confs.
        weighted_grader = graders.grader_from_conf([
            {
                'type': "Homework",
                'min_count': 12,
                'drop_count': 2,
                'short_label': "HW",
                'weight': 0.25,
            },
            {
                'type': "Lab",
                'min_count': 7,
                'drop_count': 3,
                'category': "Labs",
                'weight': 0.25
            },
            {
                'type': "Midterm",
                'name': "Midterm Exam",
                'short_label': "Midterm",
                'weight': 0.5,
            },
        ])
        empty_grader = graders.grader_from_conf([])
        graded = weighted_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)
        graded = empty_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), 0)
        self.assertEqual(len(graded['grade_breakdown']), 0)
        # Test that graders can also be used instead of lists of dictionaries
        homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
        homework_grader2 = graders.grader_from_conf(homework_grader)
        graded = homework_grader2.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.11)
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)
        # TODO: How do we test failure cases? The parser only logs an error when
        # it can't parse something. Maybe it should throw exceptions?
|
agpl-3.0
|
olapaola/olapaola-android-scripting
|
python/src/Lib/test/test_epoll.py
|
51
|
6269
|
# Copyright (c) 2001-2006 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Tests for epoll wrapper.
"""
import os
import socket
import errno
import time
import select
import tempfile
import unittest
from test import test_support
# epoll is Linux-only; skip the whole module where select lacks it.
if not hasattr(select, "epoll"):
    raise test_support.TestSkipped("test works only on Linux 2.6")
# The symbol can exist while the running kernel still lacks the syscall.
try:
    select.epoll()
except IOError, e:
    if e.errno == errno.ENOSYS:
        raise test_support.TestSkipped("kernel doesn't support epoll()")
class TestEPoll(unittest.TestCase):
    """Exercises the select.epoll wrapper against real local sockets."""
    def setUp(self):
        # Listening socket on an ephemeral port; every socket created during
        # a test is tracked in self.connections for tearDown to close.
        self.serverSocket = socket.socket()
        self.serverSocket.bind(('127.0.0.1', 0))
        self.serverSocket.listen(1)
        self.connections = [self.serverSocket]
    def tearDown(self):
        for skt in self.connections:
            skt.close()
    def _connected_pair(self):
        # Build a connected (client, server) pair using a non-blocking
        # connect; both ends are registered for cleanup.
        client = socket.socket()
        client.setblocking(False)
        try:
            client.connect(('127.0.0.1', self.serverSocket.getsockname()[1]))
        except socket.error, e:
            # Non-blocking connect to localhost reports EINPROGRESS.
            self.assertEquals(e.args[0], errno.EINPROGRESS)
        else:
            raise AssertionError("Connect should have raised EINPROGRESS")
        server, addr = self.serverSocket.accept()
        self.connections.extend((client, server))
        return client, server
    def test_create(self):
        try:
            ep = select.epoll(16)
        except OSError, e:
            raise AssertionError(str(e))
        self.assert_(ep.fileno() > 0, ep.fileno())
        self.assert_(not ep.closed)
        ep.close()
        self.assert_(ep.closed)
        # fileno() on a closed epoll object must raise.
        self.assertRaises(ValueError, ep.fileno)
    def test_badcreate(self):
        # The constructor takes at most one integer size hint.
        self.assertRaises(TypeError, select.epoll, 1, 2, 3)
        self.assertRaises(TypeError, select.epoll, 'foo')
        self.assertRaises(TypeError, select.epoll, None)
        self.assertRaises(TypeError, select.epoll, ())
        self.assertRaises(TypeError, select.epoll, ['foo'])
        self.assertRaises(TypeError, select.epoll, {})
    def test_add(self):
        server, client = self._connected_pair()
        ep = select.epoll(2)
        try:
            ep.register(server.fileno(), select.EPOLLIN | select.EPOLLOUT)
            ep.register(client.fileno(), select.EPOLLIN | select.EPOLLOUT)
        finally:
            ep.close()
    def test_fromfd(self):
        server, client = self._connected_pair()
        ep = select.epoll(2)
        # An epoll object built from an existing fd shares the kernel set.
        ep2 = select.epoll.fromfd(ep.fileno())
        ep2.register(server.fileno(), select.EPOLLIN | select.EPOLLOUT)
        ep2.register(client.fileno(), select.EPOLLIN | select.EPOLLOUT)
        events = ep.poll(1, 4)
        events2 = ep2.poll(0.9, 4)
        self.assertEqual(len(events), 2)
        self.assertEqual(len(events2), 2)
        ep.close()
        # Closing one object invalidates the shared fd for the other.
        try:
            ep2.poll(1, 4)
        except IOError, e:
            self.failUnlessEqual(e.args[0], errno.EBADF, e)
        else:
            self.fail("epoll on closed fd didn't raise EBADF")
    def test_control_and_wait(self):
        client, server = self._connected_pair()
        ep = select.epoll(16)
        ep.register(server.fileno(),
                    select.EPOLLIN | select.EPOLLOUT | select.EPOLLET)
        ep.register(client.fileno(),
                    select.EPOLLIN | select.EPOLLOUT | select.EPOLLET)
        # Freshly connected sockets are immediately writable.
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.failIf(then - now > 0.1, then - now)
        events.sort()
        expected = [(client.fileno(), select.EPOLLOUT),
                    (server.fileno(), select.EPOLLOUT)]
        expected.sort()
        self.assertEquals(events, expected)
        self.failIf(then - now > 0.01, then - now)
        # Edge-triggered: no new events until the state changes again.
        now = time.time()
        events = ep.poll(timeout=2.1, maxevents=4)
        then = time.time()
        self.failIf(events)
        client.send("Hello!")
        server.send("world!!!")
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.failIf(then - now > 0.01)
        events.sort()
        expected = [(client.fileno(), select.EPOLLIN | select.EPOLLOUT),
                    (server.fileno(), select.EPOLLIN | select.EPOLLOUT)]
        expected.sort()
        self.assertEquals(events, expected)
        # After unregister/modify only the server's EPOLLOUT remains.
        ep.unregister(client.fileno())
        ep.modify(server.fileno(), select.EPOLLOUT)
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.failIf(then - now > 0.01)
        expected = [(server.fileno(), select.EPOLLOUT)]
        self.assertEquals(events, expected)
    def test_errors(self):
        # Negative size hints and fds are rejected.
        self.assertRaises(ValueError, select.epoll, -2)
        self.assertRaises(ValueError, select.epoll().register, -1,
                          select.EPOLLIN)
    def test_unregister_closed(self):
        # Unregistering the fd of an already-closed socket must not raise.
        server, client = self._connected_pair()
        fd = server.fileno()
        ep = select.epoll(16)
        ep.register(server)
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.failIf(then - now > 0.01)
        server.close()
        ep.unregister(fd)
def test_main():
    # Standard test_support entry point: run the whole TestEPoll suite.
    test_support.run_unittest(TestEPoll)
if __name__ == "__main__":
    test_main()
|
apache-2.0
|
google-code-export/pyglet
|
tests/font/VALIGN.py
|
19
|
1049
|
#!/usr/bin/env python
'''Test that font.Text vertical alignment works.
Four labels will be aligned top, center, baseline and bottom.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import gl
from pyglet import font
from . import base_text
class TEST_VALIGN(base_text.TextTestBase):
    """Renders four labels, one per valign mode, over a reference line at y=50."""
    font_name = ''
    window_size = 600, 200
    def render(self):
        fnt = font.load('', self.font_size)
        # NOTE(review): h and w are computed but never used below -- presumably
        # leftovers; confirm before removing.
        h = fnt.ascent - fnt.descent
        w = self.window.width
        self.labels = []
        x = 0
        # One label per alignment, laid out left to right at y=50.
        for align in 'top center baseline bottom'.split():
            label = align.upper() + 'y'
            self.labels.append(font.Text(fnt, label, x, 50, valign=align))
            x += self.labels[-1].width
    def draw(self):
        # White horizontal reference line at y=50 so the alignment of each
        # label can be judged visually.
        gl.glColor3f(1, 1, 1)
        gl.glBegin(gl.GL_LINES)
        gl.glVertex2f(0, 50)
        gl.glVertex2f(self.window.width, 50)
        gl.glEnd()
        for label in self.labels:
            label.draw()
if __name__ == '__main__':
    unittest.main()
|
bsd-3-clause
|
FedoraScientific/salome-geom
|
doc/salome/examples/basic_operations_ex01.py
|
1
|
1302
|
# Partition
# Example: split a box with a plane using the three GEOM partition
# operations (MakePartition, Partition, MakeHalfPartition), then publish
# and display the results in the study.
import salome
salome.salome_init()
import GEOM
from salome.geom import geomBuilder
geompy = geomBuilder.New(salome.myStudy)
gg = salome.ImportComponentGUI("GEOM")
# create a vertex and a vector
p0 = geompy.MakeVertex( 0., 0., 0.)
p200 = geompy.MakeVertex(200., 200., 200.)
pz = geompy.MakeVertex( 0., 0., 100.)
# create a vector
vxyz = geompy.MakeVectorDXDYDZ(100., 100., 100.)
# create a box from two points
box = geompy.MakeBoxTwoPnt(p0, p200)
# create a plane (pz = origin point, vxyz = normal, trimsize = plane size)
trimsize = 500.
plane = geompy.MakePlane(pz, vxyz, trimsize)
# create partition objects with the three available operations
partition1 = geompy.MakePartition([box], [plane])
partition2 = geompy.Partition([box], [plane])
partition3 = geompy.MakeHalfPartition(box, plane)
# add objects in the study
id_box = geompy.addToStudy(box,"Box")
id_plane = geompy.addToStudy(plane,"Plane")
id_partition1 = geompy.addToStudy(partition1,"MakePartition")
id_partition2 = geompy.addToStudy(partition2,"Partition")
id_partition3 = geompy.addToStudy(partition3,"MakeHalfPartition")
# display the partition objects and the plane (mode 1 = shading)
gg.createAndDisplayGO(id_box)
gg.setDisplayMode(id_box,1)
gg.createAndDisplayGO(id_plane)
gg.setDisplayMode(id_plane,1)
gg.createAndDisplayGO(id_partition1)
gg.createAndDisplayGO(id_partition2)
gg.createAndDisplayGO(id_partition3)
|
lgpl-2.1
|
zhenv5/scikit-learn
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
230
|
2823
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
    # Tests the Silhouette Coefficient.
    iris = datasets.load_iris()
    features, labels = iris.data, iris.target
    dists = pairwise_distances(features, metric='euclidean')
    # With the true labels, the coefficient is expected to be positive.
    score_precomputed = silhouette_score(dists, labels, metric='precomputed')
    assert score_precomputed > 0
    # Computing the distances on the fly must give the same result.
    score_euclidean = silhouette_score(features, labels, metric='euclidean')
    assert_almost_equal(score_precomputed, score_euclidean)
    # Sub-sampling with a fixed seed keeps both code paths in agreement.
    half = int(features.shape[0] / 2)
    score_precomputed = silhouette_score(dists, labels, metric='precomputed',
                                         sample_size=half,
                                         random_state=0)
    score_euclidean = silhouette_score(features, labels, metric='euclidean',
                                       sample_size=half,
                                       random_state=0)
    assert score_precomputed > 0
    assert score_euclidean > 0
    assert_almost_equal(score_euclidean, score_precomputed)
    # The sparse representation of the data must also be supported.
    sparse_features = csr_matrix(features)
    dists = pairwise_distances(sparse_features, metric='euclidean')
    score_sparse = silhouette_score(dists, labels, metric='precomputed')
    assert score_sparse > 0
def test_no_nan():
    # Assert Silhouette Coefficient != nan when there is 1 sample in a class.
    # Regression test for issue #960: a cluster holding a single sample
    # (cluster 0 below) used to make silhouette_score return nan.
    labels = np.array([1, 0, 1, 1, 1])
    # Any distance matrix will do; only the label layout matters here.
    distances = np.random.RandomState(0).rand(len(labels), len(labels))
    result = silhouette_score(distances, labels, metric='precomputed')
    assert_false(np.isnan(result))
def test_correct_labelsize():
    # Assert that silhouette_score enforces 1 < n_labels < n_samples.
    dataset = datasets.load_iris()
    X = dataset.data
    # Raw strings: the message is matched as a regex, so '.', '(' and ')'
    # must be escaped, and non-raw literals like '\.' are invalid escape
    # sequences (deprecated since Python 3.6).
    msg_template = (r'Number of labels is %d\. Valid values are 2 '
                    r'to n_samples - 1 \(inclusive\)')
    # n_labels = n_samples
    y = np.arange(X.shape[0])
    assert_raises_regexp(ValueError,
                         msg_template % len(np.unique(y)),
                         silhouette_score, X, y)
    # n_labels = 1
    y = np.zeros(X.shape[0])
    assert_raises_regexp(ValueError,
                         msg_template % len(np.unique(y)),
                         silhouette_score, X, y)
|
bsd-3-clause
|
ahu-odoo/odoo
|
addons/l10n_in/__init__.py
|
702
|
1046
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ksophocleous/grpc
|
src/python/grpcio/grpc/framework/foundation/abandonment.py
|
63
|
1822
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for indicating abandonment of computation."""
class Abandoned(Exception):
  """Indicates that some computation is being abandoned.

  Abandoning a computation is different than returning a value or raising
  an exception indicating some operational or programming defect.
  """
|
bsd-3-clause
|
Poppin-Tech/mitro
|
mitro-core/tools/txnsim.py
|
25
|
1305
|
#!/usr/bin/env python
import sys
import random
import heapq
# Transaction durations in milliseconds, one sample per decile of the
# observed distribution; random.choice below draws uniformly from them.
DISTRIBUTION = [
    341,
    444,
    551,
    656,
    765,
    906,
    1130,
    1588,
    3313,
    84399
]
# Observed arrival rate: 5754 events over ~431997259 ms.
EVENTS_PER_MS = 5754. / 431997259
# do a simulation over one simulated day (in ms)
MAX_TIME = 60 * 60 * 24 * 1000
if __name__ == '__main__':
    # argv[1]: multiplier applied to the observed arrival rate.
    scale_factor = int(sys.argv[1])
    exp_lambda = (EVENTS_PER_MS * scale_factor)
    num_pending_transactions = []
    # a new event shows up according to the random variable, and lasts for
    # some time based on the distribution of percentiles
    current_time = 0
    count = 0
    # Min-heap of expiry times for transactions still in flight.
    pending_events = []
    while current_time < MAX_TIME:
        count += 1
        # Poisson arrivals: exponential inter-arrival times.
        current_time += random.expovariate(exp_lambda)
        # new event!
        heapq.heappush(pending_events, current_time + random.choice(DISTRIBUTION))
        # Retire every transaction whose expiry has passed.
        while pending_events and (pending_events[0] < current_time):
            heapq.heappop(pending_events)
        num_pending_transactions.append(len(pending_events))
    print 'total transactions: ', count
    print 'max pending at any time: ', max(num_pending_transactions)
    # NOTE(review): Python 2 integer division -- the mean is truncated to an
    # int here; confirm that is acceptable for this report.
    mean = sum(num_pending_transactions) / len(num_pending_transactions)
    print 'average pending: ', mean
    print ('stddev pending: ',
        (sum([(x-mean)**2 for x in num_pending_transactions]) / len(num_pending_transactions))**.5
    )
|
gpl-3.0
|
crazcalm/PyTN_talk_proposal
|
recipies/recipe1/tests/test_fib.py
|
1
|
1634
|
import unittest
import fib
class Testing(unittest.TestCase):
    # Sanity check that the unittest machinery itself is wired up.
    def test_testing(self):
        self.assertEqual(1,1, "Of course it does!")
class Fib_(unittest.TestCase):
    """Shared checks for the fib implementations.

    Subclasses override setUp to bind self.fib to a concrete implementation
    and expose the relevant checks below as test_* methods.
    """
    def setUp(self):
        # Default implementation; subclasses rebind self.fib.
        self.fib = fib.fib2
    def basecase_num_1(self):
        self.assertEqual(self.fib(1), 0, "fib num 1 is not correct")
    def basecase_num_2(self):
        self.assertEqual(self.fib(2), 1, "fib num 2 is not correct")
    def fib_num_10(self):
        # Sequence: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34 -> the 10th value is 34.
        # (The old failure message claimed "fib 10 is 33", contradicting the
        # asserted value.)
        self.assertEqual(self.fib(10), 34, "fib 10 is not 34")
    def basecase_list_1(self):
        self.assertEqual(self.fib(1), [0], "fib list with one item is not correct")
    def basecase_list_2(self):
        self.assertEqual(self.fib(2), [0,1], "fib list 2 with two items is not correct")
    def fib_list_10(self):
        self.assertEqual(self.fib(10), [0,1,1,2,3,5,8,13,21,34],
                "Fib list ten is not correct")
class Fib1_testing(Fib_):
    # Runs the list-based checks from Fib_ against fib.fib1.
    def setUp(self):
        self.fib = fib.fib1
    def test_basecase_1(self):
        self.basecase_list_1()
    def test_basecase_2(self):
        self.basecase_list_2()
    def test_fib_list_10(self):
        self.fib_list_10()
class Fib2_testing(Fib_):
    # Runs the scalar checks from Fib_ against fib.fib2.
    def setUp(self):
        self.fib = fib.fib2
    def test_basecase_1(self):
        self.basecase_num_1()
    def test_basecase_2(self):
        self.basecase_num_2()
    def test_fib_num_10(self):
        self.fib_num_10()
class Fib_yield_list(Fib1_testing):
    # Reuses the list checks against the generator-based fib.fib_list.
    def setUp(self):
        self.fib = fib.fib_list
class Fib_yield_num(Fib2_testing):
    # Reuses the scalar checks against fib.nth_fib_num.
    def setUp(self):
        self.fib = fib.nth_fib_num
if __name__ == "__main__":
    unittest.main()
|
mit
|
edxnercel/edx-platform
|
common/djangoapps/status/status.py
|
48
|
1463
|
"""
A tiny app that checks for a status message.
"""
from django.conf import settings
from django.core.cache import cache
import json
import logging
import os
log = logging.getLogger(__name__)
def get_site_status_msg(course_id):
    """
    Look for a file settings.STATUS_MESSAGE_PATH. If found, read it,
    parse as json, and do the following:
    * if there is a key 'global', include that in the result.
    * if there is a key for course_id, append that to the result.
    Otherwise, return None.
    The computed message is cached for 5 minutes.
    If something goes wrong, returns None. ("is there a status msg?" logic is
    not allowed to break the entire site).
    """
    try:
        # first check for msg in cache
        msg = cache.get('site_status_msg')
        if msg is not None:
            return msg
        if os.path.isfile(settings.STATUS_MESSAGE_PATH):
            with open(settings.STATUS_MESSAGE_PATH) as f:
                content = f.read()
        else:
            return None
        status_dict = json.loads(content)
        msg = status_dict.get('global', None)
        if course_id in status_dict:
            # Separate the global message from the course message, if both exist.
            msg = msg + "<br>" if msg else ''
            msg += status_dict[course_id]
        # set msg to cache, with expiry 5 mins
        cache.set('site_status_msg', msg, 60 * 5)
        return msg
    except Exception:  # pylint: disable=broad-except
        # Deliberately broad (but not a bare except, which would also swallow
        # SystemExit/KeyboardInterrupt): a malformed status file or a cache
        # hiccup must never break page rendering.
        log.exception("Error while getting a status message.")
        return None
|
agpl-3.0
|
izgzhen/servo
|
tests/wpt/harness/wptrunner/browsers/b2g.py
|
117
|
8016
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import tempfile
import shutil
import subprocess
import fxos_appgen
import gaiatest
import mozdevice
import moznetwork
import mozrunner
from marionette import expected
from marionette.by import By
from marionette.wait import Wait
from mozprofile import FirefoxProfile, Preferences
from .base import get_free_port, BrowserError, Browser, ExecutorBrowser
from ..executors.executormarionette import MarionetteTestharnessExecutor
from ..hosts import HostsFile, HostsLine
from ..environment import hostnames
# Directory containing this module.
here = os.path.split(__file__)[0]
# Registration entry points picked up by wptrunner for the "b2g" product;
# values name objects defined in this module.
__wptrunner__ = {"product": "b2g",
                 "check_args": "check_args",
                 "browser": "B2GBrowser",
                 "executor": {"testharness": "B2GMarionetteTestharnessExecutor"},
                 "browser_kwargs": "browser_kwargs",
                 "executor_kwargs": "executor_kwargs",
                 "env_options": "env_options"}
def check_args(**kwargs):
    # No product-specific command-line validation is needed for b2g.
    pass
def browser_kwargs(test_environment, **kwargs):
    """Build the constructor keyword arguments for B2GBrowser."""
    return {
        "prefs_root": kwargs["prefs_root"],
        "no_backup": kwargs.get("b2g_no_backup", False),
    }
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                    **kwargs):
    """Build the constructor keyword arguments for the test executor."""
    multiplier = kwargs["timeout_multiplier"]
    if multiplier is None:
        # Device runs are slow; double every timeout by default.
        multiplier = 2
    rv = {"server_config": server_config,
          "timeout_multiplier": multiplier,
          "close_after_done": False}
    # Only reftests need access to the screenshot cache.
    if test_type == "reftest":
        rv["cache_manager"] = cache_manager
    return rv
def env_options():
    """Test-environment overrides used when running against a device."""
    options = {}
    options["host"] = "web-platform.test"
    options["bind_hostname"] = "false"
    options["test_server_port"] = False
    return options
class B2GBrowser(Browser):
    """Browser implementation driving Gecko on a B2G device over adb.

    Device state under /data and the hosts file are backed up in setup()
    and restored in cleanup(), so test-induced changes do not outlive a run.
    """
    # Marionette ports already handed out; shared across instances so two
    # browsers never get the same port.
    used_ports = set()
    init_timeout = 180
    def __init__(self, logger, prefs_root, no_backup=False):
        Browser.__init__(self, logger)
        logger.info("Waiting for device")
        subprocess.call(["adb", "wait-for-device"])
        self.device = mozdevice.DeviceManagerADB()
        self.marionette_port = get_free_port(2828, exclude=self.used_ports)
        self.used_ports.add(self.marionette_port)
        self.cert_test_app = None
        self.runner = None
        self.prefs_root = prefs_root
        self.no_backup = no_backup
        self.backup_path = None
        self.backup_paths = []
        self.backup_dirs = []
    def setup(self):
        """Back up device state (unless disabled) and install the hosts file."""
        self.logger.info("Running B2G setup")
        self.backup_path = tempfile.mkdtemp()
        self.logger.debug("Backing up device to %s" % (self.backup_path,))
        if not self.no_backup:
            self.backup_dirs = [("/data/local", os.path.join(self.backup_path, "local")),
                                ("/data/b2g/mozilla", os.path.join(self.backup_path, "profile"))]
            self.backup_paths = [("/system/etc/hosts", os.path.join(self.backup_path, "hosts"))]
            for remote, local in self.backup_dirs:
                self.device.getDirectory(remote, local)
            for remote, local in self.backup_paths:
                self.device.getFile(remote, local)
        self.setup_hosts()
    def start(self):
        """Create a marionette-enabled profile and start the device runner."""
        profile = FirefoxProfile()
        profile.set_preferences({"dom.disable_open_during_load": False,
                                 "marionette.defaultPrefs.enabled": True})
        self.logger.debug("Creating device runner")
        self.runner = mozrunner.B2GDeviceRunner(profile=profile)
        self.logger.debug("Starting device runner")
        self.runner.start()
        self.logger.debug("Device runner started")
    def setup_hosts(self):
        """Point the test hostnames at this host in the device's hosts file."""
        host_ip = moznetwork.get_ip()
        temp_dir = tempfile.mkdtemp()
        hosts_path = os.path.join(temp_dir, "hosts")
        remote_path = "/system/etc/hosts"
        try:
            self.device.getFile("/system/etc/hosts", hosts_path)
            with open(hosts_path) as f:
                hosts_file = HostsFile.from_file(f)
            for canonical_hostname in hostnames:
                hosts_file.set_host(HostsLine(host_ip, canonical_hostname))
            with open(hosts_path, "w") as f:
                hosts_file.to_file(f)
            self.logger.info("Installing hosts file")
            # /system is mounted read-only; remount before replacing the file.
            self.device.remount()
            self.device.removeFile(remote_path)
            self.device.pushFile(hosts_path, remote_path)
        finally:
            os.unlink(hosts_path)
            os.rmdir(temp_dir)
    def load_prefs(self):
        """Read base preferences from prefs_root, warning if they are missing."""
        prefs_path = os.path.join(self.prefs_root, "prefs_general.js")
        if os.path.exists(prefs_path):
            preferences = Preferences.read_prefs(prefs_path)
        else:
            self.logger.warning("Failed to find base prefs file in %s" % prefs_path)
            preferences = []
        return preferences
    def stop(self):
        # Nothing to do here: cleanup() restores state and reboots the device.
        pass
    def on_output(self):
        raise NotImplementedError
    def cleanup(self):
        """Restore the backed-up device state and reboot the device."""
        self.logger.debug("Running browser cleanup steps")
        self.device.remount()
        for remote, local in self.backup_dirs:
            self.device.removeDir(remote)
            self.device.pushDir(local, remote)
        for remote, local in self.backup_paths:
            self.device.removeFile(remote)
            self.device.pushFile(local, remote)
        shutil.rmtree(self.backup_path)
        self.device.reboot(wait=True)
    def pid(self):
        # The browser runs on the device; no local pid exists.
        return None
    def is_alive(self):
        # NOTE(review): always reports alive -- presumably failures surface
        # through marionette errors instead; confirm.
        return True
    def executor_browser(self):
        return B2GExecutorBrowser, {"marionette_port": self.marionette_port}
class B2GExecutorBrowser(ExecutorBrowser):
    """Executor-process proxy for B2GBrowser.

    The following methods are called from a different process than the one
    owning the B2GBrowser, so device access goes through a fresh adb
    connection with the marionette port forwarded locally.
    """
    def __init__(self, *args, **kwargs):
        ExecutorBrowser.__init__(self, *args, **kwargs)
        # Fix: dropped unused local `import sys, subprocess` (neither name
        # was referenced in this class).
        self.device = mozdevice.ADBB2G()
        # Device-side marionette always listens on 2828; forward the
        # per-instance local port to it.
        self.device.forward("tcp:%s" % self.marionette_port,
                            "tcp:2828")
        self.executor = None
        self.marionette = None
        self.gaia_device = None
        self.gaia_apps = None
    def after_connect(self, executor):
        """Run post-connect steps: wait for the homescreen, then install and
        launch the CertTest container app."""
        self.executor = executor
        self.marionette = executor.marionette
        self.executor.logger.debug("Running browser.after_connect steps")
        self.gaia_apps = gaiatest.GaiaApps(marionette=executor.marionette)
        self.executor.logger.debug("Waiting for homescreen to load")
        # Moved out of gaia_test temporarily
        self.executor.logger.info("Waiting for B2G to be ready")
        self.wait_for_homescreen(timeout=60)
        self.install_cert_app()
        self.use_cert_app()
    def install_cert_app(self):
        """Install the container app used to run the tests"""
        if fxos_appgen.is_installed("CertTest App"):
            self.executor.logger.info("CertTest App is already installed")
            return
        self.executor.logger.info("Installing CertTest App")
        app_path = os.path.join(here, "b2g_setup", "certtest_app.zip")
        fxos_appgen.install_app("CertTest App", app_path, marionette=self.marionette)
        self.executor.logger.debug("Install complete")
    def use_cert_app(self):
        """Start the app used to run the tests"""
        self.executor.logger.info("Homescreen loaded")
        self.gaia_apps.launch("CertTest App")
    def wait_for_homescreen(self, timeout):
        """Block until the homescreen element reports loading-state=false."""
        self.executor.logger.info("Waiting for home screen to load")
        Wait(self.marionette, timeout).until(expected.element_present(
            By.CSS_SELECTOR, '#homescreen[loading-state=false]'))
class B2GMarionetteTestharnessExecutor(MarionetteTestharnessExecutor):
    """Testharness executor that runs the browser's B2G-specific connect
    steps (homescreen wait, CertTest app launch) before the standard ones."""
    def after_connect(self):
        self.browser.after_connect(self)
        MarionetteTestharnessExecutor.after_connect(self)
|
mpl-2.0
|
jjscarafia/odoo
|
addons/website_forum/__openerp__.py
|
321
|
1905
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Forum',
'category': 'Website',
'summary': 'Forum, FAQ, Q&A',
'version': '1.0',
'description': """
Ask questions, get answers, no distractions
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/community-builder',
'depends': [
'auth_signup',
'gamification',
'website_mail',
'website_partner'
],
'data': [
'data/forum_data.xml',
'views/forum.xml',
'views/res_users.xml',
'views/website_forum.xml',
'views/ir_qweb.xml',
'security/ir.model.access.csv',
'data/badges_question.xml',
'data/badges_answer.xml',
'data/badges_participation.xml',
'data/badges_moderation.xml',
],
'qweb': [
'static/src/xml/*.xml'
],
'demo': [
'data/forum_demo.xml',
],
'installable': True,
'application': True,
}
|
agpl-3.0
|
shakamunyi/tensorflow
|
tensorflow/python/kernel_tests/session_ops_test.py
|
104
|
10517
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.session_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import session_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SessionOpsTest(test.TestCase):
  """Tests for persistent in-session tensors (``session_ops``).

  Exercises creating tensor handles with ``get_session_handle``, feeding
  them back via ``get_session_tensor``, deleting them, and moving/placing
  them across devices.
  """
  def testHandleBasic(self):
    """A handle can be fed back into the graph via get_session_tensor."""
    with self.test_session() as sess:
      # Return a handle.
      a = constant_op.constant(10)
      b = constant_op.constant(5)
      c = math_ops.multiply(a, b)
      h = session_ops.get_session_handle(c)
      h = sess.run(h)
      # Feed a tensor handle.
      f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
      y = math_ops.multiply(x, 10)
      self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
  def testHandleEval(self):
    """h.eval() fetches the value behind a handle."""
    with self.test_session() as sess:
      # Return a handle.
      a = constant_op.constant(10)
      b = constant_op.constant(5)
      c = math_ops.multiply(a, b)
      h = session_ops.get_session_handle(c)
      h = sess.run(h)
      # Get the tensor from its handle.
      self.assertEqual(50, h.eval())
  def testHandleAndValue(self):
    """A handle and a plain value can be fetched in the same run call."""
    with self.test_session() as sess:
      # Return a handle and a value.
      a = constant_op.constant(10)
      b = constant_op.constant(5)
      c = math_ops.multiply(a, b)
      h = session_ops.get_session_handle(c)
      v = math_ops.multiply(a, c)
      h, v = sess.run([h, v])
      self.assertEqual(50, h.eval())
      self.assertEqual(500, v)
  def testHandleCond(self):
    """Python-side branching on a fetched value, then feeding the handle."""
    with self.test_session() as sess:
      # Return a handle and a value
      a = constant_op.constant(10)
      b = constant_op.constant(5)
      p = math_ops.less(a, b)
      c = math_ops.multiply(a, b)
      h = session_ops.get_session_handle(c)
      p, h = sess.run([p, h])
      # Run by feeding a tensor handle.
      f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
      if p:
        y = math_ops.multiply(x, 10)
      else:
        y = math_ops.multiply(x, 100)
      result = sess.run(y, feed_dict={f: h.handle})
      self.assertEqual(5000, result)
  def testHandleForLoop(self):
    """Carrying state across iterations of a Python loop via handles."""
    with self.test_session() as sess:
      # Initialize a handle.
      a = constant_op.constant(0)
      h = session_ops.get_session_handle(a)
      h = sess.run(h)
      # Do some computation.
      f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
      # Must define the loop body outside the loop.
      h_x = session_ops.get_session_handle(math_ops.add(x, 1))
      for _ in range(100):
        # This exercises garbage collection.
        h = sess.run(h_x, feed_dict={f: h.handle})
      self.assertEqual(100, h.eval())
  def testHandleWhileLoop(self):
    """Python while-loop driven by a fetched predicate plus a handle."""
    with self.test_session() as sess:
      # Initialize a handle.
      a = constant_op.constant(0)
      h = session_ops.get_session_handle(a)
      h = sess.run(h)
      # Do some computation.
      f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
      b = constant_op.constant(100)
      p = math_ops.less(x, b)
      # Must define the loop body outside the loop.
      h_x = session_ops.get_session_handle(math_ops.add(x, 1))
      while True:
        rp, h = sess.run([p, h_x], feed_dict={f: h.handle})
        if not rp:
          break
      self.assertEqual(101, h.eval())
  def testHandleMover(self):
    """A handle created on another device can be fed to the same placeholder."""
    with self.test_session() as sess:
      # Return a handle.
      a = constant_op.constant(10)
      b = constant_op.constant(5)
      c = math_ops.multiply(a, b)
      h = session_ops.get_session_handle(c)
      h = sess.run(h)
      # Feed a tensor handle.
      f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
      y = math_ops.multiply(x, 10)
      self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
      # Feed another tensor handle.
      with ops.device(test.gpu_device_name()):
        a = constant_op.constant(10)
        h = session_ops.get_session_handle(a)
        h = sess.run(h)
        self.assertEqual(100, sess.run(y, feed_dict={f: h.handle}))
  def testHandleDelete(self):
    """delete() on a fetched handle releases the in-session tensor."""
    with self.test_session() as sess:
      # Return a handle.
      a = constant_op.constant(10)
      b = constant_op.constant(5)
      c = math_ops.multiply(a, b)
      h = session_ops.get_session_handle(c)
      sess.run(h).delete()
  def testHandleDeleteRaw(self):
    """Deletion via the raw handle string and delete_session_tensor."""
    with self.test_session() as sess:
      # Return a handle.
      a = constant_op.constant(10)
      b = constant_op.constant(5)
      c = math_ops.multiply(a, b)
      h = session_ops.get_session_handle(c)
      h = sess.run(h)
      # Delete using a raw tensor handle.
      raw_h = h.get_raw_handle()
      f, x = session_ops.delete_session_tensor(raw_h)
      sess.run(x, feed_dict={f: raw_h})
  def testMultiDevices(self):
    """Handles from different devices can be combined in one computation."""
    with self.test_session() as sess:
      with ops.device(test.gpu_device_name()):
        a = constant_op.constant(1.0)
        a_handle = sess.run(session_ops.get_session_handle(a))
      with ops.device("/cpu:0"):
        b = constant_op.constant(2.0)
        b_handle = sess.run(session_ops.get_session_handle(b))
      a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
      b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
      c = math_ops.add(a_t, b_t)
      c_handle = sess.run(
          session_ops.get_session_handle(c),
          feed_dict={a_p: a_handle.handle,
                     b_p: b_handle.handle})
      self.assertEqual(3.0, c_handle.eval())
  def testHandleGC(self):
    """Repeatedly overwriting a handle exercises garbage collection."""
    with self.test_session() as sess:
      # initial values live on CPU
      with ops.device("/cpu:0"):
        one = constant_op.constant(1, dtype=dtypes.float32)
        one_handle = sess.run(session_ops.get_session_handle(one))
        x_handle = sess.run(session_ops.get_session_handle(one))
      # addition lives on GPU
      with ops.device(test.gpu_device_name()):
        add_h1, add_t1 = session_ops.get_session_tensor(one_handle.handle,
                                                        dtypes.float32)
        add_h2, add_t2 = session_ops.get_session_tensor(x_handle.handle,
                                                        dtypes.float32)
        add_op = math_ops.add(add_t1, add_t2)
        add_output = session_ops.get_session_handle(add_op)
      # add 1 to tensor 20 times
      for _ in range(20):
        x_handle = sess.run(
            add_output,
            feed_dict={add_h1: one_handle.handle,
                       add_h2: x_handle.handle})
  def testHandlePlacement(self):
    """Handle ops inherit placement without explicit device scopes."""
    with self.test_session() as sess:
      a = constant_op.constant(1.0)
      a_handle_op = session_ops.get_session_handle(a)
      b = constant_op.constant(2.0)
      b_handle_op = session_ops.get_session_handle(b)
      a_handle = sess.run(a_handle_op)
      b_handle = sess.run(b_handle_op)
      a_p, a_t = session_ops.get_session_tensor(a_handle.handle, dtypes.float32)
      b_p, b_t = session_ops.get_session_tensor(b_handle.handle, dtypes.float32)
      c = math_ops.add(a_t, b_t)
      c_handle = sess.run(
          session_ops.get_session_handle(c),
          feed_dict={a_p: a_handle.handle,
                     b_p: b_handle.handle})
      self.assertEqual(3.0, c_handle.eval())
  def testFeedOneHandleDirectly(self):
    """A handle may be fed directly for the tensor it represents."""
    with self.test_session() as sess:
      a = constant_op.constant(10.0)
      b = constant_op.constant(5.0)
      c = math_ops.multiply(a, b)
      d = math_ops.multiply(c, c)
      h_c = sess.run(session_ops.get_session_handle(c))
      self.assertAllClose(2500.0, sess.run(d, feed_dict={c: h_c}))
  def testDirectHandleFeedOverlappingWithFetches(self):
    """Directly-fed handles coexist with fetching the same tensors."""
    with self.test_session() as sess:
      a = constant_op.constant(10.0)
      b = constant_op.constant(5.0)
      c = math_ops.multiply(a, b)
      h_c = sess.run(session_ops.get_session_handle(c))
      d = array_ops.identity(c)
      c_val = sess.run(c, feed_dict={c: h_c})
      self.assertAllClose(50.0, c_val)
      d_val = sess.run(d, feed_dict={c: h_c})
      self.assertAllClose(50.0, d_val)
      c_val, d_val = sess.run([c, d], feed_dict={c: h_c, d: 60.0})
      self.assertAllClose(50.0, c_val)
      self.assertAllClose(60.0, d_val)
      c_val, d_val = sess.run([c, d], feed_dict={c: 60.0, d: h_c})
      self.assertAllClose(60.0, c_val)
      self.assertAllClose(50.0, d_val)
      c_val, d_val = sess.run([c, d], feed_dict={c: h_c, d: h_c})
      self.assertAllClose(50.0, c_val)
      self.assertAllClose(50.0, d_val)
  def testFeedTwoHandlesDirectly(self):
    """Two handles can be fed (and swapped) in one run call."""
    with self.test_session() as sess:
      a = constant_op.constant(10.0)
      b = constant_op.constant(5.0)
      c = math_ops.multiply(a, b)
      d = math_ops.div(a, b)
      e = math_ops.subtract(c, d)
      h_c = sess.run(session_ops.get_session_handle(c))
      h_d = sess.run(session_ops.get_session_handle(d))
      self.assertAllClose(48.0, sess.run(e, feed_dict={c: h_c, d: h_d}))
      self.assertAllClose(-48.0, sess.run(e, feed_dict={c: h_d, d: h_c}))
  def testFeedHandleToVariableDirectly(self):
    """A handle feed overrides a variable's current value for that run."""
    with self.test_session() as sess:
      a = variables.Variable(12.0)
      inc_a = state_ops.assign_add(a, 2.0)
      b = math_ops.add(a, 5.0)
      sess.run(a.initializer)
      h_a_read = sess.run(session_ops.get_session_handle(a.read_value()))
      self.assertAllClose(12.0, sess.run(a))
      self.assertAllClose(17.0, sess.run(b, feed_dict={a: h_a_read}))
      sess.run(inc_a)
      self.assertAllClose(19.0, sess.run(b, feed_dict={a: h_a_read}))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
DongjunLee/kino-bot
|
kino/skills/weather.py
|
1
|
3681
|
# -*- coding: utf-8 -*-
import arrow
import forecastio
from geopy.geocoders import Nominatim
from hbconfig import Config
from urllib import parse
from ..open_api.airkoreaPy import AirKorea
from ..slack.resource import MsgResource
from ..slack.slackbot import SlackerAdapter
from ..slack.template import MsgTemplate
from ..utils.data_handler import DataHandler
from ..utils.logger import Logger
from ..utils.profile import Profile
class Weather(object):
    """Slack-facing weather skill.

    Resolves the user's configured location to coordinates (with caching),
    pulls forecasts from the Dark Sky API and air-quality readings from
    AirKorea, and posts formatted messages through the Slack adapter.
    """
    def __init__(self, slackbot=None):
        self.logger = Logger().get_logger()
        self.data_handler = DataHandler()
        self.profile = Profile()
        # Allow injecting a shared adapter; otherwise create a fresh one.
        if slackbot is None:
            self.slackbot = SlackerAdapter()
        else:
            self.slackbot = slackbot
    def forecast(self, timely="current"):
        """Send a weather forecast to Slack.

        :param timely: "current" (single reading), "daily" (hourly data)
            or "weekly" (daily data). Any other value sends nothing.
        """
        cache_data = self.data_handler.read_cache()
        user_location = self.profile.get_location()
        parsed_user_location = parse.quote(user_location)  # user_location is Korean
        if parsed_user_location in cache_data:
            # Reuse previously geocoded coordinates.
            address = cache_data[parsed_user_location]["address"]
            lat = cache_data[parsed_user_location]["lat"]
            lon = cache_data[parsed_user_location]["lon"]
        else:
            geolocator = Nominatim(user_agent="kino-bot")
            location = geolocator.geocode(user_location)
            address = location.address
            lat = location.latitude
            lon = location.longitude
            self.data_handler.edit_cache(
                (parsed_user_location, {"address": address, "lat": lat, "lon": lon})
            )
        api_key = Config.open_api.dark_sky.TOKEN
        dark_sky = forecastio.load_forecast(api_key, lat, lon)
        if timely == "current":
            currently = dark_sky.currently()
            self.__forecast(currently, timely, address)
        elif timely == "daily":
            hourly = dark_sky.hourly()
            self.__forecast(hourly, timely, address)
        elif timely == "weekly":
            daily = dark_sky.daily()
            self.__forecast(daily, timely, address)
    def __forecast(self, forecast, timely, address):
        """Format one forecast block and post it to Slack."""
        icon = forecast.icon
        summary = forecast.summary
        if timely == "current":
            temperature = str(forecast.temperature) + "도"
            fallback = summary + " " + temperature
        else:
            temperature = self.__hourly_temperature(forecast)
            fallback = summary + " " + temperature
        attachments = MsgTemplate.make_weather_template(
            address, icon, summary, temperature=temperature, fallback=fallback
        )
        self.slackbot.send_message(attachments=attachments)
    def __hourly_temperature(self, forecast):
        """Return a newline-joined list of 3-hourly temperatures over 24h."""
        hourly_temp = []
        h = forecast.data
        for i in range(0, 24, 3):
            time = arrow.get(h[i].d["time"], tzinfo=self.profile.get_timezone()).format(
                "D일 H시"
            )
            temperature = h[i].d["temperature"]
            hourly_temp.append("- " + time + ": " + str(temperature) + "도")
        hourly_temp = "\n".join(hourly_temp)
        return hourly_temp
    def air_quality(self):
        """Fetch air-quality data for the user's station and post to Slack."""
        api_key = Config.open_api.airkorea.TOKEN
        airkorea = AirKorea(api_key)
        station_name = self.profile.get_location(station=True)
        try:
            response = airkorea.forecast(station_name)
            attachments = MsgTemplate.make_air_quality_template(station_name, response)
            self.slackbot.send_message(attachments=attachments)
        except Exception:
            # Fix: was `except BaseException` (which swallowed
            # KeyboardInterrupt/SystemExit) with a misspelled log label
            # "air_auality".
            self.logger.exception("air_quality")
            self.slackbot.send_message(text=MsgResource.ERROR)
|
mit
|
DemocracyClub/EveryElection
|
every_election/apps/organisations/boundaries/osni.py
|
1
|
1622
|
import json
import urllib.request
from urllib.error import HTTPError
from retry import retry
from django.contrib.gis.geos import GEOSGeometry
from storage.shapefile import convert_geom_to_multipolygon
class OsniLayer:
    """Loads boundary features from an OSNI (ArcGIS) GeoJSON endpoint.

    Each feature is converted to a MultiPolygon and collected on
    ``self.features`` as a dict with geometry, name, OBJECTID and
    (optionally) a GSS code.
    """
    @retry(HTTPError, tries=2, delay=30)
    def get_data_from_url(self, url):
        """Fetch ``url`` and return the raw response body (bytes).

        A 202 response is converted to HTTPError so the ``@retry``
        decorator polls the server again after the delay.
        """
        with urllib.request.urlopen(url, timeout=30) as response:
            """
            When an ArcGIS server can't generate a response
            within X amount of time, it will return a 202 ACCEPTED
            response with a body like
            {
            "processingTime": "27.018 seconds",
            "status": "Processing",
            "generating": {}
            }
            and expects the client to poll it.
            """
            if response.code == 202:
                raise HTTPError(
                    url, response.code, response.msg, response.headers, response.fp
                )
            data = response.read()
        return data
    def __init__(self, url, gss_field, name_field):
        """:param url: ArcGIS endpoint returning a GeoJSON FeatureCollection
        :param gss_field: property holding the GSS code, or falsy to skip it
        :param name_field: property holding the feature's display name
        """
        ds = json.loads(self.get_data_from_url(url).decode("utf-8"))
        self.features = []
        for feature in ds["features"]:
            geom = GEOSGeometry(json.dumps(feature["geometry"]), srid=4326)
            geom = convert_geom_to_multipolygon(geom)
            rec = {
                "geometry": geom,
                "name": feature["properties"][name_field],
                "OBJECTID": feature["properties"]["OBJECTID"],
            }
            if gss_field:
                rec["gss"] = feature["properties"][gss_field]
            self.features.append(rec)
|
bsd-3-clause
|
SachaMPS/django-cms
|
cms/utils/admin.py
|
22
|
4578
|
# -*- coding: utf-8 -*-
import json
from django.contrib.auth import get_permission_codename
from django.contrib.sites.models import Site
from django.http import HttpResponse
from django.shortcuts import render
from django.utils.encoding import smart_str
from cms.constants import PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DIRTY
from cms.models import Page, GlobalPagePermission
from cms.utils import get_language_from_request, get_language_list, get_cms_setting
from cms.utils.compat import DJANGO_1_7
NOT_FOUND_RESPONSE = "NotFound"
def jsonify_request(response):
    """Wrap any response in a 200 JSON response so jQuery can handle it.

    The JSON payload carries two attributes:
    * status: original response status code
    * content: original response content
    """
    # Django < 1.8 only exposed the charset via the private ``_charset``.
    charset = response._charset if DJANGO_1_7 else response.charset
    payload = {'status': response.status_code,
               'content': smart_str(response.content, charset)}
    return HttpResponse(json.dumps(payload), content_type="application/json")
# CSS classes applied to page-tree items to show their publication state.
publisher_classes = {
    PUBLISHER_STATE_DIRTY: "publisher_dirty",
    PUBLISHER_STATE_PENDING: "publisher_pending",
}
def get_admin_menu_item_context(request, page, filtered=False, language=None):
    """
    Used for rendering the page tree, inserts into context everything what
    we need for single item
    """
    has_add_page_permission = page.has_add_permission(request)
    has_move_page_permission = page.has_move_page_permission(request)
    site = Site.objects.get_current()
    lang = get_language_from_request(request)
    metadata = ""
    if get_cms_setting('PERMISSION'):
        # jstree metadata generator
        md = []
        if not has_move_page_permission:
            md.append(('valid_children', False))
            md.append(('draggable', False))
        if md:
            # just turn it into simple javascript object
            metadata = "{" + ", ".join(map(lambda e: "%s: %s" % (e[0],
            isinstance(e[1], bool) and str(e[1]) or e[1].lower() ), md)) + "}"
    has_add_on_same_level_permission = False
    opts = Page._meta
    if get_cms_setting('PERMISSION'):
        # Cache the global add-permission query on the request user so that
        # rendering many tree items performs only one DB lookup.
        if hasattr(request.user, '_global_add_perm_cache'):
            global_add_perm = request.user._global_add_perm_cache
        else:
            global_add_perm = GlobalPagePermission.objects.user_has_add_permission(
                request.user, page.site_id).exists()
            request.user._global_add_perm_cache = global_add_perm
        if request.user.has_perm(opts.app_label + '.' + get_permission_codename('add', opts)) and global_add_perm:
            has_add_on_same_level_permission = True
    from cms.utils import permissions
    if not has_add_on_same_level_permission and page.parent_id:
        # Fall back to the per-page "add" permission on the parent page.
        has_add_on_same_level_permission = permissions.has_generic_permission(page.parent_id, request.user, "add",
                                                                              page.site_id)
    context = {
        'page': page,
        'site': site,
        'lang': lang,
        'filtered': filtered,
        'metadata': metadata,
        'preview_language': language,
        'has_change_permission': page.has_change_permission(request),
        'has_publish_permission': page.has_publish_permission(request),
        'has_delete_permission': page.has_delete_permission(request),
        'has_move_page_permission': has_move_page_permission,
        'has_add_page_permission': has_add_page_permission,
        'has_add_on_same_level_permission': has_add_on_same_level_permission,
        'CMS_PERMISSION': get_cms_setting('PERMISSION'),
    }
    return context
def render_admin_menu_item(request, page, template=None, language=None):
    """
    Renders requested page item for the tree. This is used in case when item
    must be reloaded over ajax.
    """
    if not template:
        template = "admin/cms/page/tree/menu_fragment.html"
    if not page.pk:
        return HttpResponse(NOT_FOUND_RESPONSE)  # Not found - tree will remove item
    # languages
    from cms.utils import permissions
    languages = get_language_list(page.site_id)
    context = {
        'has_add_permission': permissions.has_page_add_permission(request),
        'site_languages': languages,
    }
    # Filter state may arrive via GET or POST depending on the tree widget.
    filtered = 'filtered' in request.GET or 'filtered' in request.POST
    context.update(get_admin_menu_item_context(request, page, filtered, language))
    return render(request, template, context)
|
bsd-3-clause
|
183amir/gahshomar
|
src/GahShomar/gs_main_window.py
|
1
|
10779
|
#!/usr/bin/env python3
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: s; tab-width: 4 -*-
#
# Copyright (C) 2014 Amir Mohammadi <[email protected]>
#
# Gahshomar is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gahshomar is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
logger = logging.getLogger(__name__)
from gi.repository import Gtk, GLib, Gio
from .gs_calendar_widget import PersianCalendarWidget, GeorgianCalendarWidget
from .gs_day_widget import PersianDayWidget, GeorgianDayWidget
from .gs_events_handler import EventsHandler
from .gs_indicator import GahShomarIndicator, USE_IND
from . import gs_settings_page
from .gs_plugin_manager import GSPluginManager
MENU_XML = """
<interface>
<menu id='app-menu'>
<section>
<item>
<attribute name='label' translatable='yes'>_About</attribute>
<attribute name='action'>app.about</attribute>
</item>
<item>
<attribute name='label' translatable='yes'>_Preferences</attribute>
<attribute name='action'>app.preferences</attribute>
</item>
</section>
<section>
<item>
<attribute name='label' translatable='yes'>_Quit</attribute>
<attribute name='action'>app.quit</attribute>
<attribute name='accel'><Primary>q</attribute>
</item>
</section>
</menu>
</interface>
"""
ABOUT_PAGE = '''
<interface>
<object class="GtkAboutDialog" id="aboutdialog1">
<property name="can_focus">False</property>
<property name="title" translatable="yes">درباره برنامه</property>
<property name="type_hint">dialog</property>
<property name="program_name">گاهشمار</property>
<property name="version">3.0.4</property>
<property name="copyright" translatable="yes">Amir Mohammadi <[email protected]></property>
<property name="comments" translatable="yes">گاهشمار (تقویم) ایرانی</property>
<property name="website">http://183amir.github.io/gahshomar/</property>
<property name="authors">Amir Mohammadi</property>
<property name="logo">{FULL_PATH}/data/icons/gahshomar-logo.png</property>
<property name="license_type">gpl-2-0</property>
<child internal-child="vbox">
<object class="GtkBox" id="aboutdialog-vbox1">
<property name="can_focus">False</property>
<property name="orientation">vertical</property>
<property name="spacing">2</property>
<child internal-child="action_area">
<object class="GtkButtonBox" id="aboutdialog-action_area1">
<property name="can_focus">False</property>
<property name="layout_style">end</property>
<child>
<placeholder/>
</child>
<child>
<placeholder/>
</child>
</object>
<packing>
<property name="expand">False</property>
<property name="fill">False</property>
<property name="position">0</property>
</packing>
</child>
<child>
<placeholder/>
</child>
</object>
</child>
</object>
</interface>
'''
class MainWindow(Gtk.ApplicationWindow):
    """Top-level Gahshomar window.

    Shows Persian and Gregorian day/calendar widgets in a grid, adapts its
    chrome to Unity vs. other desktops, and integrates with an
    app-indicator when available.
    """
    def __init__(self, application, FULL_PATH, config, date=None):
        super().__init__(title='گاهشمار', application=application)
        if date is None:
            date = datetime.date.today()
        self.date = date
        # config values
        self.full_path = FULL_PATH
        self.config = config
        self.app = application
        pday = PersianDayWidget()
        gday = GeorgianDayWidget()
        self.day_widgets = [pday, gday]
        pcal = PersianCalendarWidget(date)
        pcal.parent = self
        gcal = GeorgianCalendarWidget(date)
        gcal.parent = self
        self.calendars = [pcal, gcal]
        self.handler = EventsHandler(self)
        self.main_grid = Gtk.Grid()
        main_grid = self.main_grid
        self.add(main_grid)
        main_grid.set_column_homogeneous(True)
        main_grid.set_column_spacing(spacing=20)
        # main_grid.set_row_homogeneous(True)
        main_grid.set_row_spacing(spacing=20)
        # setup appindicator
        self.visible = True
        self.setup_appindicator()
        # check if unity is running
        import os
        # NOTE(review): environ.get may return None here, which would raise
        # AttributeError on .lower() — confirm XDG_CURRENT_DESKTOP is always
        # set on supported desktops.
        xdg_current_desktop = os.environ.get('XDG_CURRENT_DESKTOP').lower()
        self.xdg_current_desktop = xdg_current_desktop
        self.draw_interface()
        # update interface every 5 seconds
        GLib.timeout_add_seconds(int(self.config['Global']['ping_frequency']),
                                 self.handler.update_everything)
        # set the icon for the window
        self.connect('style-set', self.set_icon_)
        try:
            self.plugin_manager = GSPluginManager(self)
        except Exception:
            logger.exception(Exception)
        self.gs_settings_win = gs_settings_page.SettingsWindow(self.app)
    def draw_interface(self):
        """(Re)attach the day and calendar widgets to the main grid; on
        Unity an extra top row is reserved for the toolbar."""
        xdg_current_desktop = self.xdg_current_desktop
        if 'unity' in xdg_current_desktop:
            self.offset = 1
        else:
            self.offset = 0
        # logger.debug('main_grid self.offset is {}'.format(self.offset))
        main_grid = self.main_grid
        for _ in range(len(self.calendars)+1):
            main_grid.remove_column(0)
        for i, v in enumerate(self.day_widgets):
            main_grid.attach(v, i, 0+self.offset, 1, 1)
        for i, v in enumerate(self.calendars):
            main_grid.attach(v, i, 1+self.offset, 1, 1)
        # main_grid.attach(Gtk.VSeparator(), 1, 0, 1, 2)
        self.setup_header_bar()
    def setup_header_bar(self):
        """Build either a Unity toolbar row or a GTK HeaderBar with the
        "today" (and optionally close) buttons."""
        xdg_current_desktop = self.xdg_current_desktop
        today_button = Gtk.Button(label='امروز')
        today_button.connect("clicked", self.set_today)
        close_button = Gtk.Button.new_from_icon_name(
            'window-close-symbolic', Gtk.IconSize.BUTTON)
        close_button.connect('clicked', self.toggle_main_win)
        if 'unity' in xdg_current_desktop:
            toolbar = Gtk.Toolbar()
            sep = Gtk.SeparatorToolItem()
            sep.set_expand(True)
            sep.set_draw(False)
            toolbar.add(sep)
            tb_today = Gtk.ToolButton.new(today_button)
            tb_today.connect("clicked", self.set_today)
            toolbar.add(tb_today)
            self.connect("delete-event", self.toggle_main_win)
            # tb_close = Gtk.ToolButton.new(close_button)
            # tb_close.connect('clicked', self.toggle_main_win)
            # toolbar.add(tb_close)
            self.main_grid.attach(toolbar, 0, 0, 2, 1)
        else:
            # set header bar
            self.hb = Gtk.HeaderBar()
            self.hb.props.title = 'گاهشمار'
            if USE_IND:
                # With an indicator, closing only hides the window.
                self.hb.props.show_close_button = False
                self.hb.pack_end(close_button)
            else:
                self.hb.props.show_close_button = True
            self.hb.pack_end(today_button)
            self.set_titlebar(self.hb)
    def on_settings_clicked(self):
        """Show the settings dialog modally, then persist the settings."""
        dialog = self.gs_settings_win.get_dialog()
        self.dialog = dialog
        # dialog.__init__()
        dialog.set_transient_for(self)
        # dialog.set_decorated(True)
        dialog.run()
        dialog.destroy()
        self.config.write_settings()
        logger.debug('Wrote the settings')
    def set_today(self, *args):
        """Jump all widgets back to today's date."""
        self.handler.update_everything(datetime.date.today())
    def toggle_main_win(self, *args):
        """Hide/show the window when running with an app indicator.

        Returns True to stop further handling of the delete-event."""
        if not USE_IND:
            return
        if self.visible:
            self.hide()
            self.visible = False
        else:
            self.show_all()
            self.present()
            self.visible = True
        return True
    def setup_appindicator(self):
        """Create the app-indicator tied to this window and current date."""
        self.ind = GahShomarIndicator(self, self.date)
    def set_icon_(self, *args):
        """Load the themed 'gahshomar' icon and set it as the window icon."""
        # day = khayyam.JalaliDate.today().day
        icon = Gtk.IconTheme.load_icon(
            Gtk.IconTheme(),
            'gahshomar',
            512, 0)
        self.set_icon(icon)
class GahShomar(Gtk.Application):
    """Gtk.Application wrapper: builds the app menu, the about/preferences
    actions and the main window, optionally starting minimized."""
    def __init__(self, FULL_PATH, config, minimized=False):
        Gtk.Application.__init__(
            self, application_id="com.mohammadi.calendar.gahshomar",
            inactivity_timeout=3000, register_session=True)
        self.FULL_PATH = FULL_PATH
        self.config = config
        self.minimized = minimized
        self.connect("startup", self.startup)
        self.connect("activate", self.activate)
    def about_activated(self, action, data=None, dialog=None):
        """Show the About dialog (built once at startup) modally."""
        dialog.set_transient_for(self.win)
        dialog.run()
        dialog.destroy()
    def preferences_activated(self, action, data=None):
        """Delegate to the main window's settings dialog."""
        self.win.on_settings_clicked()
        # import sys
        # import subprocess
        # if sys.platform.startswith('win32'):
        #     xdgopen = 'start'
        # if sys.platform.startswith('darwin'):
        #     xdgopen = 'open'
        # else:
        #     xdgopen = 'xdg-open'
        # args = [xdgopen, '{}'.format(path)]
        # subprocess.Popen(args)
    def new_window(self):
        """Create the main application window."""
        self.win = MainWindow(self, self.FULL_PATH, self.config)
    def show_window(self):
        """Show the window, unless the app was started minimized."""
        if not self.minimized:
            self.win.show_all()
        else:
            self.win.visible = False
    def activate(self, data=None):
        self.show_window()
    def startup(self, data=None):
        """Build the about dialog, register app actions and the app menu,
        then create the main window."""
        builder = Gtk.Builder()
        builder.add_from_string(ABOUT_PAGE.format(FULL_PATH=self.FULL_PATH))
        dialog = builder.get_object('aboutdialog1')
        action = Gio.SimpleAction(name="about")
        action.connect("activate", self.about_activated, dialog)
        self.add_action(action)
        action = Gio.SimpleAction(name="preferences")
        action.connect("activate", self.preferences_activated)
        self.add_action(action)
        action = Gio.SimpleAction(name="quit")
        action.connect("activate", lambda a, b: self.quit())
        self.add_action(action)
        builder = Gtk.Builder()
        builder.add_from_string(MENU_XML)
        self.set_app_menu(builder.get_object("app-menu"))
        self.new_window()
|
gpl-3.0
|
migueldiascosta/pymatgen-db
|
matgendb/vv/validate.py
|
1
|
25272
|
"""
Collection validator
"""
__author__ = "Dan Gunter"
__copyright__ = "Copyright 2012-2013, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Dan Gunter"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "1/31/13"
import pymongo
import random
import re
import sys
import collections
from .util import DoesLogging, total_size
#from .mquery import *
from smoqe.query import *
import six
class DBError(Exception):
    """Raised when database access fails more often than the caller tolerates."""
class ValidatorSyntaxError(Exception):
    """Signal a syntax error in the configuration of a Validator."""

    def __init__(self, target, desc):
        # Keep the exact message format: description first, then the
        # offending configuration fragment.
        super(ValidatorSyntaxError, self).__init__(
            'Invalid syntax: {} -> "{}"'.format(desc, target))
class PythonMethod(object):
    """Encapsulate an external Python method that will be run on our target
    MongoDB collection to perform arbitrary types of validation.

    A method constraint has the form ``@<method> [<param> ...]``.
    """

    _PATTERN = re.compile(r'\s*(@\w+)(\s+\w+)*')

    # BUG FIX: these two strings were previously two separate statements,
    # so the second literal was silently discarded and the error message
    # truncated; parenthesize to get implicit concatenation.
    CANNOT_COMBINE_ERR = ('Call to a Python method cannot be combined '
                          'with any other constraints')
    BAD_CONSTRAINT_ERR = 'Invalid constraint (must be: @<method> [<param> ..])'

    @classmethod
    def constraint_is_method(cls, text):
        """Check from the text of the constraint whether it is
        a Python method, as opposed to a 'normal' constraint.

        :return: True if it is, False if not
        """
        return cls._PATTERN.match(text) is not None

    def __init__(self, text):
        """Create new instance from a raw constraint string.

        :param text: Raw constraint, e.g. ``"@check_things a b"``
        :raises: ValidatorSyntaxError if `text` is not a method constraint
        """
        if not self._PATTERN.match(text):
            raise ValidatorSyntaxError(text, self.BAD_CONSTRAINT_ERR)
        # raw string avoids the invalid-escape-sequence deprecation for \s
        tokens = re.split(r'@?\s+', text)
        if len(tokens) < 1:
            raise ValidatorSyntaxError(text, self.BAD_CONSTRAINT_ERR)
        self.method = tokens[0]
        self.params = tokens[1:]
def mongo_get(rec, key, default=None):
    """
    Get value from dict using MongoDB dot-separated path semantics.
    For example:

    >>> assert mongo_get({'a': {'b': 1}, 'x': 2}, 'a.b') == 1
    >>> assert mongo_get({'a': {'b': 1}, 'x': 2}, 'x') == 2
    >>> assert mongo_get({'a': {'b': 1}, 'x': 2}, 'a.b.c') is None

    :param rec: mongodb document
    :param key: path to mongo value
    :param default: default to return if not found
    :return: value, potentially nested, or default if not found
    :raise: ValueError, if record is not a dict.
    """
    # `collections.Mapping` was removed from the top-level module in
    # Python 3.10; prefer `collections.abc` and fall back for Python 2.
    try:
        mapping_type = collections.abc.Mapping
    except AttributeError:
        mapping_type = collections.Mapping
    if not rec:
        return default
    if not isinstance(rec, mapping_type):
        raise ValueError('input record must act like a dict')
    if '.' not in key:
        return rec.get(key, default)
    for key_part in key.split('.'):
        # descend one level per path component; bail out with the default
        # as soon as the path dead-ends
        if not isinstance(rec, mapping_type):
            return default
        if key_part not in rec:
            return default
        rec = rec[key_part]
    return rec
class Projection(object):
    """Fields on which to project the query results."""

    def __init__(self):
        self._fields = {}   # field name -> 1 (include in projection)
        self._slices = {}   # field name -> bound for a mongo $slice

    def add(self, field, op=None, val=None):
        """Update report fields to include new one, if it doesn't already.

        :param field: The field to include
        :type field: Field
        :param op: Operation
        :type op: ConstraintOperator
        :param val: Constraint value (used for $slice bounds and variables)
        :return: None
        """
        if field.has_subfield():
            self._fields[field.full_name] = 1
        else:
            self._fields[field.name] = 1
        if op and op.is_size() and not op.is_variable():
            # get minimal part of array with slicing,
            # but cannot use slice with variables
            self._slices[field.name] = val + 1
        if op and op.is_variable():
            # add the variable too
            self._fields[val] = 1

    def to_mongo(self):
        """Translate projection to MongoDB query form.

        :return: Dictionary to put into a MongoDB JSON query
        :rtype: dict
        """
        # BUG FIX: this previously called `copy.copy` without importing
        # `copy`, raising NameError at runtime; a shallow dict copy is
        # equivalent. (Plain .items() iterates identically to the old
        # six.iteritems on both Python versions.)
        d = dict(self._fields)
        for k, v in self._slices.items():
            d[k] = {'$slice': v}
        return d
class ConstraintViolation(object):
    """A single constraint violation, with no metadata."""

    def __init__(self, constraint, value, expected):
        """Create new constraint violation.

        :param constraint: The constraint that was violated
        :type constraint: Constraint
        :param value: Value actually observed
        :param expected: Value the constraint required
        """
        self._constraint, self._got, self._expected = constraint, value, expected

    @property
    def field(self):
        """Name of the field the violated constraint applies to."""
        return self._constraint.field.name

    @property
    def op(self):
        """Display form of the violated constraint's operator."""
        return self._constraint.op.display_op

    @property
    def got_value(self):
        """Value observed in the offending record."""
        return self._got

    @property
    def expected_value(self):
        """Value the constraint expected (read/write)."""
        return self._expected

    @expected_value.setter
    def expected_value(self, value):
        self._expected = value
class NullConstraintViolation(ConstraintViolation):
    """Empty constraint violation, for when there are no constraints."""

    def __init__(self):
        # Placeholder 'NA' constraint / value / expected triple.
        super(NullConstraintViolation, self).__init__(
            Constraint('NA', '=', 'NA'), 'NA', 'NA')
class ConstraintViolationGroup(object):
    """A group of constraint violations with metadata."""

    def __init__(self):
        """Create an empty group."""
        self._viol = []      # list of (violation, record) pairs
        # These are read/write metadata for the group.
        self.subject = ''
        self.condition = None

    def add_violations(self, violations, record=None):
        """Add constraint violations and associated record.

        :param violations: List of violations
        :type violations: list(ConstraintViolation)
        :param record: Associated record
        :type record: dict
        :rtype: None
        """
        rec = record if record is not None else {}
        self._viol.extend((v, rec) for v in violations)

    def __iter__(self):
        return iter(self._viol)

    def __len__(self):
        return len(self._viol)
class ProgressMeter(object):
    """Simple progress tracker that periodically reports to stderr."""

    def __init__(self, num, fmt):
        """
        :param num: Report every `num` updates; 0 disables reporting
        :param fmt: Format string given positional args plus the keywords
                    `subject` and `count`
        """
        self._n = num
        self._subject = '?'
        self._fmt = fmt
        self._count = 0   # updates since the last report
        self._total = 0   # updates overall

    @property
    def count(self):
        """Total number of updates ever recorded."""
        return self._total

    def set_subject(self, subj):
        """Set the subject name interpolated into reports."""
        self._subject = subj

    def update(self, *args):
        """Record one event; emit a report every `num` events."""
        self._count += 1
        self._total += 1
        if self._n == 0 or self._count < self._n:
            return
        line = self._fmt.format(*args, subject=self._subject, count=self.count)
        sys.stderr.write(line)
        sys.stderr.write('\n')
        sys.stderr.flush()
        self._count = 0
class ConstraintSpec(DoesLogging):
    """Specification of a set of constraints for a collection."""

    FILTER_SECT = 'filter'
    CONSTRAINT_SECT = 'constraints'
    SAMPLE_SECT = 'sample'

    def __init__(self, spec):
        """Create specification from a configuration.

        :param spec: Configuration for a single collection
        :type spec: dict
        :raise: ValueError if specification is wrong
        """
        DoesLogging.__init__(self, name='mg.ConstraintSpec')
        self._sections, _slist = {}, []
        for item in spec:
            self._log.debug("build constraint from: {}".format(item))
            # dict entries carry their own filter/constraints/sample keys;
            # anything else is a bare constraint expression
            if isinstance(item, dict):
                self._add_complex_section(item)
            else:
                self._add_simple_section(item)

    def __iter__(self):
        """Iterate over all sections, flattened across filter keys.

        :rtype: list(ConstraintSpecSection)
        """
        flattened = [s for values in self._sections.values() for s in values]
        return iter(flattened)

    def _add_complex_section(self, item):
        """Add a section that has a filter and set of constraints.

        :raise: ValueError if filter or constraints is missing
        """
        # extract filter and constraints
        try:
            fltr = item[self.FILTER_SECT]
        except KeyError:
            raise ValueError("configuration requires '{}'".format(self.FILTER_SECT))
        sample = item.get(self.SAMPLE_SECT, None)
        constraints = item.get(self.CONSTRAINT_SECT, None)
        section = ConstraintSpecSection(fltr, constraints, sample)
        # group sections sharing the same filter key
        self._sections.setdefault(section.get_key(), []).append(section)

    def _add_simple_section(self, item):
        """Add a filter-less section holding a single constraint item."""
        self._sections[None] = [ConstraintSpecSection(None, item, None)]
class ConstraintSpecSection(object):
    """One section of a constraint spec: a filter, its constraints,
    and an optional sampling directive.
    """

    def __init__(self, fltr, constraints, sample):
        """
        :param fltr: Filter condition(s): a string, a sequence of strings, or None
        :param constraints: Constraint expressions applied to matching records
        :param sample: Keyword dict used to build a Sampler, or None
        """
        self._filter, self._constraints, self._sampler = fltr, constraints, sample
        # BUG FIX: `basestring` does not exist on Python 3 (NameError);
        # fall back to `str` so the isinstance check works on both majors.
        try:
            string_types = basestring  # noqa: F821 (Python 2 only)
        except NameError:
            string_types = str
        # make condition(s) into a hashable tuple key
        if isinstance(fltr, string_types):
            self._key = (fltr,)
        elif fltr is None:
            self._key = None
        else:
            self._key = tuple(fltr)
        # parse sample keywords into class, if present
        if sample:
            self._sampler = Sampler(**sample)

    def get_key(self):
        """Return the hashable key identifying this section's filter."""
        return self._key

    @property
    def sampler(self):
        return self._sampler

    @property
    def filters(self):
        return self._filter

    @property
    def constraints(self):
        return self._constraints
class Validator(DoesLogging):
    """Validate a collection."""

    class SectionParts:
        """Encapsulate the tuple of information for each section of filters,
        constraints, etc. within a collection.
        """

        def __init__(self, cond, body, sampler, report_fields):
            """Create new initialized set of parts.

            :param cond: Condition to filter records
            :type cond: MongoQuery
            :param body: Main set of constraints
            :type body: MongoQuery
            :param sampler: Sampling class if any
            :type sampler: Sampler
            :param report_fields: Fields to report on
            :type report_fields: list
            """
            self.cond, self.body, self.sampler, self.report_fields = \
                cond, body, sampler, report_fields

    def __init__(self, max_violations=50, max_dberrors=10, aliases=None, add_exists=False):
        """
        :param max_violations: Max violating records fetched per section
            (values <= 0 mean no limit)
        :param max_dberrors: Max tolerated DB errors before raising DBError
        :param aliases: Mapping of field-name aliases
        :param add_exists: If True, add existence clauses to the main query
        """
        DoesLogging.__init__(self, name='mg.validator')
        self.set_progress(0)
        self._aliases = aliases if aliases else {}
        self._max_viol = max_violations
        if self._max_viol > 0:
            self._find_kw = {'limit': self._max_viol}
        else:
            self._find_kw = {}
        self._max_dberr = max_dberrors
        self._base_report_fields = {'_id': 1, 'task_id': 1}
        self._add_exists = add_exists

    def set_aliases(self, a):
        """Set aliases, wrapping any error in ValueError.

        BUG FIX: this class previously defined ``set_aliases`` twice; the
        second definition shadowed the first and assigned ``self.aliases``
        (an attribute nothing else reads) instead of ``self._aliases``, so
        aliases set through it were silently ignored.  The merged version
        assigns the attribute actually used when building queries.
        """
        try:
            self._aliases = a
        except Exception as err:
            raise ValueError("invalid value: {}".format(err))

    def set_progress(self, num):
        """Report progress every `num` bad records.

        :param num: Report interval
        :type num: int
        :return: None
        """
        report_str = 'Progress for {subject}: {count:d} invalid, {:d} db errors, {:d} bytes'
        self._progress = ProgressMeter(num, report_str)

    def num_violations(self):
        """Number of violations seen since the last progress report."""
        if self._progress is None:
            return 0
        # NOTE(review): reads the meter's private since-last-report counter,
        # which resets to 0 at every report; `self._progress.count` would
        # give the overall total -- confirm which is intended.
        return self._progress._count

    def validate(self, coll, constraint_spec, subject='collection'):
        """Validation of a collection.
        This is a generator that yields ConstraintViolationGroups.

        :param coll: Mongo collection
        :type coll: pymongo.Collection
        :param constraint_spec: Constraint specification
        :type constraint_spec: ConstraintSpec
        :param subject: Name of the thing being validated
        :type subject: str
        :return: Sets of constraint violation, one for each constraint_section
        :rtype: ConstraintViolationGroup
        :raises: ValidatorSyntaxError
        """
        self._spec = constraint_spec
        self._progress.set_subject(subject)
        self._build(constraint_spec)
        for sect_parts in self._sections:
            cvg = self._validate_section(subject, coll, sect_parts)
            if cvg is not None:
                yield cvg

    def _validate_section(self, subject, coll, parts):
        """Validate one section of a spec.

        :param subject: Name of subject
        :type subject: str
        :param coll: The collection to validate
        :type coll: pymongo.Collection
        :param parts: Section parts
        :type parts: Validator.SectionParts
        :return: Group of constraint violations, if any, otherwise None
        :rtype: ConstraintViolationGroup or None
        """
        cvgroup = ConstraintViolationGroup()
        cvgroup.subject = subject
        # If the constraint is an 'import' of code, treat it differently here
        # NOTE(review): `_is_python` expects a list and `_run_python` is not
        # defined on this class -- this path looks unfinished; confirm.
        if self._is_python(parts):
            num_found = self._run_python(cvgroup, coll, parts)
            return None if num_found == 0 else cvgroup
        query = parts.cond.to_mongo(disjunction=False)
        query.update(parts.body.to_mongo())
        cvgroup.condition = parts.cond.to_mongo(disjunction=False)
        self._log.debug('Query spec: {}'.format(query))
        self._log.debug('Query fields: {}'.format(parts.report_fields))
        # Find records that violate 1 or more constraints
        cursor = coll.find(query, fields=parts.report_fields, **self._find_kw)
        if parts.sampler is not None:
            cursor = parts.sampler.sample(cursor)
        nbytes, num_dberr, num_rec = 0, 0, 0
        while 1:
            try:
                record = six.advance_iterator(cursor)
                nbytes += total_size(record)
                num_rec += 1
            except StopIteration:
                self._log.info("collection {}: {:d} records, {:d} bytes, {:d} db-errors"
                               .format(subject, num_rec, nbytes, num_dberr))
                break
            except pymongo.errors.PyMongoError as err:
                num_dberr += 1
                if num_dberr > self._max_dberr > 0:
                    raise DBError("Too many errors")
                self._log.warn("DB.{:d}: {}".format(num_dberr, err))
                continue
            # report progress
            if self._progress:
                self._progress.update(num_dberr, nbytes)
            # get reasons for badness
            violations = self._get_violations(parts.body, record)
            cvgroup.add_violations(violations, record)
        return None if nbytes == 0 else cvgroup

    def _get_violations(self, query, record):
        """Reverse-engineer the query to figure out why a record was selected.

        :param query: MongoDB query
        :type query: MongQuery
        :param record: Record in question
        :type record: dict
        :return: Reasons why bad
        :rtype: list(ConstraintViolation)
        """
        # special case, when no constraints are given
        if len(query.all_clauses) == 0:
            return [NullConstraintViolation()]
        # normal case, check all the constraints
        reasons = []
        for clause in query.all_clauses:
            var_name = None
            key = clause.constraint.field.name
            op = clause.constraint.op
            fval = mongo_get(record, key)
            if fval is None:
                expected = clause.constraint.value
                reasons.append(ConstraintViolation(clause.constraint, 'missing', expected))
                continue
            if op.is_variable():
                # retrieve value for variable
                var_name = clause.constraint.value
                value = mongo_get(record, var_name, default=None)
                if value is None:
                    reasons.append(ConstraintViolation(clause.constraint, 'missing', var_name))
                    continue
                clause.constraint.value = value  # swap out value, temporarily
            # take length for size
            if op.is_size():
                if isinstance(fval, six.string_types) or not hasattr(fval, '__len__'):
                    reasons.append(ConstraintViolation(clause.constraint, type(fval), 'sequence'))
                    if op.is_variable():
                        clause.constraint.value = var_name  # put original value back
                    continue
                fval = len(fval)
            ok, expected = clause.constraint.passes(fval)
            if not ok:
                reasons.append(ConstraintViolation(clause.constraint, fval, expected))
            if op.is_variable():
                clause.constraint.value = var_name  # put original value back
        return reasons

    def _build(self, constraint_spec):
        """Generate queries to execute.

        Sets instance variables so that Mongo query strings, etc. can now
        be extracted from the object.

        :param constraint_spec: Constraint specification
        :type constraint_spec: ConstraintSpec
        """
        self._sections = []
        # For each condition in the spec
        for sval in constraint_spec:
            rpt_fld = self._base_report_fields.copy()
            # Constraints
            # If the constraint is an external call to Python code
            if self._is_python(sval.constraints):
                query, proj = self._process_python(sval.constraints)
                rpt_fld.update(proj.to_mongo())
            # All other constraints, e.g. 'foo > 12'
            else:
                query = MongoQuery()
                if sval.constraints is not None:
                    groups = self._process_constraint_expressions(sval.constraints)
                    projection = Projection()
                    for cg in six.itervalues(groups):
                        for c in cg:
                            projection.add(c.field, c.op, c.value)
                            query.add_clause(MongoClause(c))
                        if self._add_exists:
                            for c in cg.existence_constraints:
                                query.add_clause(MongoClause(c, exists_main=True))
                    rpt_fld.update(projection.to_mongo())
            # Filters
            cond_query = MongoQuery()
            if sval.filters is not None:
                cond_groups = self._process_constraint_expressions(sval.filters, rev=False)
                for cg in six.itervalues(cond_groups):
                    for c in cg:
                        cond_query.add_clause(MongoClause(c, rev=False))
            # Done. Add a new 'SectionPart' for the filter and constraint
            result = self.SectionParts(cond_query, query, sval.sampler, rpt_fld)
            self._sections.append(result)

    def _process_constraint_expressions(self, expr_list, conflict_check=True, rev=True):
        """Create and return constraints from expressions in expr_list.

        :param expr_list: The expressions
        :param conflict_check: If True, check for conflicting expressions within each field
        :return: Constraints grouped by field (the key is the field name)
        :rtype: dict
        """
        # process expressions, grouping by field
        groups = {}
        for expr in expr_list:
            field, raw_op, val = parse_expr(expr)
            op = ConstraintOperator(raw_op)
            if field not in groups:
                groups[field] = ConstraintGroup(Field(field, self._aliases))
            groups[field].add_constraint(op, val)
        # add existence constraints
        for cgroup in six.itervalues(groups):
            cgroup.add_existence(rev)
        # optionally check for conflicts
        if conflict_check:
            # check for conflicts in each group
            for field_name, group in six.iteritems(groups):
                conflicts = group.get_conflicts()
                if conflicts:
                    raise ValueError('Conflicts for field {}: {}'.format(field_name, conflicts))
        return groups

    def _is_python(self, constraint_list):
        """Check whether constraint is an import of Python code.

        :param constraint_list: List of raw constraints from YAML file
        :type constraint_list: list(str)
        :return: True if this refers to an import of code, False otherwise
        :raises: ValidatorSyntaxError
        """
        if len(constraint_list) == 1 and \
                PythonMethod.constraint_is_method(constraint_list[0]):
            return True
        if len(constraint_list) > 1 and \
                any(filter(PythonMethod.constraint_is_method, constraint_list)):
            condensed_list = '/'.join(constraint_list)
            err = PythonMethod.CANNOT_COMBINE_ERR
            raise ValidatorSyntaxError(condensed_list, err)
        return False

    def _process_python(self, expr_list):
        """Create a wrapper for a call to some external Python code.

        :param expr_list: The expressions
        :return: Tuple of (query, field-projection)
        :rtype: (PythonMethod, Projection)
        """
        # NOTE(review): stub -- returns (None, None), so `_build` would fail
        # on `proj.to_mongo()` for python-method constraints; unimplemented.
        return None, None
class Sampler(DoesLogging):
    """Randomly sample a proportion of the full collection."""

    # Random uniform distribution
    DIST_RUNIF = 1
    # Default distribution
    DEFAULT_DIST = DIST_RUNIF
    # Names of distributions
    DIST_CODES = {'uniform': DIST_RUNIF}

    def __init__(self, min_items=0, max_items=1e9, p=1.0, distrib=DEFAULT_DIST, **kw):
        """Create new parameterized sampler.

        :param min_items: Minimum number of items in the sample
        :param max_items: Maximum number of items in the sample
        :param p: Probability of selecting an item
        :param distrib: Probability distribution code, one of DIST_<name> in this class
        :type distrib: str or int
        :raise: ValueError, if `distrib` is an unknown code or string
        """
        DoesLogging.__init__(self, 'mg.sampler')
        # Sanity checks
        if min_items < 0:
            raise ValueError('min_items cannot be negative ({:d})'.format(min_items))
        if (max_items != 0) and (max_items < min_items):
            raise ValueError('max_items must be zero or >= min_items ({:d} < {:d})'.format(max_items, min_items))
        if not (0.0 <= p <= 1.0):
            raise ValueError('probability, p, must be between 0 and 1 ({:f})'.format(p))
        self.min_items = min_items
        self.max_items = max_items
        self.p = p
        self._empty = True
        # Distribution: accept either a code (int) or a name (str)
        if not isinstance(distrib, int):
            distrib = self.DIST_CODES.get(str(distrib), None)
        if distrib == self.DIST_RUNIF:
            self._keep = self._keep_runif
        else:
            raise ValueError("unrecognized distribution: {}".format(distrib))

    @property
    def is_empty(self):
        """Whether the most recently sampled cursor was empty."""
        return self._empty

    def _keep_runif(self):
        """Bernoulli trial with probability ``self.p``."""
        return self.p >= random.uniform(0, 1)

    def sample(self, cursor):
        """Extract records randomly from the database.
        Continue until the target proportion of the items have been
        extracted, or until `min_items` if this is larger.
        If `max_items` is non-negative, do not extract more than these.

        This function is a generator, yielding items incrementally.

        :param cursor: Cursor to sample
        :type cursor: pymongo.cursor.Cursor
        :return: yields each item
        :rtype: dict
        :raise: ValueError, if max_items is valid and less than `min_items`
                or if target collection is empty
        """
        count = cursor.count()
        # special case: empty collection
        if count == 0:
            self._empty = True
            raise ValueError("Empty collection")
        # BUG FIX: `_empty` was never cleared, so `is_empty` stayed True
        # even after successfully sampling a non-empty cursor.
        self._empty = False
        # special case: entire collection
        if self.p >= 1 and self.max_items <= 0:
            for item in cursor:
                yield item
            return
        # calculate target number of items to select
        if self.max_items <= 0:
            n_target = max(self.min_items, self.p * count)
        else:
            if self.p <= 0:
                n_target = max(self.min_items, self.max_items)
            else:
                n_target = max(self.min_items, min(self.max_items, self.p * count))
        if n_target == 0:
            raise ValueError("No items requested")
        # select first `n_target` items that pop up with
        # probability self.p
        # This is actually biased to items at the beginning
        # of the file if n_target is smaller than (p * count),
        # NOTE(review): with p == 0 and a positive n_target this loop can
        # never keep an item and will rewind the cursor forever -- confirm
        # callers cannot reach that combination.
        n = 0
        while n < n_target:
            try:
                item = six.advance_iterator(cursor)
            except StopIteration:
                # need to keep looping through data until
                # we get all our items!
                cursor.rewind()
                item = six.advance_iterator(cursor)
            if self._keep():
                yield item
                n += 1
|
mit
|
with-git/tensorflow
|
tensorflow/contrib/training/python/training/sampling_ops_threading_test.py
|
129
|
2884
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class SamplingOpsThreadingTest(test.TestCase):
  """Thread-safety smoke test for `sampling_ops._estimate_data_distribution`."""

  def testMultiThreadedEstimateDataDistribution(self):
    """Run 25 queue-runner threads that all evaluate the shared
    distribution-estimate op and check the session survives."""
    num_classes = 10

    # Set up graph.
    random_seed.set_random_seed(1234)
    label = math_ops.cast(
        math_ops.round(random_ops.random_uniform([1]) * num_classes),
        dtypes_lib.int32)

    prob_estimate = sampling_ops._estimate_data_distribution(  # pylint: disable=protected-access
        label, num_classes)
    # Check that prob_estimate is well-behaved in a multithreaded context.
    _, _, [prob_estimate] = sampling_ops._verify_input(  # pylint: disable=protected-access
        [], label, [prob_estimate])

    # Use queues to run multiple threads over the graph, each of which
    # fetches `prob_estimate`.
    queue = data_flow_ops.FIFOQueue(
        capacity=25,
        dtypes=[prob_estimate.dtype],
        shapes=[prob_estimate.get_shape()])
    enqueue_op = queue.enqueue([prob_estimate])
    queue_runner_impl.add_queue_runner(
        queue_runner_impl.QueueRunner(queue, [enqueue_op] * 25))
    out_tensor = queue.dequeue()

    # Run the multi-threaded session.
    with self.test_session() as sess:
      # Need to initialize variables that keep running total of classes seen.
      variables.global_variables_initializer().run()

      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)

      # Each dequeue forces one enqueue computed by some runner thread.
      for _ in range(25):
        sess.run([out_tensor])

      coord.request_stop()
      coord.join(threads)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
apache-2.0
|
al1221/ghost-openshift
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/manni.py
|
364
|
2374
|
# -*- coding: utf-8 -*-
"""
pygments.styles.manni
~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
This is a port of the style used in the `php port`_ of pygments
by Manni. The style is called 'default' there.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class ManniStyle(Style):
    """
    A colorful style, inspired by the terminal highlighting style.
    """

    # Page background behind highlighted code.
    background_color = '#f0f3f3'

    # Token -> rule mapping; rules use the standard pygments shorthand
    # ('bold', 'italic', 'noitalic', 'bg:', 'border:', hex colors).
    styles = {
        Whitespace:         '#bbbbbb',
        Comment:            'italic #0099FF',
        Comment.Preproc:    'noitalic #009999',
        Comment.Special:    'bold',

        Keyword:            'bold #006699',
        Keyword.Pseudo:     'nobold',
        Keyword.Type:       '#007788',

        Operator:           '#555555',
        Operator.Word:      'bold #000000',

        Name.Builtin:       '#336666',
        Name.Function:      '#CC00FF',
        Name.Class:         'bold #00AA88',
        Name.Namespace:     'bold #00CCFF',
        Name.Exception:     'bold #CC0000',
        Name.Variable:      '#003333',
        Name.Constant:      '#336600',
        Name.Label:         '#9999FF',
        Name.Entity:        'bold #999999',
        Name.Attribute:     '#330099',
        Name.Tag:           'bold #330099',
        Name.Decorator:     '#9999FF',

        String:             '#CC3300',
        String.Doc:         'italic',
        String.Interpol:    '#AA0000',
        String.Escape:      'bold #CC3300',
        String.Regex:       '#33AAAA',
        String.Symbol:      '#FFCC33',
        String.Other:       '#CC3300',

        Number:             '#FF6600',

        Generic.Heading:    'bold #003300',
        Generic.Subheading: 'bold #003300',
        Generic.Deleted:    'border:#CC0000 bg:#FFCCCC',
        Generic.Inserted:   'border:#00CC00 bg:#CCFFCC',
        Generic.Error:      '#FF0000',
        Generic.Emph:       'italic',
        Generic.Strong:     'bold',
        Generic.Prompt:     'bold #000099',
        Generic.Output:     '#AAAAAA',
        Generic.Traceback:  '#99CC66',

        Error:              'bg:#FFAAAA #AA0000'
    }
|
mit
|
dq922/PerfKitExplorer
|
third_party/py/dateutil/zoneinfo/__init__.py
|
265
|
2575
|
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
__all__ = ["setcachesize", "gettz", "rebuild"]
# Module-level cache of (name, tzinfo) pairs, most recently used first.
CACHE = []
# Maximum number of entries kept in CACHE; see setcachesize().
CACHESIZE = 10
class tzfile(tzfile):
    # Subclass dateutil.tz.tzfile only to make instances picklable:
    # unpickling re-resolves the zone by name through gettz().
    def __reduce__(self):
        return (gettz, (self._filename,))
def getzoneinfofile():
    """Return the path of the newest bundled zoneinfo tarball, or None."""
    moduledir = os.path.dirname(__file__)
    # Reverse lexicographic order so the greatest (newest) name wins.
    for entry in sorted(os.listdir(moduledir), reverse=True):
        if entry.startswith("zoneinfo") and ".tar." in entry:
            return os.path.join(moduledir, entry)
    return None
# Resolve the bundled tarball once at import time, then drop the helper
# so it does not linger in the public module namespace.
ZONEINFOFILE = getzoneinfofile()
del getzoneinfofile
def setcachesize(size):
    """Set the maximum number of cached timezones, trimming the cache
    immediately if it already exceeds the new size."""
    global CACHESIZE, CACHE
    CACHESIZE = size
    # Cache is most-recently-used-first, so this drops the oldest entries.
    del CACHE[size:]
def gettz(name):
    """Return a tzinfo for zone `name` from the bundled zoneinfo tarball.

    Results (including lookup misses, cached as None) are kept in the
    module-level CACHE, most recently used first, bounded by CACHESIZE.
    Returns None when no tarball is bundled or the name is unknown.
    """
    tzinfo = None
    if ZONEINFOFILE:
        for cachedname, tzinfo in CACHE:
            if cachedname == name:
                break
        else:
            # Cache miss: read the zone straight out of the tarball.
            tf = TarFile.open(ZONEINFOFILE)
            try:
                zonefile = tf.extractfile(name)
            except KeyError:
                # Unknown zone name; the miss is cached as None below.
                tzinfo = None
            else:
                tzinfo = tzfile(zonefile)
            tf.close()
            CACHE.insert(0, (name, tzinfo))
            del CACHE[CACHESIZE:]
    return tzinfo
def rebuild(filename, tag=None, format="gz"):
    """Rebuild the bundled zoneinfo tarball from an Olson tz source tarball.

    Extracts `filename`, compiles each zone with the external ``zic`` tool,
    and repacks the results as ``zoneinfo[-TAG].tar.FORMAT`` next to this
    module, removing any previous zoneinfo tarball first.

    :param filename: path to an Olson tz source tarball
    :param tag: optional version tag embedded in the output file name
    :param format: tarfile compression suffix ("gz", "bz2", ...)
    """
    import tempfile, shutil
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    # BUG FIX: with the default tag=None the old code produced
    # "zoneinfoNone.tar.gz"; normalize a missing tag to the empty string.
    tag = "-" + tag if tag else ""
    targetname = "zoneinfo%s.tar.%s" % (tag, format)
    try:
        tf = TarFile.open(filename)
        for name in tf.getnames():
            if not (name.endswith(".sh") or
                    name.endswith(".tab") or
                    name == "leapseconds"):
                tf.extract(name, tmpdir)
                filepath = os.path.join(tmpdir, name)
                # NOTE(security): builds a shell command from tar member
                # names; do not run on untrusted tarballs (subprocess.run
                # with an argument list would be safer).
                os.system("zic -d %s %s" % (zonedir, filepath))
        tf.close()
        target = os.path.join(moduledir, targetname)
        # Remove any stale bundled tarball before writing the new one.
        for entry in os.listdir(moduledir):
            if entry.startswith("zoneinfo") and ".tar." in entry:
                os.unlink(os.path.join(moduledir, entry))
        tf = TarFile.open(target, "w:%s" % format)
        for entry in os.listdir(zonedir):
            entrypath = os.path.join(zonedir, entry)
            tf.add(entrypath, entry)
        tf.close()
    finally:
        # Always clean up the scratch directory, even on failure.
        shutil.rmtree(tmpdir)
|
apache-2.0
|
nirmeshk/oh-mainline
|
vendor/packages/Django/django/views/generic/edit.py
|
102
|
8569
|
from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.utils.encoding import force_text
from django.views.generic.base import TemplateResponseMixin, ContextMixin, View
from django.views.generic.detail import (SingleObjectMixin,
SingleObjectTemplateResponseMixin, BaseDetailView)
class FormMixin(ContextMixin):
    """
    A mixin that provides a way to show and handle a form in a request.
    """
    initial = {}
    form_class = None
    success_url = None

    def get_initial(self):
        """
        Returns the initial data to use for forms on this view.
        """
        # Copy so per-request mutation never leaks into the class attribute.
        return self.initial.copy()

    def get_form_class(self):
        """
        Returns the form class to use in this view.
        """
        return self.form_class

    def get_form(self, form_class):
        """
        Returns an instance of the form to be used in this view.
        """
        return form_class(**self.get_form_kwargs())

    def get_form_kwargs(self):
        """
        Returns the keyword arguments for instantiating the form.
        """
        kwargs = {'initial': self.get_initial()}
        # Bind submitted data only for mutating requests.
        if self.request.method in ('POST', 'PUT'):
            kwargs['data'] = self.request.POST
            kwargs['files'] = self.request.FILES
        return kwargs

    def get_success_url(self):
        """
        Returns the supplied success URL.
        """
        if not self.success_url:
            raise ImproperlyConfigured(
                "No URL to redirect to. Provide a success_url.")
        # Forcing possible reverse_lazy evaluation
        return force_text(self.success_url)

    def form_valid(self, form):
        """
        If the form is valid, redirect to the supplied URL.
        """
        return HttpResponseRedirect(self.get_success_url())

    def form_invalid(self, form):
        """
        If the form is invalid, re-render the context data with the
        data-filled form and errors.
        """
        return self.render_to_response(self.get_context_data(form=form))
class ModelFormMixin(FormMixin, SingleObjectMixin):
    """
    A mixin that provides a way to show and handle a modelform in a request.
    """

    def get_form_class(self):
        """
        Returns the form class to use in this view.
        """
        if self.form_class:
            return self.form_class
        # No explicit form class: derive a ModelForm from some model.
        if self.model is not None:
            # An explicitly provided model wins.
            model = self.model
        elif getattr(self, 'object', None) is not None:
            # Operating on a single object: use that object's class.
            model = self.object.__class__
        else:
            # Fall back to the queryset's model.
            model = self.get_queryset().model
        return model_forms.modelform_factory(model)

    def get_form_kwargs(self):
        """
        Returns the keyword arguments for instantiating the form.
        """
        kwargs = super(ModelFormMixin, self).get_form_kwargs()
        kwargs['instance'] = self.object
        return kwargs

    def get_success_url(self):
        """
        Returns the supplied URL.
        """
        if self.success_url:
            # Interpolate e.g. "%(slug)s" from the object's attributes.
            return self.success_url % self.object.__dict__
        try:
            return self.object.get_absolute_url()
        except AttributeError:
            raise ImproperlyConfigured(
                "No URL to redirect to. Either provide a url or define"
                " a get_absolute_url method on the Model.")

    def form_valid(self, form):
        """
        If the form is valid, save the associated model.
        """
        self.object = form.save()
        return super(ModelFormMixin, self).form_valid(form)

    def get_context_data(self, **kwargs):
        """
        If an object has been supplied, inject it into the context with the
        supplied context_object_name name.
        """
        context = {}
        if self.object:
            context['object'] = self.object
            obj_name = self.get_context_object_name(self.object)
            if obj_name:
                context[obj_name] = self.object
        context.update(kwargs)
        return super(ModelFormMixin, self).get_context_data(**context)
class ProcessFormView(View):
    """
    A mixin that renders a form on GET and processes it on POST.
    """

    def get(self, request, *args, **kwargs):
        """
        Handles GET requests and instantiates a blank version of the form.
        """
        form = self.get_form(self.get_form_class())
        return self.render_to_response(self.get_context_data(form=form))

    def post(self, request, *args, **kwargs):
        """
        Handles POST requests, instantiating a form instance with the passed
        POST variables and then checked for validity.
        """
        form = self.get_form(self.get_form_class())
        if form.is_valid():
            return self.form_valid(form)
        return self.form_invalid(form)

    # PUT is a valid HTTP verb for creating (with a known URL) or editing an
    # object, note that browsers only support POST for now.
    def put(self, *args, **kwargs):
        return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
    """
    A base view for displaying a form

    Combines form handling (FormMixin) with GET/POST processing
    (ProcessFormView); a rendering mixin must be added by subclasses.
    """
class FormView(TemplateResponseMixin, BaseFormView):
    """
    A view for displaying a form, and rendering a template response.
    """
class BaseCreateView(ModelFormMixin, ProcessFormView):
    """
    Base view for creating an new object instance.

    Using this base class requires subclassing to provide a response mixin.
    """
    def get(self, request, *args, **kwargs):
        # No existing instance when creating: the form starts unbound.
        self.object = None
        return super(BaseCreateView, self).get(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        # Reset before validation so ModelFormMixin builds a creation form.
        self.object = None
        return super(BaseCreateView, self).post(request, *args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
    """
    View for creating a new object instance,
    with a response rendered by template.
    """
    # Template resolves to "<app>/<model>_form.html".
    template_name_suffix = '_form'
class BaseUpdateView(ModelFormMixin, ProcessFormView):
    """
    Base view for updating an existing object.

    Using this base class requires subclassing to provide a response mixin.
    """
    def get(self, request, *args, **kwargs):
        # Fetch the instance being edited so the form is bound to it.
        self.object = self.get_object()
        return super(BaseUpdateView, self).get(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        # Re-fetch on POST so validation runs against the current instance.
        self.object = self.get_object()
        return super(BaseUpdateView, self).post(request, *args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
    """
    Update an existing object and render the result with a template.
    """
    template_name_suffix = '_form'
class DeletionMixin(object):
    """
    Adds object-deletion behaviour to a detail view.
    """
    success_url = None
    def delete(self, request, *args, **kwargs):
        """
        Fetch the target object, delete it, and redirect to the
        configured success URL.
        """
        self.object = self.get_object()
        self.object.delete()
        return HttpResponseRedirect(self.get_success_url())
    # Browsers cannot issue DELETE yet, so accept POST as an alias.
    def post(self, *args, **kwargs):
        return self.delete(*args, **kwargs)
    def get_success_url(self):
        if not self.success_url:
            raise ImproperlyConfigured(
                "No URL to redirect to. Provide a success_url.")
        return self.success_url
class BaseDeleteView(DeletionMixin, BaseDetailView):
    """
    Object-deletion logic without response rendering; subclass together
    with a response mixin to obtain a usable view.
    """
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
    """
    Delete the object returned by `self.get_object()`, rendering the
    confirmation page with a template.
    """
    template_name_suffix = '_confirm_delete'
|
agpl-3.0
|
ctherien/pysptools
|
pysptools/skl/km.py
|
1
|
4885
|
#
#------------------------------------------------------------------------------
# Copyright (c) 2013-2014, Christian Therien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# km.py - This file is part of the PySptools package.
#
"""
KMeans class
"""
import numpy as np
import sklearn.cluster as cluster
#from . import out
#from .inval import *
from pysptools.classification.out import Output
from pysptools.classification.inval import *
class KMeans(object):
    """ KMeans clustering algorithm adapted to hyperspectral imaging """
    def __init__(self):
        # Last computed cluster map (m x n) and the cluster count used for it.
        self.cluster = None
        self.n_clusters = None
        self.output = Output('KMeans')
    @PredictInputValidation('KMeans')
    def predict(self, M, n_clusters=5, n_jobs=1, init='k-means++'):
        """
        Run the scikit-learn KMeans clustering on a HSI cube.
        Parameters:
            M: `numpy array`
                A HSI cube (m x n x p).
            n_clusters: `int [default 5]`
                The number of clusters to generate.
            n_jobs: `int [default 1]`
                Number of parallel jobs, forwarded to scikit-learn:
                -1 uses all CPUs, 1 disables parallelism (useful for
                debugging), below -1 uses (n_cpus + 1 + n_jobs) CPUs.
            init: `string or array [default 'k-means++']`
                Initialization method forwarded to scikit-learn:
                'k-means++' (smart seeding), 'random' (k random
                observations), or an (n_clusters, n_features) array of
                explicit initial centers.
        Returns: `numpy array`
            A cluster map (m x n x c), c is the clusters number .
        """
        rows, cols, bands = M.shape
        self.n_clusters = n_clusters
        # Flatten the cube to (pixels, bands) as expected by scikit-learn.
        flat = np.reshape(M, (rows * cols, bands))
        model = cluster.KMeans(n_clusters=n_clusters, n_jobs=n_jobs, init=init)
        labels = model.fit_predict(flat)
        # Fold the flat label vector back into image geometry.
        self.cluster = np.reshape(labels, (rows, cols))
        return self.cluster
    @PlotInputValidation3('KMeans')
    def plot(self, path, interpolation='none', colorMap='Accent', suffix=None):
        """
        Plot the cluster map.
        Parameters:
            path: `string`
                The path where to put the plot.
            interpolation: `string [default none]`
                A matplotlib interpolation method.
            colorMap: `string [default 'Accent']`
                A color map element of
                ['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3'],
                "Accent" is the default and it fall back on "Jet".
            suffix: `string [default None]`
                Add a suffix to the file name.
        """
        self.output.plot(self.cluster, self.n_clusters, path=path,
                         interpolation=interpolation, colorMap=colorMap,
                         suffix=suffix)
    @DisplayInputValidation3('KMeans')
    def display(self, interpolation='none', colorMap='Accent', suffix=None):
        """
        Display the cluster map.
        Parameters:
            interpolation: `string [default none]`
                A matplotlib interpolation method.
            colorMap: `string [default 'Accent']`
                A color map element of
                ['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3'],
                "Accent" is the default and it fall back on "Jet".
            suffix: `string [default None]`
                Add a suffix to the title.
        """
        self.output.plot(self.cluster, self.n_clusters,
                         interpolation=interpolation, colorMap=colorMap,
                         suffix=suffix)
|
apache-2.0
|
yujikato/DIRAC
|
src/DIRAC/TransformationSystem/Agent/TransformationAgent.py
|
1
|
30031
|
""" TransformationAgent processes transformations found in the transformation database.
The following options can be set for the TransformationAgent.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN TransformationAgent
:end-before: ##END
:dedent: 2
:caption: TransformationAgent options
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import datetime
import pickle
from six.moves import queue as Queue
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.Utilities.ThreadSafe import Synchronizer
from DIRAC.Core.Utilities.List import breakListIntoChunks, randomize
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.TransformationSystem.Agent.TransformationAgentsUtilities import TransformationAgentsUtilities
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
__RCSID__ = "$Id$"
AGENT_NAME = 'Transformation/TransformationAgent'
gSynchro = Synchronizer()
class TransformationAgent(AgentModule, TransformationAgentsUtilities):
  """ Usually subclass of AgentModule
  """
  def __init__(self, *args, **kwargs):
    """ c'tor

        Only sets empty/default values here; the real configuration is read
        from the CS options in initialize().
    """
    AgentModule.__init__(self, *args, **kwargs)
    TransformationAgentsUtilities.__init__(self)
    # few parameters
    self.pluginLocation = ''
    self.transformationStatus = []
    self.maxFiles = 0
    self.transformationTypes = []
    # clients (out of the threads)
    self.transfClient = None
    # parameters for the threading
    # transQueue holds the transformation dicts to process; transInQueue the
    # corresponding IDs, used to avoid queuing the same transformation twice
    self.transQueue = Queue.Queue()
    self.transInQueue = []
    # parameters for caching
    self.workDirectory = ''
    self.cacheFile = ''
    self.controlDirectory = ''
    self.lastFileOffset = {}
    # Validity of the cache
    self.replicaCache = None
    self.replicaCacheValidity = None
    self.writingCache = False
    self.removedFromCache = 0
    self.noUnusedDelay = 0
    # per-transformation bookkeeping used to decide whether anything new
    # happened since the last cycle
    self.unusedFiles = {}
    self.unusedTimeStamp = {}
    self.debug = False
    # transID -> log-prefix of the thread currently processing it
    self.transInThread = {}
    self.pluginTimeout = {}
  def initialize(self):
    """ standard initialize

        Reads the agent options, instantiates the clients and spawns the
        worker threads that consume the transformation queue.
    """
    # few parameters
    self.pluginLocation = self.am_getOption('PluginLocation',
                                            'DIRAC.TransformationSystem.Agent.TransformationPlugin')
    self.transformationStatus = self.am_getOption('transformationStatus', ['Active', 'Completing', 'Flush'])
    # Prepare to change the name of the CS option as MaxFiles is ambiguous
    self.maxFiles = self.am_getOption('MaxFilesToProcess', self.am_getOption('MaxFiles', 5000))
    agentTSTypes = self.am_getOption('TransformationTypes', [])
    if agentTSTypes:
      self.transformationTypes = sorted(agentTSTypes)
    else:
      # No explicit types configured: fall back on the Operations defaults
      dataProc = Operations().getValue('Transformations/DataProcessing', ['MCSimulation', 'Merge'])
      dataManip = Operations().getValue('Transformations/DataManipulation', ['Replication', 'Removal'])
      self.transformationTypes = sorted(dataProc + dataManip)
    # clients
    self.transfClient = TransformationClient()
    # for caching using a pickle file
    self.workDirectory = self.am_getWorkDirectory()
    self.cacheFile = os.path.join(self.workDirectory, 'ReplicaCache.pkl')
    self.controlDirectory = self.am_getControlDirectory()
    # remember the offset if any in TS
    self.lastFileOffset = {}
    # Validity of the cache (in days)
    self.replicaCache = {}
    self.replicaCacheValidity = self.am_getOption('ReplicaCacheValidity', 2)
    self.noUnusedDelay = self.am_getOption('NoUnusedDelay', 6)
    # Get it threaded
    maxNumberOfThreads = self.am_getOption('maxThreadsInPool', 1)
    threadPool = ThreadPool(maxNumberOfThreads, maxNumberOfThreads)
    self.log.info("Multithreaded with %d threads" % maxNumberOfThreads)
    for i in range(maxNumberOfThreads):
      threadPool.generateJobAndQueueIt(self._execute, [i])
    self.log.info("Will treat the following transformation types: %s" % str(self.transformationTypes))
    return S_OK()
  def finalize(self):
    """ graceful finalization

        Empties the queue so no new work is started, waits for the worker
        threads to finish their current transformation, then flushes the
        replica cache to disk.
    """
    method = 'finalize'
    if self.transInQueue:
      # Emptying the list makes _execute break out instead of picking up
      # the transformations still queued
      self.transInQueue = []
      self._logInfo("Wait for threads to get empty before terminating the agent (%d tasks)" %
                    len(self.transInThread), method=method)
      self._logInfo('Remaining transformations:',
                    ','.join(str(transID) for transID in self.transInThread), method=method)
      while self.transInThread:
        time.sleep(2)
      self._logInfo("Threads are empty, terminating the agent...", method=method)
    self.__writeCache()
    return S_OK()
  def execute(self):
    """ Just puts transformations in the queue

        The actual processing is done asynchronously by the _execute
        worker threads.
    """
    # Get the transformations to process
    res = self.getTransformations()
    if not res['OK']:
      self._logError("Failed to obtain transformations:", res['Message'])
      return S_OK()
    # Process the transformations
    count = 0
    for transDict in res['Value']:
      transID = int(transDict['TransformationID'])
      if transDict.get('InheritedFrom'):
        # Try and move datasets from the ancestor production
        res = self.transfClient.moveFilesToDerivedTransformation(transDict)
        if not res['OK']:
          self._logError("Error moving files from an inherited transformation", res['Message'], transID=transID)
        else:
          parentProd, movedFiles = res['Value']
          if movedFiles:
            self._logInfo("Successfully moved files from %d to %d:" % (parentProd, transID), transID=transID)
            for status, val in movedFiles.items():
              self._logInfo("\t%d files to status %s" % (val, status), transID=transID)
      # Only queue a transformation if it is not already queued or running
      if transID not in self.transInQueue:
        count += 1
        self.transInQueue.append(transID)
        self.transQueue.put(transDict)
    self._logInfo("Out of %d transformations, %d put in thread queue" % (len(res['Value']), count))
    return S_OK()
  def getTransformations(self):
    """ Obtain the transformations to be executed - this is executed at the start of every loop (it's really the
        only real thing in the execute()

        :return: S_OK with the list of transformation dictionaries, or an error structure
    """
    transName = self.am_getOption('Transformation', 'All')
    method = 'getTransformations'
    if transName == 'All':
      self._logInfo("Getting all transformations%s, status %s." %
                    (' of type %s' % str(self.transformationTypes) if self.transformationTypes else '',
                     str(self.transformationStatus)),
                    method=method)
      transfDict = {'Status': self.transformationStatus}
      if self.transformationTypes:
        transfDict['Type'] = self.transformationTypes
      res = self.transfClient.getTransformations(transfDict, extraParams=True)
      if not res['OK']:
        return res
      transformations = res['Value']
      self._logInfo("Obtained %d transformations to process" % len(transformations), method=method)
    else:
      # A single transformation was explicitly requested in the options
      self._logInfo("Getting transformation %s." % transName, method=method)
      res = self.transfClient.getTransformation(transName, extraParams=True)
      if not res['OK']:
        self._logError("Failed to get transformation:", res['Message'], method=method)
        return res
      transformations = [res['Value']]
    return S_OK(transformations)
def _getClients(self):
""" returns the clients used in the threads
"""
threadTransformationClient = TransformationClient()
threadDataManager = DataManager()
return {'TransformationClient': threadTransformationClient,
'DataManager': threadDataManager}
def _execute(self, threadID):
""" thread - does the real job: processing the transformations to be processed
"""
# Each thread will have its own clients
clients = self._getClients()
while True:
transDict = self.transQueue.get()
try:
transID = int(transDict['TransformationID'])
if transID not in self.transInQueue:
break
self.transInThread[transID] = ' [Thread%d] [%s] ' % (threadID, str(transID))
self._logInfo("Processing transformation %s." % transID, transID=transID)
startTime = time.time()
res = self.processTransformation(transDict, clients)
if not res['OK']:
self._logInfo("Failed to process transformation:", res['Message'], transID=transID)
except Exception as x: # pylint: disable=broad-except
self._logException('Exception in plugin', lException=x, transID=transID)
finally:
if not transID:
transID = 'None'
self._logInfo("Processed transformation in %.1f seconds" % (time.time() - startTime), transID=transID)
if transID in self.transInQueue:
self.transInQueue.remove(transID)
self.transInThread.pop(transID, None)
self._logVerbose("%d transformations still in queue" % len(self.transInQueue))
return S_OK()
  def processTransformation(self, transDict, clients):
    """ process a single transformation (in transDict)

        Fetches the files to treat, resolves their replicas, runs the
        transformation plugin and creates the resulting tasks.

        :param transDict: transformation dictionary (TransformationID, Type, Status, Plugin, ...)
        :param clients: dictionary of per-thread clients from _getClients()
    """
    method = 'processTransformation'
    transID = transDict['TransformationID']
    # Replication/removal transformations do not create jobs
    forJobs = transDict['Type'].lower() not in ('replication', 'removal')
    # First get the LFNs associated to the transformation
    transFiles = self._getTransformationFiles(transDict, clients, replicateOrRemove=not forJobs)
    if not transFiles['OK']:
      return transFiles
    if not transFiles['Value']:
      return S_OK()
    if transID not in self.replicaCache:
      self.__readCache(transID)
    transFiles = transFiles['Value']
    unusedLfns = [f['LFN'] for f in transFiles]
    unusedFiles = len(unusedLfns)
    plugin = transDict.get('Plugin', 'Standard')
    # Limit the number of LFNs to be considered for replication or removal as they are treated individually
    if not forJobs:
      maxFiles = Operations().getValue('TransformationPlugins/%s/MaxFilesToProcess' % plugin, 0)
      # Get plugin-specific limit in number of files (0 means no limit)
      totLfns = len(unusedLfns)
      lfnsToProcess = self.__applyReduction(unusedLfns, maxFiles=maxFiles)
      if len(lfnsToProcess) != totLfns:
        self._logInfo("Reduced number of files from %d to %d" % (totLfns, len(lfnsToProcess)),
                      method=method, transID=transID)
        transFiles = [f for f in transFiles if f['LFN'] in lfnsToProcess]
    else:
      lfnsToProcess = unusedLfns
    # Check the data is available with replicas
    res = self.__getDataReplicas(transDict, lfnsToProcess, clients, forJobs=forJobs)
    if not res['OK']:
      self._logError("Failed to get data replicas:", res['Message'],
                     method=method, transID=transID)
      return res
    dataReplicas = res['Value']
    # Get the plug-in type and create the plug-in object
    self._logInfo("Processing transformation with '%s' plug-in." % plugin,
                  method=method, transID=transID)
    res = self.__generatePluginObject(plugin, clients)
    if not res['OK']:
      return res
    oPlugin = res['Value']
    # Get the plug-in and set the required params
    oPlugin.setParameters(transDict)
    oPlugin.setInputData(dataReplicas)
    oPlugin.setTransformationFiles(transFiles)
    res = oPlugin.run()
    if not res['OK']:
      self._logError("Failed to generate tasks for transformation:", res['Message'],
                     method=method, transID=transID)
      return res
    tasks = res['Value']
    # Remember whether the plugin hit its timeout, so the next cycle skips the NoUnusedDelay
    self.pluginTimeout[transID] = res.get('Timeout', False)
    # Create the tasks
    allCreated = True
    created = 0
    lfnsInTasks = []
    for se, lfns in tasks:
      res = clients['TransformationClient'].addTaskForTransformation(transID, lfns, se)
      if not res['OK']:
        self._logError("Failed to add task generated by plug-in:", res['Message'],
                       method=method, transID=transID)
        allCreated = False
      else:
        created += 1
        lfnsInTasks += [lfn for lfn in lfns if lfn in lfnsToProcess]
    if created:
      self._logInfo("Successfully created %d tasks for transformation." % created,
                    method=method, transID=transID)
    else:
      self._logInfo("No new tasks created for transformation.",
                    method=method, transID=transID)
    # Remember how many files remain without a task, to detect "nothing new" cycles
    self.unusedFiles[transID] = unusedFiles - len(lfnsInTasks)
    # If not all files were obtained, move the offset
    lastOffset = self.lastFileOffset.get(transID)
    if lastOffset:
      self.lastFileOffset[transID] = max(0, lastOffset - len(lfnsInTasks))
    self.__removeFilesFromCache(transID, lfnsInTasks)
    # If this production is to Flush
    if transDict['Status'] == 'Flush' and allCreated:
      res = clients['TransformationClient'].setTransformationParameter(transID, 'Status', 'Active')
      if not res['OK']:
        self._logError("Failed to update transformation status to 'Active':", res['Message'],
                       method=method, transID=transID)
      else:
        self._logInfo("Updated transformation status to 'Active'.",
                      method=method, transID=transID)
    return S_OK()
######################################################################
#
# Internal methods used by the agent
#
  def _getTransformationFiles(self, transDict, clients, statusList=None, replicateOrRemove=False):
    """ get the data replicas for a certain transID

        Returns S_OK(transFiles) with the files to treat, S_OK() when there
        is nothing (new) to do this cycle, or an error structure.

        :param transDict: transformation dictionary
        :param clients: per-thread clients dictionary
        :param statusList: file statuses to select (default: Unused + ProbInFC,
                           plus MissingInFC for Removal transformations)
        :param replicateOrRemove: True for DM (replication/removal) transformations
    """
    # By default, don't skip if no new Unused for DM transformations
    skipIfNoNewUnused = not replicateOrRemove
    transID = transDict['TransformationID']
    plugin = transDict.get('Plugin', 'Standard')
    # Check if files should be sorted and limited in number
    operations = Operations()
    sortedBy = operations.getValue('TransformationPlugins/%s/SortedBy' % plugin, None)
    maxFiles = operations.getValue('TransformationPlugins/%s/MaxFilesToProcess' % plugin, 0)
    # If the NoUnuse delay is explicitly set, we want to take it into account, and skip if no new Unused
    if operations.getValue('TransformationPlugins/%s/NoUnusedDelay' % plugin, 0):
      skipIfNoNewUnused = True
    # A plugin timeout on the previous cycle disables the delay for this one
    noUnusedDelay = 0 if self.pluginTimeout.get(transID, False) else \
        operations.getValue('TransformationPlugins/%s/NoUnusedDelay' % plugin, self.noUnusedDelay)
    method = '_getTransformationFiles'
    lastOffset = self.lastFileOffset.setdefault(transID, 0)
    # Files that were problematic (either explicit or because SE was banned) may be recovered,
    # and always removing the missing ones
    if not statusList:
      statusList = ['Unused', 'ProbInFC']
    statusList += ['MissingInFC'] if transDict['Type'] == 'Removal' else []
    transClient = clients['TransformationClient']
    res = transClient.getTransformationFiles(condDict={'TransformationID': transID,
                                                       'Status': statusList},
                                             orderAttribute=sortedBy,
                                             offset=lastOffset, maxfiles=maxFiles)
    if not res['OK']:
      self._logError("Failed to obtain input data:", res['Message'],
                     method=method, transID=transID)
      return res
    transFiles = res['Value']
    if maxFiles and len(transFiles) == maxFiles:
      # Result was truncated: remember where to restart next cycle
      self.lastFileOffset[transID] += maxFiles
    else:
      del self.lastFileOffset[transID]
    if not transFiles:
      self._logInfo("No '%s' files found for transformation." % ','.join(statusList),
                    method=method, transID=transID)
      if transDict['Status'] == 'Flush':
        # Nothing left to flush: put the transformation back to Active
        res = transClient.setTransformationParameter(transID, 'Status', 'Active')
        if not res['OK']:
          self._logError("Failed to update transformation status to 'Active':", res['Message'],
                         method=method, transID=transID)
        else:
          self._logInfo("Updated transformation status to 'Active'.",
                        method=method, transID=transID)
      return S_OK()
    # Check if transformation is kicked (presence of a control file forces processing)
    kickFile = os.path.join(self.controlDirectory, 'KickTransformation_%s' % str(transID))
    try:
      kickTrans = os.path.exists(kickFile)
      if kickTrans:
        os.remove(kickFile)
    except OSError:
      pass
    # Check if something new happened
    now = datetime.datetime.utcnow()
    if not kickTrans and skipIfNoNewUnused and noUnusedDelay:
      nextStamp = self.unusedTimeStamp.setdefault(transID, now) + datetime.timedelta(hours=noUnusedDelay)
      skip = now < nextStamp
      # Skip this cycle if the file count did not change within the delay window
      if len(transFiles) == self.unusedFiles.get(transID, 0) and transDict['Status'] != 'Flush' and skip:
        self._logInfo("No new '%s' files found for transformation." % ','.join(statusList),
                      method=method, transID=transID)
        return S_OK()
    self.unusedTimeStamp[transID] = now
    # If files are not Unused, set them Unused
    notUnused = [trFile['LFN'] for trFile in transFiles if trFile['Status'] != 'Unused']
    otherStatuses = sorted(set([trFile['Status'] for trFile in transFiles]) - set(['Unused']))
    if notUnused:
      res = transClient.setFileStatusForTransformation(transID, 'Unused', notUnused, force=True)
      if not res['OK']:
        self._logError("Error setting %d files Unused:" % len(notUnused), res['Message'],
                       method=method, transID=transID)
      else:
        self._logInfo("Set %d files from %s to Unused" % (len(notUnused), ','.join(otherStatuses)))
        self.__removeFilesFromCache(transID, notUnused)
    return S_OK(transFiles)
def __applyReduction(self, lfns, maxFiles=None):
""" eventually remove the number of files to be considered
"""
if maxFiles is None:
maxFiles = self.maxFiles
if not maxFiles or len(lfns) <= maxFiles:
return lfns
return randomize(lfns)[:maxFiles]
  def __getDataReplicas(self, transDict, lfns, clients, forJobs=True):
    """ Get the replicas for the LFNs and check their statuses. It first looks within the cache.

        :param transDict: transformation dictionary
        :param lfns: LFNs to resolve
        :param clients: per-thread clients dictionary
        :param forJobs: if True, only replicas usable for jobs are requested
        :return: S_OK({lfn: [SE, ...]})
    """
    method = '__getDataReplicas'
    transID = transDict['TransformationID']
    if 'RemoveFile' in transDict['Body']:
      # When removing files, we don't care about their replicas
      return S_OK(dict.fromkeys(lfns, ['None']))
    # A control file allows an operator to force a cache flush
    clearCacheFile = os.path.join(self.controlDirectory, 'ClearCache_%s' % str(transID))
    try:
      clearCache = os.path.exists(clearCacheFile)
      if clearCache:
        os.remove(clearCacheFile)
    except Exception:
      pass
    if clearCache or transDict['Status'] == 'Flush':
      self._logInfo("Replica cache cleared", method=method, transID=transID)
      # We may need to get new replicas
      self.__clearCacheForTrans(transID)
    else:
      # If the cache needs to be cleaned
      self.__cleanCache(transID)
    startTime = time.time()
    dataReplicas = {}
    nLfns = len(lfns)
    self._logVerbose("Getting replicas for %d files" % nLfns, method=method, transID=transID)
    cachedReplicaSets = self.replicaCache.get(transID, {})
    cachedReplicas = {}
    # Merge all sets of replicas
    for replicas in cachedReplicaSets.values():
      cachedReplicas.update(replicas)
    self._logInfo("Number of cached replicas: %d" % len(cachedReplicas), method=method, transID=transID)
    setCached = set(cachedReplicas)
    setLfns = set(lfns)
    # Serve what we can from the cache
    for lfn in setLfns & setCached:
      dataReplicas[lfn] = cachedReplicas[lfn]
    newLFNs = setLfns - setCached
    self._logInfo("ReplicaCache hit for %d out of %d LFNs" % (len(dataReplicas), nLfns),
                  method=method, transID=transID)
    if newLFNs:
      startTime = time.time()
      self._logInfo("Getting replicas for %d files from catalog" % len(newLFNs),
                    method=method, transID=transID)
      newReplicas = {}
      # Query the catalog in chunks to avoid overloading it
      for chunk in breakListIntoChunks(newLFNs, 10000):
        res = self._getDataReplicasDM(transID, chunk, clients, forJobs=forJobs)
        if res['OK']:
          # Only keep LFNs that actually have replicas
          reps = dict((lfn, ses) for lfn, ses in res['Value'].items() if ses)
          newReplicas.update(reps)
          self.__updateCache(transID, reps)
        else:
          self._logWarn("Failed to get replicas for %d files" % len(chunk), res['Message'],
                        method=method, transID=transID)
      self._logInfo("Obtained %d replicas from catalog in %.1f seconds"
                    % (len(newReplicas), time.time() - startTime),
                    method=method, transID=transID)
      dataReplicas.update(newReplicas)
      noReplicas = newLFNs - set(dataReplicas)
      self.__writeCache(transID)
      if noReplicas:
        self._logWarn("Found %d files without replicas (or only in Failover)" % len(noReplicas),
                      method=method, transID=transID)
    return S_OK(dataReplicas)
  def _getDataReplicasDM(self, transID, lfns, clients, forJobs=True, ignoreMissing=False):
    """ Get the replicas for the LFNs and check their statuses, using the replica manager

        Files that are neither Successful nor Failed are flagged ProbInFC in
        the transformation DB; files missing from the catalog are flagged
        MissingInFC (unless ignoreMissing is set, in which case they are
        returned with an empty replica list).

        :return: S_OK({lfn: [SE, ...]})
    """
    method = '_getDataReplicasDM'
    startTime = time.time()
    self._logVerbose("Getting replicas%s from catalog for %d files" % (' for jobs' if forJobs else '', len(lfns)),
                     method=method, transID=transID)
    if forJobs:
      # Get only replicas eligible for jobs
      res = clients['DataManager'].getReplicasForJobs(lfns, getUrl=False)
    else:
      # Get all replicas
      res = clients['DataManager'].getReplicas(lfns, getUrl=False)
    if not res['OK']:
      return res
    replicas = res['Value']
    # Prepare a dictionary for all LFNs
    dataReplicas = {}
    self._logVerbose("Replica results for %d files obtained in %.2f seconds" %
                     (len(lfns), time.time() - startTime),
                     method=method, transID=transID)
    # If files are neither Successful nor Failed, they are set problematic in the FC
    problematicLfns = [lfn for lfn in lfns if lfn not in replicas['Successful'] and lfn not in replicas['Failed']]
    if problematicLfns:
      self._logInfo("%d files found problematic in the catalog, set ProbInFC" % len(problematicLfns))
      res = clients['TransformationClient'].setFileStatusForTransformation(transID, 'ProbInFC', problematicLfns)
      if not res['OK']:
        self._logError("Failed to update status of problematic files:", res['Message'],
                       method=method, transID=transID)
    # Create a dictionary containing all the file replicas
    failoverLfns = []
    for lfn, replicaDict in replicas['Successful'].items():
      for se in replicaDict:
        # This remains here for backward compatibility in case VOs have not defined SEs not to be used for jobs
        if forJobs and 'failover' in se.lower():
          self._logVerbose("Ignoring failover replica for %s." % lfn, method=method, transID=transID)
        else:
          dataReplicas.setdefault(lfn, []).append(se)
      if not dataReplicas.get(lfn):
        failoverLfns.append(lfn)
    if failoverLfns:
      self._logVerbose("%d files have no replica but possibly in Failover SE" % len(failoverLfns))
    # Make sure that file missing from the catalog are marked in the transformation DB.
    missingLfns = []
    for lfn, reason in replicas['Failed'].items():
      if "No such file or directory" in reason:
        self._logVerbose("%s not found in the catalog." % lfn, method=method, transID=transID)
        missingLfns.append(lfn)
    if missingLfns:
      self._logInfo("%d files not found in the catalog" % len(missingLfns))
      if ignoreMissing:
        dataReplicas.update(dict.fromkeys(missingLfns, []))
      else:
        res = clients['TransformationClient'].setFileStatusForTransformation(transID, 'MissingInFC', missingLfns)
        if not res['OK']:
          self._logError("Failed to update status of missing files:", res['Message'],
                         method=method, transID=transID)
    return S_OK(dataReplicas)
def __updateCache(self, transID, newReplicas):
""" Add replicas to the cache
"""
self.replicaCache.setdefault(transID, {})[datetime.datetime.utcnow()] = newReplicas
# if len( newReplicas ) > 5000:
# self.__writeCache( transID )
def __clearCacheForTrans(self, transID):
""" Remove all replicas for a transformation
"""
self.replicaCache.pop(transID, None)
def __cleanReplicas(self, transID, lfns):
""" Remove cached replicas that are not in a list
"""
cachedReplicas = set()
for replicas in self.replicaCache.get(transID, {}).values():
cachedReplicas.update(replicas)
toRemove = cachedReplicas - set(lfns)
if toRemove:
self._logInfo("Remove %d files from cache" % len(toRemove), method='__cleanReplicas', transID=transID)
self.__removeFromCache(transID, toRemove)
  def __cleanCache(self, transID):
    """ Cleans the cache

        Drops replica sets that are older than the configured validity
        (replicaCacheValidity, in days) or that are empty; removes the
        transformation entry altogether once nothing is left.
    """
    try:
      if transID in self.replicaCache:
        timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=self.replicaCacheValidity)
        # Iterate over a copy of the keys since entries may be deleted
        for updateTime in set(self.replicaCache[transID]):
          nCache = len(self.replicaCache[transID][updateTime])
          if updateTime < timeLimit or not nCache:
            self._logInfo("Clear %s replicas for transformation %s, time %s" %
                          ('%d cached' % nCache if nCache else 'empty cache', str(transID), str(updateTime)),
                          transID=transID, method='__cleanCache')
            del self.replicaCache[transID][updateTime]
        # Remove empty transformations
        if not self.replicaCache[transID]:
          del self.replicaCache[transID]
    except Exception as x:
      self._logException("Exception when cleaning replica cache:", lException=x)
def __removeFilesFromCache(self, transID, lfns):
removed = self.__removeFromCache(transID, lfns)
if removed:
self._logInfo("Removed %d replicas from cache" % removed, method='__removeFilesFromCache', transID=transID)
self.__writeCache(transID)
def __removeFromCache(self, transID, lfns):
if transID not in self.replicaCache:
return
removed = 0
if self.replicaCache[transID] and lfns:
for lfn in lfns:
for timeKey in self.replicaCache[transID]:
if self.replicaCache[transID][timeKey].pop(lfn, None):
removed += 1
return removed
def __cacheFile(self, transID):
return self.cacheFile.replace('.pkl', '_%s.pkl' % str(transID))
@gSynchro
def __readCache(self, transID):
""" Reads from the cache
"""
if transID in self.replicaCache:
return
try:
method = '__readCache'
fileName = self.__cacheFile(transID)
if not os.path.exists(fileName):
self.replicaCache[transID] = {}
else:
with open(fileName, 'r') as cacheFile:
self.replicaCache[transID] = pickle.load(cacheFile)
self._logInfo("Successfully loaded replica cache from file %s (%d files)" %
(fileName, self.__filesInCache(transID)),
method=method, transID=transID)
except Exception as x:
self._logException("Failed to load replica cache from file %s" % fileName, lException=x,
method=method, transID=transID)
self.replicaCache[transID] = {}
def __filesInCache(self, transID):
cache = self.replicaCache.get(transID, {})
return sum(len(lfns) for lfns in cache.values())
@gSynchro
def __writeCache(self, transID=None):
""" Writes the cache
"""
method = '__writeCache'
try:
startTime = time.time()
transList = [transID] if transID else set(self.replicaCache)
filesInCache = 0
nCache = 0
for t_id in transList:
# Protect the copy of the cache
filesInCache += self.__filesInCache(t_id)
# write to a temporary file in order to avoid corrupted files
cacheFile = self.__cacheFile(t_id)
tmpFile = cacheFile + '.tmp'
with open(tmpFile, 'w') as fd:
pickle.dump(self.replicaCache.get(t_id, {}), fd)
# Now rename the file as it shold
os.rename(tmpFile, cacheFile)
nCache += 1
self._logInfo("Successfully wrote %d replica cache file(s) (%d files) in %.1f seconds"
% (nCache, filesInCache, time.time() - startTime),
method=method, transID=transID if transID else None)
except Exception as x:
self._logException("Could not write replica cache file %s" % cacheFile, lException=x,
method=method, transID=t_id)
def __generatePluginObject(self, plugin, clients):
""" This simply instantiates the TransformationPlugin class with the relevant plugin name
"""
try:
plugModule = __import__(self.pluginLocation, globals(), locals(), ['TransformationPlugin'])
except ImportError as e:
self._logException("Failed to import 'TransformationPlugin' %s" % plugin, lException=e,
method="__generatePluginObject")
return S_ERROR()
try:
plugin_o = getattr(plugModule, 'TransformationPlugin')('%s' % plugin,
transClient=clients['TransformationClient'],
dataManager=clients['DataManager'])
return S_OK(plugin_o)
except AttributeError as e:
self._logException("Failed to create %s()" % plugin, lException=e, method="__generatePluginObject")
return S_ERROR()
plugin_o.setDirectory(self.workDirectory)
plugin_o.setCallback(self.pluginCallback)
def pluginCallback(self, transID, invalidateCache=False):
""" Standard plugin callback
"""
if invalidateCache:
try:
if transID in self.replicaCache:
self._logInfo("Removed cached replicas for transformation", method='pluginCallBack', transID=transID)
self.replicaCache.pop(transID)
self.__writeCache(transID)
except Exception:
pass
|
gpl-3.0
|
NeCTAR-RC/nova
|
nova/cells/weights/ram_by_instance_type.py
|
22
|
1674
|
# Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Weigh cells by memory needed in a way that spreads instances.
"""
from nova.cells import weights
import nova.conf
CONF = nova.conf.CONF
class RamByInstanceTypeWeigher(weights.BaseCellWeigher):
    """Weigh cells by instance_type requested."""
    def weight_multiplier(self):
        """Return the configured ram weight multiplier for cells."""
        return CONF.cells.ram_weight_multiplier
    def _weigh_object(self, cell, weight_properties):
        """Use the 'ram_free' for a particular instance_type advertised from a
        child cell's capacity to compute a weight. We want to direct the
        build to a cell with a higher capacity. Since higher weights win,
        we just return the number of units available for the instance_type.
        """
        instance_type = weight_properties['request_spec']['instance_type']
        requested_mb = instance_type['memory_mb']
        # Missing capacity information simply weighs as zero free units.
        ram_free_info = cell.capacities.get('ram_free', {})
        return ram_free_info.get('units_by_mb', {}).get(str(requested_mb), 0)
|
apache-2.0
|
svanschalkwyk/datafari
|
windows/python/Lib/curses/__init__.py
|
108
|
1817
|
"""curses
The main package for curses support for Python. Normally used by importing
the package, and perhaps a particular module inside it.
import curses
from curses import textpad
curses.initscr()
...
"""
__revision__ = "$Id$"
from _curses import *
from curses.wrapper import wrapper
import os as _os
import sys as _sys
# Some constants, most notably the ACS_* ones, are only added to the C
# _curses module's dictionary after initscr() is called. (Some
# versions of SGI's curses don't define values for those constants
# until initscr() has been called.) This wrapper function calls the
# underlying C initscr(), and then copies the constants from the
# _curses module to the curses package's dictionary. Don't do 'from
# curses import *' if you'll be needing the ACS_* constants.
def initscr():
    """Initialize curses and return the standard screen window.

    Also copies the late-defined ACS_* constants (plus LINES/COLS) from the
    C ``_curses`` module into the ``curses`` package namespace, since some
    curses implementations only define them after initscr() has run.
    """
    import _curses, curses
    # setupterm() raises an error instead of calling exit() on failure,
    # which is why it is invoked before the real initscr().
    setupterm(term=_os.environ.get("TERM", "unknown"),
              fd=_sys.__stdout__.fileno())
    stdscr = _curses.initscr()
    for name, value in _curses.__dict__.items():
        if name.startswith('ACS_') or name in ('LINES', 'COLS'):
            setattr(curses, name, value)
    return stdscr
# This is a similar wrapper for start_color(), which adds the COLORS and
# COLOR_PAIRS variables which are only available after start_color() is
# called.
def start_color():
    """Call the C-level start_color() and expose its late-defined constants.

    COLORS and COLOR_PAIRS only exist on the ``_curses`` module after
    start_color() has run, so they are copied onto the ``curses`` package here.
    """
    import _curses, curses
    result = _curses.start_color()
    for const in ('COLORS', 'COLOR_PAIRS'):
        if hasattr(_curses, const):
            setattr(curses, const, getattr(_curses, const))
    return result
# Import Python has_key() implementation if _curses doesn't contain has_key()
try:
    has_key  # probe only: raises NameError if `from _curses import *` did not provide it
except NameError:
    from has_key import has_key
|
apache-2.0
|
partofthething/home-assistant
|
homeassistant/helpers/device_registry.py
|
1
|
29903
|
"""Provide a way to connect entities belonging to one device."""
from collections import OrderedDict
import logging
import time
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union, cast
import attr
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import Event, callback
from homeassistant.loader import bind_hass
import homeassistant.util.uuid as uuid_util
from .debounce import Debouncer
from .typing import UNDEFINED, HomeAssistantType, UndefinedType
# mypy: disallow_any_generics
if TYPE_CHECKING:
from homeassistant.config_entries import ConfigEntry
from . import entity_registry
_LOGGER = logging.getLogger(__name__)
DATA_REGISTRY = "device_registry"
EVENT_DEVICE_REGISTRY_UPDATED = "device_registry_updated"
STORAGE_KEY = "core.device_registry"
STORAGE_VERSION = 1
SAVE_DELAY = 10
CLEANUP_DELAY = 10
CONNECTION_NETWORK_MAC = "mac"
CONNECTION_UPNP = "upnp"
CONNECTION_ZIGBEE = "zigbee"
IDX_CONNECTIONS = "connections"
IDX_IDENTIFIERS = "identifiers"
REGISTERED_DEVICE = "registered"
DELETED_DEVICE = "deleted"
DISABLED_CONFIG_ENTRY = "config_entry"
DISABLED_INTEGRATION = "integration"
DISABLED_USER = "user"
ORPHANED_DEVICE_KEEP_SECONDS = 86400 * 30
@attr.s(slots=True, frozen=True)
class DeviceEntry:
    """Device Registry Entry.

    Immutable; updates happen via attr.evolve() in DeviceRegistry.
    """
    # Ids of the config entries this device belongs to.
    config_entries: Set[str] = attr.ib(converter=set, factory=set)
    # (connection_type, value) pairs, e.g. (CONNECTION_NETWORK_MAC, "aa:bb:..").
    connections: Set[Tuple[str, str]] = attr.ib(converter=set, factory=set)
    # (namespace, id) tuples supplied by integrations — presumably
    # (domain, unique id); confirm against callers.
    identifiers: Set[Tuple[str, str]] = attr.ib(converter=set, factory=set)
    manufacturer: Optional[str] = attr.ib(default=None)
    model: Optional[str] = attr.ib(default=None)
    name: Optional[str] = attr.ib(default=None)
    sw_version: Optional[str] = attr.ib(default=None)
    # Device id of the device this one is reached through, if any.
    via_device_id: Optional[str] = attr.ib(default=None)
    area_id: Optional[str] = attr.ib(default=None)
    name_by_user: Optional[str] = attr.ib(default=None)
    entry_type: Optional[str] = attr.ib(default=None)
    id: str = attr.ib(factory=uuid_util.random_uuid_hex)
    # This value is not stored, just used to keep track of events to fire.
    is_new: bool = attr.ib(default=False)
    # One of the DISABLED_* constants, or None when the device is enabled.
    disabled_by: Optional[str] = attr.ib(
        default=None,
        validator=attr.validators.in_(
            (
                DISABLED_CONFIG_ENTRY,
                DISABLED_INTEGRATION,
                DISABLED_USER,
                None,
            )
        ),
    )
    suggested_area: Optional[str] = attr.ib(default=None)
    @property
    def disabled(self) -> bool:
        """Return if entry is disabled."""
        return self.disabled_by is not None
@attr.s(slots=True, frozen=True)
class DeletedDeviceEntry:
    """Deleted Device Registry Entry."""
    config_entries: Set[str] = attr.ib()
    connections: Set[Tuple[str, str]] = attr.ib()
    identifiers: Set[Tuple[str, str]] = attr.ib()
    id: str = attr.ib()
    # time.time() of when the entry lost its last config entry (set in
    # DeviceRegistry.async_clear_config_entry); None while still referenced.
    # Used to expire old entries in async_purge_expired_orphaned_devices.
    orphaned_timestamp: Optional[float] = attr.ib()
    def to_device_entry(
        self,
        config_entry_id: str,
        connections: Set[Tuple[str, str]],
        identifiers: Set[Tuple[str, str]],
    ) -> DeviceEntry:
        """Create DeviceEntry from DeletedDeviceEntry.

        Only the connections/identifiers also claimed by the restoring caller
        are carried over; the original device id is reused and the entry is
        flagged is_new so the "create" event fires.
        """
        return DeviceEntry(
            # type ignores: likely https://github.com/python/mypy/issues/8625
            config_entries={config_entry_id},  # type: ignore[arg-type]
            connections=self.connections & connections,  # type: ignore[arg-type]
            identifiers=self.identifiers & identifiers,  # type: ignore[arg-type]
            id=self.id,
            is_new=True,
        )
def format_mac(mac: str) -> str:
    """Format the mac address string for entry into dev reg.

    Accepts colon-, dash- or dot-separated MACs as well as bare hex digits,
    and canonicalizes to lowercase colon-separated byte pairs. Any other
    layout is returned unchanged.
    """
    candidate = mac
    if len(candidate) == 17:
        if candidate.count(":") == 5:
            return candidate.lower()
        if candidate.count("-") == 5:
            candidate = candidate.replace("-", "")
    elif len(candidate) == 14 and candidate.count(".") == 2:
        candidate = candidate.replace(".", "")
    if len(candidate) == 12:
        # Bare hex digits - insert colons between byte pairs.
        lowered = candidate.lower()
        return ":".join(lowered[pos : pos + 2] for pos in range(0, 12, 2))
    # Not sure how formatted, return original
    return mac
class DeviceRegistry:
    """Class to hold a registry of devices.

    Registered and deleted entries are keyed by device id; _devices_index
    holds reverse lookups from identifier/connection tuples to device ids,
    bucketed under REGISTERED_DEVICE and DELETED_DEVICE.
    """
    devices: Dict[str, DeviceEntry]
    deleted_devices: Dict[str, DeletedDeviceEntry]
    _devices_index: Dict[str, Dict[str, Dict[Tuple[str, str], str]]]
    def __init__(self, hass: HomeAssistantType) -> None:
        """Initialize the device registry."""
        self.hass = hass
        self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
        self._clear_index()
    @callback
    def async_get(self, device_id: str) -> Optional[DeviceEntry]:
        """Get device."""
        return self.devices.get(device_id)
    @callback
    def async_get_device(
        self,
        identifiers: Set[Tuple[str, str]],
        connections: Optional[Set[Tuple[str, str]]] = None,
    ) -> Optional[DeviceEntry]:
        """Check if device is registered."""
        device_id = self._async_get_device_id_from_index(
            REGISTERED_DEVICE, identifiers, connections
        )
        if device_id is None:
            return None
        return self.devices[device_id]
    def _async_get_deleted_device(
        self,
        identifiers: Set[Tuple[str, str]],
        connections: Optional[Set[Tuple[str, str]]],
    ) -> Optional[DeletedDeviceEntry]:
        """Check if device is deleted."""
        device_id = self._async_get_device_id_from_index(
            DELETED_DEVICE, identifiers, connections
        )
        if device_id is None:
            return None
        return self.deleted_devices[device_id]
    def _async_get_device_id_from_index(
        self,
        index: str,
        identifiers: Set[Tuple[str, str]],
        connections: Optional[Set[Tuple[str, str]]],
    ) -> Optional[str]:
        """Check if device has previously been registered.

        Identifier matches take precedence; connections are only consulted
        when no identifier hits.
        """
        devices_index = self._devices_index[index]
        for identifier in identifiers:
            if identifier in devices_index[IDX_IDENTIFIERS]:
                return devices_index[IDX_IDENTIFIERS][identifier]
        if not connections:
            return None
        for connection in _normalize_connections(connections):
            if connection in devices_index[IDX_CONNECTIONS]:
                return devices_index[IDX_CONNECTIONS][connection]
        return None
    def _add_device(self, device: Union[DeviceEntry, DeletedDeviceEntry]) -> None:
        """Add a device and index it."""
        if isinstance(device, DeletedDeviceEntry):
            devices_index = self._devices_index[DELETED_DEVICE]
            self.deleted_devices[device.id] = device
        else:
            devices_index = self._devices_index[REGISTERED_DEVICE]
            self.devices[device.id] = device
        _add_device_to_index(devices_index, device)
    def _remove_device(self, device: Union[DeviceEntry, DeletedDeviceEntry]) -> None:
        """Remove a device and remove it from the index."""
        if isinstance(device, DeletedDeviceEntry):
            devices_index = self._devices_index[DELETED_DEVICE]
            self.deleted_devices.pop(device.id)
        else:
            devices_index = self._devices_index[REGISTERED_DEVICE]
            self.devices.pop(device.id)
        _remove_device_from_index(devices_index, device)
    def _update_device(self, old_device: DeviceEntry, new_device: DeviceEntry) -> None:
        """Update a device and the index."""
        self.devices[new_device.id] = new_device
        devices_index = self._devices_index[REGISTERED_DEVICE]
        # Re-index from scratch since identifiers/connections may have changed.
        _remove_device_from_index(devices_index, old_device)
        _add_device_to_index(devices_index, new_device)
    def _clear_index(self) -> None:
        """Clear the index."""
        self._devices_index = {
            REGISTERED_DEVICE: {IDX_IDENTIFIERS: {}, IDX_CONNECTIONS: {}},
            DELETED_DEVICE: {IDX_IDENTIFIERS: {}, IDX_CONNECTIONS: {}},
        }
    def _rebuild_index(self) -> None:
        """Create the index after loading devices."""
        self._clear_index()
        for device in self.devices.values():
            _add_device_to_index(self._devices_index[REGISTERED_DEVICE], device)
        for deleted_device in self.deleted_devices.values():
            _add_device_to_index(self._devices_index[DELETED_DEVICE], deleted_device)
    @callback
    def async_get_or_create(
        self,
        *,
        config_entry_id: str,
        connections: Optional[Set[Tuple[str, str]]] = None,
        identifiers: Optional[Set[Tuple[str, str]]] = None,
        manufacturer: Union[str, None, UndefinedType] = UNDEFINED,
        model: Union[str, None, UndefinedType] = UNDEFINED,
        name: Union[str, None, UndefinedType] = UNDEFINED,
        default_manufacturer: Union[str, None, UndefinedType] = UNDEFINED,
        default_model: Union[str, None, UndefinedType] = UNDEFINED,
        default_name: Union[str, None, UndefinedType] = UNDEFINED,
        sw_version: Union[str, None, UndefinedType] = UNDEFINED,
        entry_type: Union[str, None, UndefinedType] = UNDEFINED,
        via_device: Optional[Tuple[str, str]] = None,
        # To disable a device if it gets created
        disabled_by: Union[str, None, UndefinedType] = UNDEFINED,
        suggested_area: Union[str, None, UndefinedType] = UNDEFINED,
    ) -> Optional[DeviceEntry]:
        """Get device. Create if it doesn't exist.

        Returns None when neither identifiers nor connections are given.
        A matching deleted entry is restored (same id) rather than creating
        a brand-new one.
        """
        if not identifiers and not connections:
            return None
        if identifiers is None:
            identifiers = set()
        if connections is None:
            connections = set()
        else:
            connections = _normalize_connections(connections)
        device = self.async_get_device(identifiers, connections)
        if device is None:
            deleted_device = self._async_get_deleted_device(identifiers, connections)
            if deleted_device is None:
                device = DeviceEntry(is_new=True)
            else:
                self._remove_device(deleted_device)
                device = deleted_device.to_device_entry(
                    config_entry_id, connections, identifiers
                )
            self._add_device(device)
        # default_* values only apply when the stored field is still unset.
        if default_manufacturer is not UNDEFINED and device.manufacturer is None:
            manufacturer = default_manufacturer
        if default_model is not UNDEFINED and device.model is None:
            model = default_model
        if default_name is not UNDEFINED and device.name is None:
            name = default_name
        if via_device is not None:
            via = self.async_get_device({via_device})
            via_device_id: Union[str, UndefinedType] = via.id if via else UNDEFINED
        else:
            via_device_id = UNDEFINED
        return self._async_update_device(
            device.id,
            add_config_entry_id=config_entry_id,
            via_device_id=via_device_id,
            merge_connections=connections or UNDEFINED,
            merge_identifiers=identifiers or UNDEFINED,
            manufacturer=manufacturer,
            model=model,
            name=name,
            sw_version=sw_version,
            entry_type=entry_type,
            disabled_by=disabled_by,
            suggested_area=suggested_area,
        )
    @callback
    def async_update_device(
        self,
        device_id: str,
        *,
        area_id: Union[str, None, UndefinedType] = UNDEFINED,
        manufacturer: Union[str, None, UndefinedType] = UNDEFINED,
        model: Union[str, None, UndefinedType] = UNDEFINED,
        name: Union[str, None, UndefinedType] = UNDEFINED,
        name_by_user: Union[str, None, UndefinedType] = UNDEFINED,
        new_identifiers: Union[Set[Tuple[str, str]], UndefinedType] = UNDEFINED,
        sw_version: Union[str, None, UndefinedType] = UNDEFINED,
        via_device_id: Union[str, None, UndefinedType] = UNDEFINED,
        remove_config_entry_id: Union[str, UndefinedType] = UNDEFINED,
        disabled_by: Union[str, None, UndefinedType] = UNDEFINED,
        suggested_area: Union[str, None, UndefinedType] = UNDEFINED,
    ) -> Optional[DeviceEntry]:
        """Update properties of a device.

        Public wrapper around _async_update_device exposing only the
        externally-settable fields.
        """
        return self._async_update_device(
            device_id,
            area_id=area_id,
            manufacturer=manufacturer,
            model=model,
            name=name,
            name_by_user=name_by_user,
            new_identifiers=new_identifiers,
            sw_version=sw_version,
            via_device_id=via_device_id,
            remove_config_entry_id=remove_config_entry_id,
            disabled_by=disabled_by,
            suggested_area=suggested_area,
        )
    @callback
    def _async_update_device(
        self,
        device_id: str,
        *,
        add_config_entry_id: Union[str, UndefinedType] = UNDEFINED,
        remove_config_entry_id: Union[str, UndefinedType] = UNDEFINED,
        merge_connections: Union[Set[Tuple[str, str]], UndefinedType] = UNDEFINED,
        merge_identifiers: Union[Set[Tuple[str, str]], UndefinedType] = UNDEFINED,
        new_identifiers: Union[Set[Tuple[str, str]], UndefinedType] = UNDEFINED,
        manufacturer: Union[str, None, UndefinedType] = UNDEFINED,
        model: Union[str, None, UndefinedType] = UNDEFINED,
        name: Union[str, None, UndefinedType] = UNDEFINED,
        sw_version: Union[str, None, UndefinedType] = UNDEFINED,
        entry_type: Union[str, None, UndefinedType] = UNDEFINED,
        via_device_id: Union[str, None, UndefinedType] = UNDEFINED,
        area_id: Union[str, None, UndefinedType] = UNDEFINED,
        name_by_user: Union[str, None, UndefinedType] = UNDEFINED,
        disabled_by: Union[str, None, UndefinedType] = UNDEFINED,
        suggested_area: Union[str, None, UndefinedType] = UNDEFINED,
    ) -> Optional[DeviceEntry]:
        """Update device attributes.

        Builds a `changes` dict of fields that actually differ; when empty
        the stored entry is returned untouched and no event fires. Removing
        the last config entry removes the device entirely (returns None).
        """
        old = self.devices[device_id]
        changes: Dict[str, Any] = {}
        config_entries = old.config_entries
        if (
            suggested_area not in (UNDEFINED, None, "")
            and area_id is UNDEFINED
            and old.area_id is None
        ):
            # Resolve the suggested area to a real (possibly new) area id.
            area = self.hass.helpers.area_registry.async_get(
                self.hass
            ).async_get_or_create(suggested_area)
            area_id = area.id
        if (
            add_config_entry_id is not UNDEFINED
            and add_config_entry_id not in old.config_entries
        ):
            config_entries = old.config_entries | {add_config_entry_id}
        if (
            remove_config_entry_id is not UNDEFINED
            and remove_config_entry_id in config_entries
        ):
            if config_entries == {remove_config_entry_id}:
                # Last referencing config entry gone -> drop the device.
                self.async_remove_device(device_id)
                return None
            config_entries = config_entries - {remove_config_entry_id}
        if config_entries != old.config_entries:
            changes["config_entries"] = config_entries
        for attr_name, setvalue in (
            ("connections", merge_connections),
            ("identifiers", merge_identifiers),
        ):
            old_value = getattr(old, attr_name)
            # If not undefined, check if `value` contains new items.
            if setvalue is not UNDEFINED and not setvalue.issubset(old_value):
                changes[attr_name] = old_value | setvalue
        if new_identifiers is not UNDEFINED:
            # new_identifiers replaces wholesale (overrides any merge above).
            changes["identifiers"] = new_identifiers
        for attr_name, value in (
            ("manufacturer", manufacturer),
            ("model", model),
            ("name", name),
            ("sw_version", sw_version),
            ("entry_type", entry_type),
            ("via_device_id", via_device_id),
            ("disabled_by", disabled_by),
            ("suggested_area", suggested_area),
        ):
            if value is not UNDEFINED and value != getattr(old, attr_name):
                changes[attr_name] = value
        if area_id is not UNDEFINED and area_id != old.area_id:
            changes["area_id"] = area_id
        if name_by_user is not UNDEFINED and name_by_user != old.name_by_user:
            changes["name_by_user"] = name_by_user
        if old.is_new:
            changes["is_new"] = False
        if not changes:
            return old
        new = attr.evolve(old, **changes)
        self._update_device(old, new)
        self.async_schedule_save()
        self.hass.bus.async_fire(
            EVENT_DEVICE_REGISTRY_UPDATED,
            {
                "action": "create" if "is_new" in changes else "update",
                "device_id": new.id,
            },
        )
        return new
    @callback
    def async_remove_device(self, device_id: str) -> None:
        """Remove a device from the device registry.

        The device is kept as a DeletedDeviceEntry so it can be restored
        with the same id if it is re-registered later.
        """
        device = self.devices[device_id]
        self._remove_device(device)
        self._add_device(
            DeletedDeviceEntry(
                config_entries=device.config_entries,
                connections=device.connections,
                identifiers=device.identifiers,
                id=device.id,
                orphaned_timestamp=None,
            )
        )
        self.hass.bus.async_fire(
            EVENT_DEVICE_REGISTRY_UPDATED, {"action": "remove", "device_id": device_id}
        )
        self.async_schedule_save()
    async def async_load(self) -> None:
        """Load the device registry."""
        async_setup_cleanup(self.hass, self)
        data = await self._store.async_load()
        devices = OrderedDict()
        deleted_devices = OrderedDict()
        if data is not None:
            for device in data["devices"]:
                devices[device["id"]] = DeviceEntry(
                    config_entries=set(device["config_entries"]),
                    # type ignores (if tuple arg was cast): likely https://github.com/python/mypy/issues/8625
                    connections={tuple(conn) for conn in device["connections"]},  # type: ignore[misc]
                    identifiers={tuple(iden) for iden in device["identifiers"]},  # type: ignore[misc]
                    manufacturer=device["manufacturer"],
                    model=device["model"],
                    name=device["name"],
                    sw_version=device["sw_version"],
                    # Introduced in 0.110
                    entry_type=device.get("entry_type"),
                    id=device["id"],
                    # Introduced in 0.79
                    # renamed in 0.95
                    via_device_id=(
                        device.get("via_device_id") or device.get("hub_device_id")
                    ),
                    # Introduced in 0.87
                    area_id=device.get("area_id"),
                    name_by_user=device.get("name_by_user"),
                    # Introduced in 0.119
                    disabled_by=device.get("disabled_by"),
                )
            # Introduced in 0.111
            for device in data.get("deleted_devices", []):
                deleted_devices[device["id"]] = DeletedDeviceEntry(
                    config_entries=set(device["config_entries"]),
                    # type ignores (if tuple arg was cast): likely https://github.com/python/mypy/issues/8625
                    connections={tuple(conn) for conn in device["connections"]},  # type: ignore[misc]
                    identifiers={tuple(iden) for iden in device["identifiers"]},  # type: ignore[misc]
                    id=device["id"],
                    # Introduced in 2021.2
                    orphaned_timestamp=device.get("orphaned_timestamp"),
                )
        self.devices = devices
        self.deleted_devices = deleted_devices
        self._rebuild_index()
    @callback
    def async_schedule_save(self) -> None:
        """Schedule saving the device registry."""
        self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
    @callback
    def _data_to_save(self) -> Dict[str, List[Dict[str, Any]]]:
        """Return data of device registry to store in a file."""
        data = {}
        data["devices"] = [
            {
                "config_entries": list(entry.config_entries),
                "connections": list(entry.connections),
                "identifiers": list(entry.identifiers),
                "manufacturer": entry.manufacturer,
                "model": entry.model,
                "name": entry.name,
                "sw_version": entry.sw_version,
                "entry_type": entry.entry_type,
                "id": entry.id,
                "via_device_id": entry.via_device_id,
                "area_id": entry.area_id,
                "name_by_user": entry.name_by_user,
                "disabled_by": entry.disabled_by,
            }
            for entry in self.devices.values()
        ]
        data["deleted_devices"] = [
            {
                "config_entries": list(entry.config_entries),
                "connections": list(entry.connections),
                "identifiers": list(entry.identifiers),
                "id": entry.id,
                "orphaned_timestamp": entry.orphaned_timestamp,
            }
            for entry in self.deleted_devices.values()
        ]
        return data
    @callback
    def async_clear_config_entry(self, config_entry_id: str) -> None:
        """Clear config entry from registry entries."""
        now_time = time.time()
        for device in list(self.devices.values()):
            self._async_update_device(device.id, remove_config_entry_id=config_entry_id)
        for deleted_device in list(self.deleted_devices.values()):
            config_entries = deleted_device.config_entries
            if config_entry_id not in config_entries:
                continue
            if config_entries == {config_entry_id}:
                # Add a time stamp when the deleted device became orphaned
                self.deleted_devices[deleted_device.id] = attr.evolve(
                    deleted_device, orphaned_timestamp=now_time, config_entries=set()
                )
            else:
                config_entries = config_entries - {config_entry_id}
                # No need to reindex here since we currently
                # do not have a lookup by config entry
                self.deleted_devices[deleted_device.id] = attr.evolve(
                    deleted_device, config_entries=config_entries
                )
        self.async_schedule_save()
    @callback
    def async_purge_expired_orphaned_devices(self) -> None:
        """Purge expired orphaned devices from the registry.
        We need to purge these periodically to avoid the database
        growing without bound.
        """
        now_time = time.time()
        for deleted_device in list(self.deleted_devices.values()):
            if deleted_device.orphaned_timestamp is None:
                continue
            if (
                deleted_device.orphaned_timestamp + ORPHANED_DEVICE_KEEP_SECONDS
                < now_time
            ):
                self._remove_device(deleted_device)
    @callback
    def async_clear_area_id(self, area_id: str) -> None:
        """Clear area id from registry entries."""
        for dev_id, device in self.devices.items():
            if area_id == device.area_id:
                self._async_update_device(dev_id, area_id=None)
@callback
def async_get(hass: HomeAssistantType) -> DeviceRegistry:
    """Return the device registry stored on the hass object."""
    registry = hass.data[DATA_REGISTRY]
    return cast(DeviceRegistry, registry)
async def async_load(hass: HomeAssistantType) -> None:
    """Create the device registry, store it on hass and load its data."""
    # Guard against double initialization.
    assert DATA_REGISTRY not in hass.data
    registry = DeviceRegistry(hass)
    hass.data[DATA_REGISTRY] = registry
    await registry.async_load()
@bind_hass
async def async_get_registry(hass: HomeAssistantType) -> DeviceRegistry:
    """Get device registry.
    This is deprecated and will be removed in the future. Use async_get instead.
    """
    registry = async_get(hass)
    return registry
@callback
def async_entries_for_area(registry: DeviceRegistry, area_id: str) -> List[DeviceEntry]:
    """Return entries that match an area."""
    matches: List[DeviceEntry] = []
    for entry in registry.devices.values():
        if entry.area_id == area_id:
            matches.append(entry)
    return matches
@callback
def async_entries_for_config_entry(
    registry: DeviceRegistry, config_entry_id: str
) -> List[DeviceEntry]:
    """Return entries that match a config entry."""
    matches: List[DeviceEntry] = []
    for entry in registry.devices.values():
        if config_entry_id in entry.config_entries:
            matches.append(entry)
    return matches
@callback
def async_config_entry_disabled_by_changed(
    registry: DeviceRegistry, config_entry: "ConfigEntry"
) -> None:
    """Handle a config entry being disabled or enabled.
    Disable devices in the registry that are associated with a config entry when
    the config entry is disabled, enable devices in the registry that are associated
    with a config entry when the config entry is enabled and the devices are marked
    DISABLED_CONFIG_ENTRY.
    """
    devices = async_entries_for_config_entry(registry, config_entry.entry_id)
    if config_entry.disabled_by:
        for device in devices:
            if device.disabled:
                # Device already disabled, do not overwrite
                continue
            registry.async_update_device(device.id, disabled_by=DISABLED_CONFIG_ENTRY)
        return
    # Config entry re-enabled: lift only the disables this mechanism applied.
    for device in devices:
        if device.disabled_by != DISABLED_CONFIG_ENTRY:
            continue
        registry.async_update_device(device.id, disabled_by=None)
@callback
def async_cleanup(
    hass: HomeAssistantType,
    dev_reg: DeviceRegistry,
    ent_reg: "entity_registry.EntityRegistry",
) -> None:
    """Clean up device registry."""
    # Devices still referenced by at least one existing config entry.
    valid_entry_ids = {entry.entry_id for entry in hass.config_entries.async_entries()}
    kept_by_config_entry = set()
    for device in dev_reg.devices.values():
        if any(ce_id in valid_entry_ids for ce_id in device.config_entries):
            kept_by_config_entry.add(device.id)
    # Devices still referenced from the entity registry.
    kept_by_entity = {entry.device_id for entry in ent_reg.entities.values()}
    for orphan_id in set(dev_reg.devices) - kept_by_entity - kept_by_config_entry:
        dev_reg.async_remove_device(orphan_id)
    # Drop references to config entries that no longer exist.
    # This shouldn't happen but have not been able to track down the bug :(
    for device in list(dev_reg.devices.values()):
        for ce_id in device.config_entries:
            if ce_id not in valid_entry_ids:
                dev_reg.async_update_device(
                    device.id, remove_config_entry_id=ce_id
                )
    # Periodic purge of orphaned devices to avoid the registry
    # growing without bounds when there are lots of deleted devices
    dev_reg.async_purge_expired_orphaned_devices()
@callback
def async_setup_cleanup(hass: HomeAssistantType, dev_reg: DeviceRegistry) -> None:
    """Clean up device registry when entities removed."""
    from . import entity_registry  # pylint: disable=import-outside-toplevel
    async def cleanup() -> None:
        """Cleanup."""
        ent_reg = await entity_registry.async_get_registry(hass)
        async_cleanup(hass, dev_reg, ent_reg)
    # Debounce so a burst of entity-registry events triggers one cleanup pass.
    debounced_cleanup = Debouncer(
        hass, _LOGGER, cooldown=CLEANUP_DELAY, immediate=False, function=cleanup
    )
    async def entity_registry_changed(event: Event) -> None:
        """Handle entity updated or removed dispatch."""
        await debounced_cleanup.async_call()
    @callback
    def entity_registry_changed_filter(event: Event) -> bool:
        """Handle entity updated or removed filter."""
        # Only removals, and updates that touch device_id, warrant a cleanup.
        if (
            event.data["action"] == "update"
            and "device_id" not in event.data["changes"]
        ) or event.data["action"] == "create":
            return False
        return True
    if hass.is_running:
        hass.bus.async_listen(
            entity_registry.EVENT_ENTITY_REGISTRY_UPDATED,
            entity_registry_changed,
            event_filter=entity_registry_changed_filter,
        )
        return
    async def startup_clean(event: Event) -> None:
        """Clean up on startup."""
        # Defer listener registration and the first cleanup until HA has
        # fully started, to avoid churn during initial entity setup.
        hass.bus.async_listen(
            entity_registry.EVENT_ENTITY_REGISTRY_UPDATED,
            entity_registry_changed,
            event_filter=entity_registry_changed_filter,
        )
        await debounced_cleanup.async_call()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, startup_clean)
def _normalize_connections(connections: Set[Tuple[str, str]]) -> Set[Tuple[str, str]]:
    """Normalize connections to ensure we can match mac addresses."""
    normalized = set()
    for conn_type, conn_value in connections:
        if conn_type == CONNECTION_NETWORK_MAC:
            conn_value = format_mac(conn_value)
        normalized.add((conn_type, conn_value))
    return normalized
def _add_device_to_index(
    devices_index: Dict[str, Dict[Tuple[str, str], str]],
    device: Union[DeviceEntry, DeletedDeviceEntry],
) -> None:
    """Add a device to the index."""
    identifier_bucket = devices_index[IDX_IDENTIFIERS]
    connection_bucket = devices_index[IDX_CONNECTIONS]
    for identifier in device.identifiers:
        identifier_bucket[identifier] = device.id
    for connection in device.connections:
        connection_bucket[connection] = device.id
def _remove_device_from_index(
    devices_index: Dict[str, Dict[Tuple[str, str], str]],
    device: Union[DeviceEntry, DeletedDeviceEntry],
) -> None:
    """Remove a device from the index."""
    # pop() with a default tolerates tuples that were never indexed.
    identifier_bucket = devices_index[IDX_IDENTIFIERS]
    for identifier in device.identifiers:
        identifier_bucket.pop(identifier, None)
    connection_bucket = devices_index[IDX_CONNECTIONS]
    for connection in device.connections:
        connection_bucket.pop(connection, None)
|
mit
|
Dhivyap/ansible
|
test/units/modules/network/netvisor/test_pn_role.py
|
23
|
2994
|
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_role
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule
class TestRoleModule(TestNvosModule):
    """Unit tests for the pn_role module's CLI command construction."""
    module = pn_role
    def setUp(self):
        """Patch the module's CLI helpers before each test."""
        self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_role.run_cli')
        self.run_nvos_commands = self.mock_run_nvos_commands.start()
        self.mock_run_check_cli = patch('ansible.modules.network.netvisor.pn_role.check_cli')
        self.run_check_cli = self.mock_run_check_cli.start()
    def tearDown(self):
        """Stop both patchers so the patches do not leak into other tests."""
        self.mock_run_nvos_commands.stop()
        # BUG FIX: this previously called self.run_check_cli.stop(), which is
        # the MagicMock returned by start(), not the patcher - so the patch on
        # check_cli was never undone. Stop the patcher object instead.
        self.mock_run_check_cli.stop()
    def run_cli_patch(self, module, cli, state_map):
        """Stand-in for run_cli that records the CLI string built by the module."""
        if state_map['present'] == 'role-create':
            results = dict(
                changed=True,
                cli_cmd=cli
            )
        elif state_map['absent'] == 'role-delete':
            results = dict(
                changed=True,
                cli_cmd=cli
            )
        elif state_map['update'] == 'role-modify':
            results = dict(
                changed=True,
                cli_cmd=cli
            )
        module.exit_json(**results)
    def load_fixtures(self, commands=None, state=None, transport='cli'):
        """Wire the patched helpers for the requested module state."""
        self.run_nvos_commands.side_effect = self.run_cli_patch
        # check_cli reports whether the role already exists on the switch:
        # it must not exist for create, and must exist for delete/modify.
        if state == 'present':
            self.run_check_cli.return_value = False
        if state == 'absent':
            self.run_check_cli.return_value = True
        if state == 'update':
            self.run_check_cli.return_value = True
    def test_role_create(self):
        """role-create builds the expected switch CLI command."""
        set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo',
                         'pn_scope': 'local', 'pn_access': 'read-only', 'state': 'present'})
        result = self.execute_module(changed=True, state='present')
        expected_cmd = ' switch sw01 role-create name foo scope local access read-only'
        self.assertEqual(result['cli_cmd'], expected_cmd)
    def test_role_delete(self):
        """role-delete builds the expected switch CLI command."""
        set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo',
                         'state': 'absent'})
        result = self.execute_module(changed=True, state='absent')
        expected_cmd = ' switch sw01 role-delete name foo '
        self.assertEqual(result['cli_cmd'], expected_cmd)
    def test_role_update(self):
        """role-modify builds the expected switch CLI command."""
        set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo',
                         'pn_access': 'read-write', 'pn_sudo': True, 'pn_shell': True, 'state': 'update'})
        result = self.execute_module(changed=True, state='update')
        expected_cmd = ' switch sw01 role-modify  name foo  access read-write shell sudo '
        self.assertEqual(result['cli_cmd'], expected_cmd)
|
gpl-3.0
|
iotile/coretools
|
iotilecore/test/test_schema_verify/test_dict_verifier.py
|
1
|
2701
|
import pytest
from iotile.core.utilities.schema_verify import BytesVerifier, DictionaryVerifier, ListVerifier, StringVerifier, IntVerifier, BooleanVerifier, LiteralVerifier, OptionsVerifier
from iotile.core.exceptions import ValidationError
@pytest.fixture
def verifier1():
    """Build the shared dictionary verifier used across these tests."""
    verifier = DictionaryVerifier('test verifier')
    verifier.add_required('req_key', DictionaryVerifier())
    verifier.add_optional('opt_key', ListVerifier(StringVerifier('a string')))
    verifier.add_optional('opt2_key', BooleanVerifier(desc='a boolean'))
    return verifier
@pytest.fixture
def dict1():
    """Return a dictionary that fully satisfies verifier1's schema."""
    return {
        'req_key': {},
        'opt_key': ['a', 'b', 'c'],
        'opt2_key': True,
    }
def test_dict_verifier(verifier1, dict1):
    """Make sure dict verification works

    A fully populated dictionary must pass without raising.
    """
    verifier1.verify(dict1)
def test_dict_noreq(verifier1):
    """Make sure a missing required key is found

    Verifying a dictionary without 'req_key' must raise ValidationError.
    """
    dict1 = {}
    #dict1['req_key'] = {}
    dict1['opt_key'] = ['a', 'b', 'c']
    dict1['opt2_key'] = True
    # BUG FIX: a stray ``return dict1`` here (left over from the fixture this
    # test was copied from) made the assertion below unreachable, so the test
    # always passed without checking anything.
    with pytest.raises(ValidationError):
        verifier1.verify(dict1)
def test_dict_noopt2(verifier1):
    """Omitting the optional boolean key must still verify cleanly."""
    data = {
        'req_key': {},
        'opt_key': ['a', 'b', 'c'],
        # opt2_key deliberately omitted
    }
    verifier1.verify(data)
def test_dict_noopt(verifier1):
    """Omitting the optional list key must still verify cleanly."""
    data = {
        'req_key': {},
        # opt_key deliberately omitted
        'opt2_key': True,
    }
    verifier1.verify(data)
def test_dict_wrongopt(verifier1):
    """An optional key whose value has the wrong type must be rejected."""
    data = {
        'req_key': {},
        'opt2_key': "hello",  # schema expects a boolean here
    }
    with pytest.raises(ValidationError):
        verifier1.verify(data)
def test_options_verifier():
    """Check and make sure that OptionsVerifier works
    """
    value = 'abc'
    # Matching either branch of the alternatives is enough to pass.
    either = OptionsVerifier(IntVerifier('int'), StringVerifier('string'))
    either.verify(value)
    exact = OptionsVerifier(LiteralVerifier('abc'))
    exact.verify(value)
    # Values matching no alternative must be rejected.
    for bad_value in ('ab', 1):
        with pytest.raises(ValidationError):
            exact.verify(bad_value)
def test_bytes_decoding():
    """Check to make sure that decoding bytes works."""
    # Both encodings of the same 4-byte payload must decode identically.
    decoded_b64 = BytesVerifier(encoding='base64').verify('zasAAA==')
    assert len(decoded_b64) == 4
    decoded_hex = BytesVerifier(encoding='hex').verify('cdab0000')
    assert len(decoded_hex) == 4
    assert decoded_b64 == decoded_hex
|
gpl-3.0
|
veger/ansible
|
test/units/modules/storage/netapp/test_netapp_e_auditlog.py
|
68
|
10758
|
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.modules.storage.netapp.netapp_e_auditlog import AuditLog
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
__metaclass__ = type
from units.compat import mock
class AuditLogTests(ModuleTestCase):
    """Unit tests for the netapp_e_auditlog module's AuditLog class.

    Every test mocks out the module-level ``request`` helper (REQ_FUNC) so no
    real web-services calls are made.
    """

    # Minimum module arguments every invocation needs.
    REQUIRED_PARAMS = {'api_username': 'rw',
                       'api_password': 'password',
                       'api_url': 'http://localhost',
                       'ssid': '1'}
    # Dotted path of the request() helper patched in each test.
    REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_auditlog.request'
    # Valid bounds for the max_records module argument.
    MAX_RECORDS_MAXIMUM = 50000
    MAX_RECORDS_MINIMUM = 100

    def _set_args(self, **kwargs):
        """Merge kwargs over REQUIRED_PARAMS and install them as module args."""
        module_args = self.REQUIRED_PARAMS.copy()
        if kwargs is not None:
            module_args.update(kwargs)
        set_module_args(module_args)

    def test_max_records_argument_pass(self):
        """Verify AuditLog argument's max_records accepts values on and inside its boundaries."""
        initial = {"max_records": 1000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90}
        # Lower bound, a mid value, and the upper bound must all construct cleanly.
        max_records_set = (self.MAX_RECORDS_MINIMUM, 25000, self.MAX_RECORDS_MAXIMUM)
        for max_records in max_records_set:
            initial["max_records"] = max_records
            self._set_args(**initial)
            with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
                audit_log = AuditLog()
                self.assertTrue(audit_log.max_records == max_records)

    def test_max_records_argument_fail(self):
        """Verify AuditLog argument's max_records rejects values just outside its boundaries."""
        initial = {"max_records": 1000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90}
        max_records_set = (self.MAX_RECORDS_MINIMUM - 1, self.MAX_RECORDS_MAXIMUM + 1)
        for max_records in max_records_set:
            with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log max_records count must be between 100 and 50000"):
                initial["max_records"] = max_records
                self._set_args(**initial)
                AuditLog()

    def test_threshold_argument_pass(self):
        """Verify AuditLog argument's threshold accepts values on and inside its boundaries."""
        initial = {"max_records": 1000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90}
        threshold_set = (60, 75, 90)
        for threshold in threshold_set:
            initial["threshold"] = threshold
            self._set_args(**initial)
            with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
                audit_log = AuditLog()
                self.assertTrue(audit_log.threshold == threshold)

    def test_threshold_argument_fail(self):
        """Verify AuditLog argument's threshold rejects values just outside its boundaries."""
        initial = {"max_records": 1000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90}
        threshold_set = (59, 91)
        for threshold in threshold_set:
            with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log percent threshold must be between 60 and 90"):
                initial["threshold"] = threshold
                self._set_args(**initial)
                with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
                    AuditLog()

    def test_is_proxy_pass(self):
        """Verify that True is returned when proxy is used to communicate with storage."""
        initial = {"max_records": 1000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90,
                   "api_url": "https://10.1.1.10/devmgr/v2"}
        self._set_args(**initial)
        with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
            audit_log = AuditLog()
            # is_proxy() re-queries the about endpoint, so it needs its own patch.
            with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
                self.assertTrue(audit_log.is_proxy())

    def test_is_proxy_fail(self):
        """Verify that AnsibleJsonFail exception is thrown when the about request errors."""
        initial = {"max_records": 1000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90}
        self._set_args(**initial)
        with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
            audit_log = AuditLog()
            with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the webservices about information"):
                # Returning an Exception instance from the mock simulates a request failure.
                with mock.patch(self.REQ_FUNC, return_value=Exception()):
                    audit_log.is_proxy()

    def test_get_configuration_pass(self):
        """Validate get_configuration does not throw when a normal response is returned."""
        initial = {"max_records": 1000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90}
        expected = {"auditLogMaxRecords": 1000,
                    "auditLogLevel": "writeOnly",
                    "auditLogFullPolicy": "overWrite",
                    "auditLogWarningThresholdPct": 90}
        self._set_args(**initial)
        with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
            audit_log = AuditLog()
            with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
                body = audit_log.get_configuration()
                self.assertTrue(body == expected)

    def test_get_configuration_fail(self):
        """Verify AnsibleJsonFail exception is thrown when the configuration request errors."""
        initial = {"max_records": 1000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90}
        self._set_args(**initial)
        with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
            audit_log = AuditLog()
            with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the audit-log configuration!"):
                with mock.patch(self.REQ_FUNC, return_value=Exception()):
                    audit_log.get_configuration()

    def test_build_configuration_pass(self):
        """Validate that each single-field configuration change forces an update."""
        response = {"auditLogMaxRecords": 1000,
                    "auditLogLevel": "writeOnly",
                    "auditLogFullPolicy": "overWrite",
                    "auditLogWarningThresholdPct": 90}
        initial = {"max_records": 1000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90}
        # Each change differs from `response` in exactly one field.
        changes = [{"max_records": 50000},
                   {"log_level": "all"},
                   {"full_policy": "preventSystemAccess"},
                   {"threshold": 75}]
        for change in changes:
            initial_with_changes = initial.copy()
            initial_with_changes.update(change)
            self._set_args(**initial_with_changes)
            with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
                audit_log = AuditLog()
                with mock.patch(self.REQ_FUNC, return_value=(200, response)):
                    update = audit_log.build_configuration()
                    self.assertTrue(update)

    def test_delete_log_messages_fail(self):
        """Verify AnsibleJsonFail exception is thrown when the delete request errors."""
        initial = {"max_records": 1000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90}
        self._set_args(**initial)
        with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
            audit_log = AuditLog()
            with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete audit-log messages!"):
                with mock.patch(self.REQ_FUNC, return_value=Exception()):
                    audit_log.delete_log_messages()

    def test_update_configuration_delete_pass(self):
        """Verify a 422 response plus force=True still returns True (log is purged and retried)."""
        body = {"auditLogMaxRecords": 1000,
                "auditLogLevel": "writeOnly",
                "auditLogFullPolicy": "overWrite",
                "auditLogWarningThresholdPct": 90}
        initial = {"max_records": 2000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90,
                   "force": True}
        self._set_args(**initial)
        with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
            audit_log = AuditLog()
            # Side effects model: get-config, 422 full-log error, delete, retry update.
            with mock.patch(self.REQ_FUNC, side_effect=[(200, body),
                                                        (422, {u"invalidFieldsIfKnown": None,
                                                               u"errorMessage": u"Configuration change...",
                                                               u"localizedMessage": u"Configuration change...",
                                                               u"retcode": u"auditLogImmediateFullCondition",
                                                               u"codeType": u"devicemgrerror"}),
                                                        (200, None),
                                                        (200, None)]):
                self.assertTrue(audit_log.update_configuration())

    def test_update_configuration_delete_skip_fail(self):
        """Verify a 422 response without force results in AnsibleJsonFail."""
        body = {"auditLogMaxRecords": 1000,
                "auditLogLevel": "writeOnly",
                "auditLogFullPolicy": "overWrite",
                "auditLogWarningThresholdPct": 90}
        initial = {"max_records": 2000,
                   "log_level": "writeOnly",
                   "full_policy": "overWrite",
                   "threshold": 90,
                   "force": False}
        self._set_args(**initial)
        with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
            audit_log = AuditLog()
            with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update audit-log configuration!"):
                with mock.patch(self.REQ_FUNC, side_effect=[(200, body), Exception(422, {"errorMessage": "error"}),
                                                            (200, None), (200, None)]):
                    audit_log.update_configuration()
|
gpl-3.0
|
zhenv5/scikit-learn
|
sklearn/decomposition/__init__.py
|
147
|
1421
|
"""
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
# Public API of sklearn.decomposition: names exported by
# ``from sklearn.decomposition import *``. Keep in sync with the imports above.
__all__ = ['DictionaryLearning',
           'FastICA',
           'IncrementalPCA',
           'KernelPCA',
           'MiniBatchDictionaryLearning',
           'MiniBatchSparsePCA',
           'NMF',
           'PCA',
           'ProjectedGradientNMF',
           'RandomizedPCA',
           'SparseCoder',
           'SparsePCA',
           'dict_learning',
           'dict_learning_online',
           'fastica',
           'randomized_svd',
           'sparse_encode',
           'FactorAnalysis',
           'TruncatedSVD',
           'LatentDirichletAllocation']
|
bsd-3-clause
|
alexec/selenium
|
py/test/selenium/webdriver/common/element_attribute_tests.py
|
65
|
12160
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
class ElementAttributeTests(unittest.TestCase):
    """WebDriver tests for Element.get_attribute / is_enabled / is_selected.

    Assumes the harness injects ``self.driver`` and ``self.webserver``
    before the tests run (neither is set up in this file).
    """

    def testShouldReturnNullWhenGettingTheValueOfAnAttributeThatIsNotListed(self):
        self._loadSimplePage()
        head = self.driver.find_element_by_xpath("/html")
        attribute = head.get_attribute("cheese")
        self.assertTrue(attribute is None)

    def testShouldReturnNullWhenGettingSrcAttributeOfInvalidImgTag(self):
        self._loadSimplePage()
        img = self.driver.find_element_by_id("invalidImgTag")
        img_attr = img.get_attribute("src")
        self.assertTrue(img_attr is None)

    def testShouldReturnAnAbsoluteUrlWhenGettingSrcAttributeOfAValidImgTag(self):
        self._loadSimplePage()
        img = self.driver.find_element_by_id("validImgTag")
        img_attr = img.get_attribute("src")
        self.assertTrue("icon.gif" in img_attr)

    def testShouldReturnAnAbsoluteUrlWhenGettingHrefAttributeOfAValidAnchorTag(self):
        self._loadSimplePage()
        img = self.driver.find_element_by_id("validAnchorTag")
        img_attr = img.get_attribute("href")
        self.assertTrue("icon.gif" in img_attr)

    def testShouldReturnEmptyAttributeValuesWhenPresentAndTheValueIsActuallyEmpty(self):
        self._loadSimplePage()
        body = self.driver.find_element_by_xpath("//body")
        self.assertEqual("", body.get_attribute("style"))

    def testShouldReturnTheValueOfTheDisabledAttributeAsFalseIfNotSet(self):
        self._loadPage("formPage")
        inputElement = self.driver.find_element_by_xpath("//input[@id='working']")
        self.assertEqual(None, inputElement.get_attribute("disabled"))
        self.assertTrue(inputElement.is_enabled())
        pElement = self.driver.find_element_by_id("peas")
        self.assertEqual(None, pElement.get_attribute("disabled"))
        self.assertTrue(pElement.is_enabled())

    def testShouldReturnTheValueOfTheIndexAttrbuteEvenIfItIsMissing(self):
        self._loadPage("formPage")
        multiSelect = self.driver.find_element_by_id("multi")
        options = multiSelect.find_elements_by_tag_name("option")
        self.assertEqual("1", options[1].get_attribute("index"))

    def testShouldIndicateTheElementsThatAreDisabledAreNotis_enabled(self):
        self._loadPage("formPage")
        inputElement = self.driver.find_element_by_xpath("//input[@id='notWorking']")
        self.assertFalse(inputElement.is_enabled())
        inputElement = self.driver.find_element_by_xpath("//input[@id='working']")
        self.assertTrue(inputElement.is_enabled())

    def testElementsShouldBeDisabledIfTheyAreDisabledUsingRandomDisabledStrings(self):
        self._loadPage("formPage")
        disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1")
        self.assertFalse(disabledTextElement1.is_enabled())
        disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2")
        self.assertFalse(disabledTextElement2.is_enabled())
        disabledSubmitElement = self.driver.find_element_by_id("disabledSubmitElement")
        self.assertFalse(disabledSubmitElement.is_enabled())

    def testShouldIndicateWhenATextAreaIsDisabled(self):
        self._loadPage("formPage")
        textArea = self.driver.find_element_by_xpath("//textarea[@id='notWorkingArea']")
        self.assertFalse(textArea.is_enabled())

    def testShouldThrowExceptionIfSendingKeysToElementDisabledUsingRandomDisabledStrings(self):
        self._loadPage("formPage")
        disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1")
        try:
            disabledTextElement1.send_keys("foo")
            self.fail("Should have thrown exception")
        # NOTE(review): bare except also swallows the AssertionError raised by
        # self.fail() above, so this test cannot actually fail here; should
        # catch the specific WebDriver exception instead.
        except:
            pass
        self.assertEqual("", disabledTextElement1.text)
        disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2")
        try:
            disabledTextElement2.send_keys("bar")
            self.fail("Should have thrown exception")
        # NOTE(review): same bare-except issue as above.
        except:
            pass
        self.assertEqual("", disabledTextElement2.text)

    def testShouldIndicateWhenASelectIsDisabled(self):
        self._loadPage("formPage")
        enabled = self.driver.find_element_by_name("selectomatic")
        disabled = self.driver.find_element_by_name("no-select")
        self.assertTrue(enabled.is_enabled())
        self.assertFalse(disabled.is_enabled())

    def testShouldReturnTheValueOfCheckedForACheckboxEvenIfItLacksThatAttribute(self):
        self._loadPage("formPage")
        checkbox = self.driver.find_element_by_xpath("//input[@id='checky']")
        self.assertTrue(checkbox.get_attribute("checked") is None)
        checkbox.click()
        self.assertEqual("true", checkbox.get_attribute("checked"))

    def testShouldReturnTheValueOfSelectedForRadioButtonsEvenIfTheyLackThatAttribute(self):
        self._loadPage("formPage")
        neverSelected = self.driver.find_element_by_id("cheese")
        initiallyNotSelected = self.driver.find_element_by_id("peas")
        initiallySelected = self.driver.find_element_by_id("cheese_and_peas")
        # NOTE(review): the trailing "false"/"true" arguments are assertion
        # *messages*, not expected values.
        self.assertTrue(neverSelected.get_attribute("selected") is None, "false")
        self.assertTrue(initiallyNotSelected.get_attribute("selected") is None, "false")
        self.assertEqual("true", initiallySelected.get_attribute("selected"), "true")
        initiallyNotSelected.click()
        self.assertTrue(neverSelected.get_attribute("selected") is None)
        self.assertEqual("true", initiallyNotSelected.get_attribute("selected"))
        self.assertTrue(initiallySelected.get_attribute("selected") is None)

    def testShouldReturnTheValueOfSelectedForOptionsInSelectsEvenIfTheyLackThatAttribute(self):
        self._loadPage("formPage")
        selectBox = self.driver.find_element_by_xpath("//select[@name='selectomatic']")
        options = selectBox.find_elements_by_tag_name("option")
        one = options[0]
        two = options[1]
        self.assertTrue(one.is_selected())
        self.assertFalse(two.is_selected())
        self.assertEqual("true", one.get_attribute("selected"))
        self.assertTrue(two.get_attribute("selected") is None)

    def testShouldReturnValueOfClassAttributeOfAnElement(self):
        self._loadPage("xhtmlTest")
        heading = self.driver.find_element_by_xpath("//h1")
        classname = heading.get_attribute("class")
        self.assertEqual("header", classname)

    # Disabled due to issues with Frames
    #def testShouldReturnValueOfClassAttributeOfAnElementAfterSwitchingIFrame(self):
    #    self._loadPage("iframes")
    #    self.driver.switch_to.frame("iframe1")
    #
    #    wallace = self.driver.find_element_by_xpath("//div[@id='wallace']")
    #    classname = wallace.get_attribute("class")
    #    self.assertEqual("gromit", classname)

    def testShouldReturnTheContentsOfATextAreaAsItsValue(self):
        self._loadPage("formPage")
        value = self.driver.find_element_by_id("withText").get_attribute("value")
        self.assertEqual("Example text", value)

    def testShouldReturnTheContentsOfATextAreaAsItsValueWhenSetToNonNorminalTrue(self):
        self._loadPage("formPage")
        e = self.driver.find_element_by_id("withText")
        self.driver.execute_script("arguments[0].value = 'tRuE'", e)
        value = e.get_attribute("value")
        self.assertEqual("tRuE", value)

    def testShouldTreatReadonlyAsAValue(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_name("readonly")
        readOnlyAttribute = element.get_attribute("readonly")
        textInput = self.driver.find_element_by_name("x")
        notReadOnly = textInput.get_attribute("readonly")
        self.assertNotEqual(readOnlyAttribute, notReadOnly)

    def testShouldGetNumericAtribute(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id("withText")
        self.assertEqual("5", element.get_attribute("rows"))

    def testCanReturnATextApproximationOfTheStyleAttribute(self):
        self._loadPage("javascriptPage")
        style = self.driver.find_element_by_id("red-item").get_attribute("style")
        self.assertTrue("background-color" in style.lower())

    def testShouldCorrectlyReportValueOfColspan(self):
        self._loadPage("tables")
        th1 = self.driver.find_element_by_id("th1")
        td2 = self.driver.find_element_by_id("td2")
        self.assertEqual("th1", th1.get_attribute("id"))
        self.assertEqual("3", th1.get_attribute("colspan"))
        self.assertEqual("td2", td2.get_attribute("id"));
        self.assertEquals("2", td2.get_attribute("colspan"));

    def testCanRetrieveTheCurrentValueOfATextFormField_textInput(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id("working")
        self.assertEqual("", element.get_attribute("value"))
        element.send_keys("hello world")
        self.assertEqual("hello world", element.get_attribute("value"))

    def testCanRetrieveTheCurrentValueOfATextFormField_emailInput(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id("email")
        self.assertEqual("", element.get_attribute("value"))
        element.send_keys("[email protected]")
        self.assertEqual("[email protected]", element.get_attribute("value"))

    def testCanRetrieveTheCurrentValueOfATextFormField_textArea(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id("emptyTextArea")
        self.assertEqual("", element.get_attribute("value"))
        element.send_keys("hello world")
        self.assertEqual("hello world", element.get_attribute("value"))

    @pytest.mark.ignore_chrome
    def testShouldReturnNullForNonPresentBooleanAttributes(self):
        self._loadPage("booleanAttributes")
        element1 = self.driver.find_element_by_id("working")
        self.assertEqual(None, element1.get_attribute("required"))
        element2 = self.driver.find_element_by_id("wallace")
        self.assertEqual(None, element2.get_attribute("nowrap"))

    @pytest.mark.ignore_ie
    def testShouldReturnTrueForPresentBooleanAttributes(self):
        self._loadPage("booleanAttributes")
        element1 = self.driver.find_element_by_id("emailRequired")
        self.assertEqual("true", element1.get_attribute("required"))
        element2 = self.driver.find_element_by_id("emptyTextAreaRequired")
        self.assertEqual("true", element2.get_attribute("required"))
        element3 = self.driver.find_element_by_id("inputRequired")
        self.assertEqual("true", element3.get_attribute("required"))
        element4 = self.driver.find_element_by_id("textAreaRequired")
        self.assertEqual("true", element4.get_attribute("required"))
        element5 = self.driver.find_element_by_id("unwrappable")
        self.assertEqual("true", element5.get_attribute("nowrap"))

    # NOTE(review): name starts with "tes", not "test" — unittest discovery
    # never runs this test.
    def tesShouldGetUnicodeCharsFromAttribute(self):
        self._loadPage("formPage")
        title = self.driver.find_element_by_id("vsearchGadget").get_attribute("title")
        self.assertEqual('Hvad s\xf8ger du?', title)

    # --- helpers -----------------------------------------------------------

    def _pageURL(self, name):
        """Return the test web server's URL for the page *name* (+ '.html')."""
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        """Navigate the driver to the shared simpleTest page."""
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        """Navigate the driver to the named test page."""
        self.driver.get(self._pageURL(name))
|
apache-2.0
|
markYoungH/chromium.src
|
build/android/pylib/base/base_test_result.py
|
9
|
5913
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing base test results classes."""
class ResultType(object):
  """Enumerates the possible outcomes of a single test."""

  PASS = 'PASS'
  SKIP = 'SKIP'
  FAIL = 'FAIL'
  CRASH = 'CRASH'
  TIMEOUT = 'TIMEOUT'
  UNKNOWN = 'UNKNOWN'

  @staticmethod
  def GetTypes():
    """Return every known result type, in canonical order."""
    ordered = ('PASS', 'SKIP', 'FAIL', 'CRASH', 'TIMEOUT', 'UNKNOWN')
    return [getattr(ResultType, label) for label in ordered]
class BaseTestResult(object):
  """A single test's outcome: name, result type, duration and log text.

  Instances hash and compare by name, so a set of results treats two
  entries for the same test as duplicates.
  """

  def __init__(self, name, test_type, duration=0, log=''):
    """Create a result.

    Args:
      name: Unique, non-empty test name.
      test_type: One of the values returned by ResultType.GetTypes().
      duration: Runtime of the test in milliseconds.
      log: Optional string listing any errors.
    """
    assert name
    assert test_type in ResultType.GetTypes()
    self._name = name
    self._test_type = test_type
    self._duration = duration
    self._log = log

  def __str__(self):
    return self._name

  def __repr__(self):
    return self._name

  def __cmp__(self, other):
    # Order by name so sorted() output is stable. pylint: disable=W0212
    return cmp(self._name, other._name)

  def __hash__(self):
    return hash(self._name)

  def SetName(self, name):
    """Rename this result.

    Because results are stored in sets keyed by name, only do this when
    moving the result into another set.
    """
    self._name = name

  def GetName(self):
    """Return the test name."""
    return self._name

  def GetType(self):
    """Return the ResultType of this result."""
    return self._test_type

  def GetDuration(self):
    """Return the duration in milliseconds."""
    return self._duration

  def GetLog(self):
    """Return the log text (may be empty)."""
    return self._log
class TestRunResults(object):
  """Set of results for a test run.

  Results are stored in a set of BaseTestResult objects, which hash and
  compare by test name, so adding the same test twice keeps one entry.
  """

  def __init__(self):
    # Set of BaseTestResult instances.
    self._results = set()

  def GetLogs(self):
    """Get the string representation of all test logs."""
    s = []
    # Passing tests' logs are intentionally omitted.
    for test_type in ResultType.GetTypes():
      if test_type != ResultType.PASS:
        for t in sorted(self._GetType(test_type)):
          log = t.GetLog()
          if log:
            s.append('[%s] %s:' % (test_type, t))
            s.append(log)
    return '\n'.join(s)

  def GetGtestForm(self):
    """Get the gtest string representation of this object."""
    s = []
    # plural(1, 'test', 'tests') -> '1 test'; plural(2, ...) -> '2 tests'.
    plural = lambda n, s, p: '%d %s' % (n, p if n != 1 else s)
    tests = lambda n: plural(n, 'test', 'tests')
    s.append('[==========] %s ran.' % (tests(len(self.GetAll()))))
    s.append('[  PASSED  ] %s.' % (tests(len(self.GetPass()))))
    skipped = self.GetSkip()
    if skipped:
      s.append('[  SKIPPED ] Skipped %s, listed below:' % tests(len(skipped)))
      for t in sorted(skipped):
        s.append('[  SKIPPED ] %s' % str(t))
    # Crashes, timeouts and unknowns all count as failures here.
    all_failures = self.GetFail().union(self.GetCrash(), self.GetTimeout(),
        self.GetUnknown())
    if all_failures:
      s.append('[  FAILED  ] %s, listed below:' % tests(len(all_failures)))
      for t in sorted(self.GetFail()):
        s.append('[  FAILED  ] %s' % str(t))
      for t in sorted(self.GetCrash()):
        s.append('[  FAILED  ] %s (CRASHED)' % str(t))
      for t in sorted(self.GetTimeout()):
        s.append('[  FAILED  ] %s (TIMEOUT)' % str(t))
      for t in sorted(self.GetUnknown()):
        s.append('[  FAILED  ] %s (UNKNOWN)' % str(t))
      s.append('')
      s.append(plural(len(all_failures), 'FAILED TEST', 'FAILED TESTS'))
    return '\n'.join(s)

  def GetShortForm(self):
    """Get the short string representation of this object."""
    s = []
    s.append('ALL: %d' % len(self._results))
    for test_type in ResultType.GetTypes():
      s.append('%s: %d' % (test_type, len(self._GetType(test_type))))
    return ''.join([x.ljust(15) for x in s])

  def __str__(self):
    # NOTE(review): GetLongForm is not defined anywhere in this file —
    # str() on this class would raise AttributeError. Presumably the method
    # was lost in an edit or lives in a newer revision; confirm upstream.
    return self.GetLongForm()

  def AddResult(self, result):
    """Add |result| to the set.

    Args:
      result: An instance of BaseTestResult.
    """
    assert isinstance(result, BaseTestResult)
    self._results.add(result)

  def AddResults(self, results):
    """Add |results| to the set.

    Args:
      results: An iterable of BaseTestResult objects.
    """
    for t in results:
      self.AddResult(t)

  def AddTestRunResults(self, results):
    """Add the set of test results from |results|.

    Args:
      results: An instance of TestRunResults.
    """
    assert isinstance(results, TestRunResults)
    # pylint: disable=W0212
    self._results.update(results._results)

  def GetAll(self):
    """Get the set of all test results."""
    return self._results.copy()

  def _GetType(self, test_type):
    """Get the set of test results with the given test type."""
    return set(t for t in self._results if t.GetType() == test_type)

  def GetPass(self):
    """Get the set of all passed test results."""
    return self._GetType(ResultType.PASS)

  def GetSkip(self):
    """Get the set of all skipped test results."""
    return self._GetType(ResultType.SKIP)

  def GetFail(self):
    """Get the set of all failed test results."""
    return self._GetType(ResultType.FAIL)

  def GetCrash(self):
    """Get the set of all crashed test results."""
    return self._GetType(ResultType.CRASH)

  def GetTimeout(self):
    """Get the set of all timed out test results."""
    return self._GetType(ResultType.TIMEOUT)

  def GetUnknown(self):
    """Get the set of all unknown test results."""
    return self._GetType(ResultType.UNKNOWN)

  def GetNotPass(self):
    """Get the set of all non-passed test results."""
    return self.GetAll() - self.GetPass()

  def DidRunPass(self):
    """Return whether the test run was successful (skips don't count as failures)."""
    return not self.GetNotPass() - self.GetSkip()
|
bsd-3-clause
|
Ambrosys/protobuf
|
gtest/test/gtest_throw_on_failure_test.py
|
2917
|
5766
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
  """Set environment variable *env_var* (upper-cased) to *value*.

  Passing None for *value* removes the variable from the environment
  instead (a no-op if it is already unset).
  """
  key = env_var.upper()
  if value is None:
    os.environ.pop(key, None)
  else:
    os.environ[key] = value
def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""
  # NOTE: Python 2 print statement — this script predates Python 3 support.
  print 'Running "%s". . .' % ' '.join(command)
  p = gtest_test_utils.Subprocess(command)
  # Only a normal exit with status 0 counts as success (crashes/signals don't).
  return p.exited and p.exit_code == 0
# The tests. TODO([email protected]): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
                     variable; None if the variable should be unset.
      flag_value:    value of the --gtest_throw_on_failure flag;
                     None if the flag should not be present.
      should_fail:   True iff the program is expected to fail.
    """
    SetEnvVar(THROW_ON_FAILURE, env_var_value)
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    # '0' disables the mode explicitly; any other non-None value enables it
    # via the bare flag form.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    failed = not Run(command)
    # Always restore the environment so later tests start clean.
    SetEnvVar(THROW_ON_FAILURE, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
# Script entry point: delegate to Google Test's shared Python test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
bsd-3-clause
|
ubc/edx-platform
|
lms/djangoapps/lti_provider/tests/test_signature_validator.py
|
139
|
3804
|
"""
Tests for the SignatureValidator class.
"""
import ddt
from django.test import TestCase
from django.test.client import RequestFactory
from mock import patch
from lti_provider.models import LtiConsumer
from lti_provider.signature_validator import SignatureValidator
def get_lti_consumer():
    """
    Build the fixture LtiConsumer shared by all SignatureValidator tests.
    """
    fixture_fields = {
        'consumer_name': 'Consumer Name',
        'consumer_key': 'Consumer Key',
        'consumer_secret': 'Consumer Secret',
    }
    return LtiConsumer(**fixture_fields)
@ddt.ddt
class ClientKeyValidatorTest(TestCase):
    """
    Tests for the check_client_key method in the SignatureValidator class.
    """

    def setUp(self):
        super(ClientKeyValidatorTest, self).setUp()
        self.lti_consumer = get_lti_consumer()

    def test_valid_client_key(self):
        """
        Verify that check_client_key succeeds with a valid key
        """
        key = self.lti_consumer.consumer_key
        self.assertTrue(SignatureValidator(self.lti_consumer).check_client_key(key))

    # A 40-character key, an empty string, and None must all be rejected —
    # presumably the validator caps key length below 40; confirm against
    # SignatureValidator.
    @ddt.data(
        ('0123456789012345678901234567890123456789',),
        ('',),
        (None,),
    )
    @ddt.unpack
    def test_invalid_client_key(self, key):
        """
        Verify that check_client_key fails with a disallowed key
        """
        self.assertFalse(SignatureValidator(self.lti_consumer).check_client_key(key))
@ddt.ddt
class NonceValidatorTest(TestCase):
    """
    Tests for the check_nonce method in the SignatureValidator class.
    """

    def setUp(self):
        super(NonceValidatorTest, self).setUp()
        self.lti_consumer = get_lti_consumer()

    def test_valid_nonce(self):
        """
        Verify that check_nonce succeeds with a key of maximum length (64 chars)
        """
        nonce = '0123456789012345678901234567890123456789012345678901234567890123'
        self.assertTrue(SignatureValidator(self.lti_consumer).check_nonce(nonce))

    # 65 characters (one over the maximum), empty, and None must all fail.
    @ddt.data(
        ('01234567890123456789012345678901234567890123456789012345678901234',),
        ('',),
        (None,),
    )
    @ddt.unpack
    def test_invalid_nonce(self, nonce):
        """
        Verify that check_nonce fails with badly formatted nonce
        """
        self.assertFalse(SignatureValidator(self.lti_consumer).check_nonce(nonce))
class SignatureValidatorTest(TestCase):
    """
    Tests for the custom SignatureValidator class that uses the oauthlib library
    to check message signatures. Note that these tests mock out the library
    itself, since we assume it to be correct.
    """

    def setUp(self):
        super(SignatureValidatorTest, self).setUp()
        self.lti_consumer = get_lti_consumer()

    def test_get_existing_client_secret(self):
        """
        Verify that get_client_secret returns the right value for the correct
        key
        """
        key = self.lti_consumer.consumer_key
        secret = SignatureValidator(self.lti_consumer).get_client_secret(key, None)
        self.assertEqual(secret, self.lti_consumer.consumer_secret)

    @patch('oauthlib.oauth1.SignatureOnlyEndpoint.validate_request',
           return_value=(True, None))
    def test_verification_parameters(self, verify_mock):
        """
        Verify that the signature validation library method is called using the
        correct parameters derived from the HttpRequest.
        """
        body = 'oauth_signature_method=HMAC-SHA1&oauth_version=1.0'
        content_type = 'application/x-www-form-urlencoded'
        request = RequestFactory().post('/url', body, content_type=content_type)
        headers = {'Content-Type': content_type}
        SignatureValidator(self.lti_consumer).verify(request)
        # The validator must pass the absolute URI, method, raw body and
        # headers straight through to oauthlib.
        verify_mock.assert_called_once_with(
            request.build_absolute_uri(), 'POST', body, headers)
|
agpl-3.0
|
wrcj12138aaa/WinObjC
|
deps/3rdparty/icu/icu/source/test/depstest/depstest.py
|
189
|
7263
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2014, International Business Machines
# Corporation and others. All Rights Reserved.
#
# file name: depstest.py
#
# created on: 2011may24
"""ICU dependency tester.
This probably works only on Linux.
The exit code is 0 if everything is fine, 1 for errors, 2 for only warnings.
Sample invocation:
~/svn.icu/trunk/src/source/test/depstest$ ./depstest.py ~/svn.icu/trunk/dbg
"""
__author__ = "Markus W. Scherer"
import glob
import os.path
import subprocess
import sys
import dependencies
_ignored_symbols = set()
_obj_files = {}
_symbols_to_files = {}
_return_value = 0
# Classes with vtables (and thus virtual methods).
_virtual_classes = set()
# Classes with weakly defined destructors.
# nm shows a symbol class of "W" rather than "T".
_weak_destructors = set()
def _ReadObjFile(root_path, library_name, obj_name):
    """Reads one .o file's extern symbols via nm and records them.

    Populates _obj_files["<library>/<obj>"] with the object's imported and
    exported symbols, and updates the global symbol tables that _Resolve()
    later consults.

    Args:
      root_path: directory containing the built library directories.
      library_name: library subdirectory name.
      obj_name: .o file name inside that subdirectory.
    """
    global _ignored_symbols, _obj_files, _symbols_to_files
    global _virtual_classes, _weak_destructors
    # BUG FIX: _return_value was missing from the global statements above,
    # so the duplicate-file branch below assigned a function-local instead
    # and the warning never reached the process exit code.
    global _return_value
    lib_obj_name = library_name + "/" + obj_name
    if lib_obj_name in _obj_files:
        # Function-call form prints identically on Python 2 and Python 3.
        print("Warning: duplicate .o file " + lib_obj_name)
        _return_value = 2
        return
    path = os.path.join(root_path, library_name, obj_name)
    nm_result = subprocess.Popen(["nm", "--demangle", "--format=sysv",
                                  "--extern-only", "--no-sort", path],
                                 stdout=subprocess.PIPE).communicate()[0]
    obj_imports = set()
    obj_exports = set()
    for line in nm_result.splitlines():
        fields = line.split("|")
        if len(fields) == 1:
            continue  # header/blank lines of the sysv format
        name = fields[0].strip()
        # Ignore symbols like '__cxa_pure_virtual',
        # 'vtable for __cxxabiv1::__si_class_type_info' or
        # 'DW.ref.__gxx_personality_v0'.
        if name.startswith("__cxa") or "__cxxabi" in name or "__gxx" in name:
            _ignored_symbols.add(name)
            continue
        type = fields[2].strip()
        if type == "U":
            obj_imports.add(name)
        else:
            obj_exports.add(name)
            _symbols_to_files[name] = lib_obj_name
            # Is this a vtable? E.g., "vtable for icu_49::ByteSink".
            if name.startswith("vtable for icu"):
                _virtual_classes.add(name[name.index("::") + 2:])
            # Is this a destructor? E.g., "icu_49::ByteSink::~ByteSink()".
            # "W" means a weakly defined (implicit/inline) destructor.
            index = name.find("::~")
            if index >= 0 and type == "W":
                _weak_destructors.add(name[index + 3:name.index("(", index)])
    _obj_files[lib_obj_name] = {"imports": obj_imports, "exports": obj_exports}
def _ReadLibrary(root_path, library_name):
    """Reads the symbols of every .o file in the given library directory."""
    pattern = os.path.join(root_path, library_name, "*.o")
    for obj_path in glob.glob(pattern):
        _ReadObjFile(root_path, library_name, os.path.basename(obj_path))
def _Resolve(name, parents):
    """Recursively resolves the imports/exports of one dependency item.

    Args:
      name: item name in dependencies.items.
      parents: stack of item names currently being resolved (cycle detection).
    Returns:
      The resolved item dict, with "exports" and "system_symbols" filled in.
    """
    global _ignored_symbols, _obj_files, _symbols_to_files, _return_value
    item = dependencies.items[name]
    item_type = item["type"]
    if name in parents:
        sys.exit("Error: %s %s has a circular dependency on itself: %s" %
                 (item_type, name, parents))
    # Check if already cached.
    exports = item.get("exports")
    if exports != None: return item
    # Calculate recursively.
    parents.append(name)
    imports = set()
    exports = set()
    system_symbols = item.get("system_symbols")
    if system_symbols == None: system_symbols = item["system_symbols"] = set()
    # Gather this item's own imports/exports from its .o files (if any).
    files = item.get("files")
    if files:
        for file_name in files:
            obj_file = _obj_files[file_name]
            imports |= obj_file["imports"]
            exports |= obj_file["exports"]
    imports -= exports | _ignored_symbols
    deps = item.get("deps")
    if deps:
        for dep in deps:
            dep_item = _Resolve(dep, parents)
            # Detect whether this item needs to depend on dep,
            # except when this item has no files, that is, when it is just
            # a deliberate umbrella group or library.
            dep_exports = dep_item["exports"]
            dep_system_symbols = dep_item["system_symbols"]
            if files and imports.isdisjoint(dep_exports) and imports.isdisjoint(dep_system_symbols):
                print "Info: %s %s does not need to depend on %s\n" % (item_type, name, dep)
            # We always include the dependency's exports, even if we do not need them
            # to satisfy local imports.
            exports |= dep_exports
            system_symbols |= dep_system_symbols
    item["exports"] = exports
    item["system_symbols"] = system_symbols
    imports -= exports | system_symbols
    # Anything still unresolved is an error; report which .o file imports it
    # and, when known, which item would have provided it.
    for symbol in imports:
        for file_name in files:
            if symbol in _obj_files[file_name]["imports"]:
                neededFile = _symbols_to_files.get(symbol)
                if neededFile in dependencies.file_to_item:
                    neededItem = "but %s does not depend on %s (for %s)" % (name, dependencies.file_to_item[neededFile], neededFile)
                else:
                    neededItem = "- is this a new system symbol?"
                sys.stderr.write("Error: in %s %s: %s imports %s %s\n" %
                                 (item_type, name, file_name, symbol, neededItem))
                _return_value = 1
    del parents[-1]
    return item
def Process(root_path):
    """Loads dependencies.txt, reads the libraries' .o files, and processes them.

    Modifies dependencies.items: Recursively builds each item's system_symbols and exports.
    """
    global _ignored_symbols, _obj_files, _return_value
    global _virtual_classes, _weak_destructors
    dependencies.Load()
    # Map each declared system symbol to the item that provides it.
    for name_and_item in dependencies.items.iteritems():  # Python 2 dict API
        name = name_and_item[0]
        item = name_and_item[1]
        system_symbols = item.get("system_symbols")
        if system_symbols:
            for symbol in system_symbols:
                _symbols_to_files[symbol] = name
    for library_name in dependencies.libraries:
        _ReadLibrary(root_path, library_name)
    # Cross-check the .o files actually built against dependencies.txt.
    o_files_set = set(_obj_files.keys())
    files_missing_from_deps = o_files_set - dependencies.files
    files_missing_from_build = dependencies.files - o_files_set
    if files_missing_from_deps:
        sys.stderr.write("Error: files missing from dependencies.txt:\n%s\n" %
                         sorted(files_missing_from_deps))
        _return_value = 1
    if files_missing_from_build:
        sys.stderr.write("Error: files in dependencies.txt but not built:\n%s\n" %
                         sorted(files_missing_from_build))
        _return_value = 1
    # Only resolve dependencies when the file lists were consistent.
    if not _return_value:
        for library_name in dependencies.libraries:
            _Resolve(library_name, [])
    if not _return_value:
        # A class with virtual methods but only a weak (implicit/inline)
        # destructor is flagged; see ICU ticket #8454.
        virtual_classes_with_weak_destructors = _virtual_classes & _weak_destructors
        if virtual_classes_with_weak_destructors:
            sys.stderr.write("Error: Some classes have virtual methods, and "
                             "an implicit or inline destructor "
                             "(see ICU ticket #8454 for details):\n%s\n" %
                             sorted(virtual_classes_with_weak_destructors))
            _return_value = 1
def main():
    """Command-line entry point; returns the process exit code.

    Exit codes: 0 = everything fine, 1 = errors, 2 = only warnings
    (set as a side effect of Process()/_ReadObjFile()).
    """
    global _return_value
    if len(sys.argv) <= 1:
        sys.exit(("Command line error: " +
                  "need one argument with the root path to the built ICU libraries/*.o files."))
    Process(sys.argv[1])
    if _ignored_symbols:
        print "Info: ignored symbols:\n%s" % sorted(_ignored_symbols)
    if not _return_value:
        print "OK: Specified and actual dependencies match."
    else:
        print "Error: There were errors, please fix them and re-run. Processing may have terminated abnormally."
    return _return_value
if __name__ == "__main__":
sys.exit(main())
|
mit
|
linjoahow/cd0505
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/__init__.py
|
603
|
6082
|
## pygame - Python Game Library
## Copyright (C) 2000-2001 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
'''Top-level Pygame module.
Pygame is a set of Python modules designed for writing games.
It is written on top of the excellent SDL library. This allows you
to create fully featured games and multimedia programs in the Python
language. The package is highly portable, with games running on
Windows, MacOS, OS X, BeOS, FreeBSD, IRIX, and Linux.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import sys
class MissingModule:
    """Placeholder for an optional pygame submodule that failed to import.

    Instances are falsy, so callers can probe availability with
    ``if pygame.font:``; any attribute access raises NotImplementedError
    carrying the module name.
    """

    def __init__(self, name, info='', urgent=0):
        self.name = name        # submodule name, e.g. 'font'
        self.info = str(info)   # stringified import error
        self.urgent = urgent    # nonzero: warn immediately at construction
        if urgent:
            self.warn()

    def __getattr__(self, var):
        # First attribute access on a non-urgent module emits a warning;
        # after that every access fails loudly.
        if not self.urgent:
            self.warn()
            self.urgent = 1
        MissingPygameModule = "%s module not available" % self.name
        raise NotImplementedError(MissingPygameModule)

    def __bool__(self):
        # BUG FIX: this file runs under Python 3 (Brython), where the truth
        # hook is __bool__, not __nonzero__ -- without it, MissingModule
        # instances were truthy and availability checks always passed.
        return False

    # Keep the Python 2 name as an alias for backward compatibility.
    __nonzero__ = __bool__

    def warn(self):
        # Failed 'import's are urgent; plain 'use' failures are not.
        if self.urgent: type = 'import'
        else: type = 'use'
        message = '%s %s: %s' % (type, self.name, self.info)
        try:
            import warnings
            if self.urgent: level = 4
            else: level = 3
            warnings.warn(message, RuntimeWarning, level)
        except ImportError:
            print(message)
#we need to import like this, each at a time. the cleanest way to import
#our modules is with the import command (not the __import__ function)
#first, the "required" modules
#from pygame.array import * #brython fix me
from pygame.base import *
from pygame.constants import *
from pygame.version import *
from pygame.rect import Rect
import pygame.color
Color = pygame.color.Color
__version__ = ver
#added by earney
from . import time
from . import display
from . import constants
from . import event
from . import font
from . import mixer
from . import sprite
from .surface import Surface
from . import image
from . import mouse
from . import transform
#next, the "standard" modules
#we still allow them to be missing for stripped down pygame distributions
'''
try: import pygame.cdrom
except (ImportError,IOError), msg:cdrom=MissingModule("cdrom", msg, 1)
try: import pygame.cursors
except (ImportError,IOError), msg:cursors=MissingModule("cursors", msg, 1)
try: import pygame.display
except (ImportError,IOError), msg:display=MissingModule("display", msg, 1)
try: import pygame.draw
except (ImportError,IOError), msg:draw=MissingModule("draw", msg, 1)
try: import pygame.event
except (ImportError,IOError), msg:event=MissingModule("event", msg, 1)
try: import pygame.image
except (ImportError,IOError), msg:image=MissingModule("image", msg, 1)
try: import pygame.joystick
except (ImportError,IOError), msg:joystick=MissingModule("joystick", msg, 1)
try: import pygame.key
except (ImportError,IOError), msg:key=MissingModule("key", msg, 1)
try: import pygame.mouse
except (ImportError,IOError), msg:mouse=MissingModule("mouse", msg, 1)
try: import pygame.sprite
except (ImportError,IOError), msg:sprite=MissingModule("sprite", msg, 1)
try: from pygame.surface import Surface
except (ImportError,IOError):Surface = lambda:Missing_Function
try: from pygame.overlay import Overlay
except (ImportError,IOError):Overlay = lambda:Missing_Function
try: import pygame.time
except (ImportError,IOError), msg:time=MissingModule("time", msg, 1)
try: import pygame.transform
except (ImportError,IOError), msg:transform=MissingModule("transform", msg, 1)
#lastly, the "optional" pygame modules
try:
import pygame.font
import pygame.sysfont
pygame.font.SysFont = pygame.sysfont.SysFont
pygame.font.get_fonts = pygame.sysfont.get_fonts
pygame.font.match_font = pygame.sysfont.match_font
except (ImportError,IOError), msg:font=MissingModule("font", msg, 0)
try: import pygame.mixer
except (ImportError,IOError), msg:mixer=MissingModule("mixer", msg, 0)
#try: import pygame.movie
#except (ImportError,IOError), msg:movie=MissingModule("movie", msg, 0)
#try: import pygame.movieext
#except (ImportError,IOError), msg:movieext=MissingModule("movieext", msg, 0)
try: import pygame.surfarray
except (ImportError,IOError), msg:surfarray=MissingModule("surfarray", msg, 0)
try: import pygame.sndarray
except (ImportError,IOError), msg:sndarray=MissingModule("sndarray", msg, 0)
#try: import pygame.fastevent
#except (ImportError,IOError), msg:fastevent=MissingModule("fastevent", msg, 0)
#there's also a couple "internal" modules not needed
#by users, but putting them here helps "dependency finder"
#programs get everything they need (like py2exe)
try: import pygame.imageext; del pygame.imageext
except (ImportError,IOError):pass
try: import pygame.mixer_music; del pygame.mixer_music
except (ImportError,IOError):pass
def packager_imports():
"""
Some additional things that py2app/py2exe will want to see
"""
import OpenGL.GL
'''
#make Rects pickleable
import copyreg
def __rect_constructor(x, y, w, h):
    # Rebuild a Rect from its pickled components.
    return Rect(x, y, w, h)
def __rect_reduce(r):
    # Reduce a Rect to (constructor, args) for the pickle protocol.
    assert type(r) == Rect
    return __rect_constructor, (r.x, r.y, r.w, r.h)
copyreg.pickle(Rect, __rect_reduce, __rect_constructor)
#cleanup namespace
del pygame, os, sys, #TODO rwobject, surflock, MissingModule, copy_reg
|
agpl-3.0
|
ralphcallaway/MavensMate-SublimeText
|
lib/command_helper.py
|
8
|
3236
|
# Operations whose progress message does not depend on request parameters.
_STATIC_MESSAGES = {
    'new-metadata': 'Opening New Metadata UI',
    'compile-project': 'Compiling Project',
    'edit-project': 'Opening Edit Project dialog',
    'clean-project': 'Cleaning Project',
    'deploy': 'Opening Deploy dialog',
    'execute-apex': 'Opening Execute Apex dialog',
    'upgrade-project': 'Your MavensMate project needs to be upgraded. Opening the upgrade UI.',
    'index-metadata': 'Indexing Metadata',
    'open-metadata': 'Opening Selected Metadata',
    'start-logging': 'Started logging for user ids specified in config/.debug',
    'stop-logging': 'Stopped logging for user ids specified in config/.debug',
    'fetch-logs': 'Fetching Apex Logs (will be placed in project/debug/logs)',
    'import-project': 'Opening New Project Dialog',
    'index-apex': 'Indexing Project Apex Metadata',
    'run-apex-script': 'Running Apex script (logs can be found in project/apex-scripts/log)',
    'new-apex-script': 'Creating new Apex script',
    'run-all-tests': 'Running all tests...',
}


def _single_basename(params, key):
    """Return the basename of params[key][0] when exactly one entry was
    passed, else None.  Shared by the operations that act on one file."""
    items = params.get(key)
    if items is None or len(items) != 1:
        return None
    name = items[0]
    # Show only the file name, not the full path.
    return name.split('/')[-1] if '/' in name else name


def get_message(params, operation):
    """Build the status message displayed while MavensMate handles *operation*.

    Args:
        params: dict of request parameters; may carry 'paths', 'selected'
            or 'classes' lists that refine the message.
        operation: operation identifier string.

    Returns:
        The human-readable status message; a generic fallback for unknown
        operations.
    """
    if operation in _STATIC_MESSAGES:
        return _STATIC_MESSAGES[operation]
    if operation == 'compile-metadata':
        what = _single_basename(params, 'paths')
        if what is not None:
            return 'Compiling: ' + what
        return 'Compiling Selected Metadata'
    if operation == 'delete-metadata':
        what = _single_basename(params, 'paths')
        if what is not None:
            return 'Deleting: ' + what
        return 'Deleting Selected Metadata'
    if operation == 'refresh-metadata':
        what = _single_basename(params, 'paths')
        if what is not None:
            return 'Refreshing: ' + what
        return 'Refreshing Selected Metadata'
    if operation == 'test-async':
        what = _single_basename(params, 'classes')
        if what is not None:
            return 'Running Apex unit tests for: ' + what
        return 'Running Apex unit tests for this class...'
    if operation == 'run-tests':
        # Note: the selected test name is shown verbatim, no basename trimming.
        if 'selected' in params and len(params['selected']) == 1:
            return 'Running Apex Test for ' + params['selected'][0]
        return 'Opening Apex Test Runner'
    # Fallback for operations without a dedicated message.
    return 'Handling requested operation...'
|
gpl-3.0
|
r0balo/pelisalacarta
|
python/main-classic/servers/thevideome.py
|
2
|
2392
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para thevideo.me
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
from core import httptools
from core import logger
from core import scrapertools
def test_video_exists(page_url):
    """Return (True, "") when the video at *page_url* still exists,
    otherwise (False, <user-facing reason>).  Downloads the page and
    checks it for the host's deletion/404 markers."""
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "File was deleted" in data or "Page Cannot Be Found" in data:
        return False, "[thevideo.me] El archivo ha sido eliminado o no existe"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the playable media URLs for a thevideo.me page.

    Returns a list of [label, media_url] pairs.  The premium/user/password
    arguments are part of the common server-connector interface and are
    unused here.
    """
    logger.info("url=" + page_url)
    # Normalize plain links to the embed page, which carries the player config.
    if not "embed" in page_url:
        page_url = page_url.replace("http://thevideo.me/", "http://thevideo.me/embed-") + ".html"
    data = httptools.downloadpage(page_url).data
    mpri_Key = scrapertools.find_single_match(data, "mpri_Key='([^']+)'")
    data_vt = httptools.downloadpage("http://thevideo.me/jwv/%s" % mpri_Key).data
    # 'vt' token scraped from the packed player script; appended to each
    # media URL below.  Assumes the first |-separated token after 'function'
    # (or 'jwConfig' in the fallback) is the vt value -- TODO confirm against
    # the current site markup.
    vt = scrapertools.find_single_match(data_vt, 'function\|([^\|]+)\|')
    if "fallback" in vt:
        vt = scrapertools.find_single_match(data_vt, 'jwConfig\|([^\|]+)\|')
    media_urls = scrapertools.find_multiple_matches(data, '\{"file"\s*\:\s*"([^"]+)"\s*,\s*"label"\s*\:\s*"([^"]+)"')
    video_urls = []
    for media_url, label in media_urls:
        media_url += "?direct=false&ua=1&vt=%s" % vt
        # Label format: "<ext> (<quality>) [thevideo.me]"
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:]+" ("+label+") [thevideo.me]", media_url])
    return video_urls
# Encuentra vídeos del servidor en el texto pasado
def find_videos(data):
    """Find thevideo.me video URLs in *data*.

    Returns a list of [title, url, server_id] entries, one per distinct
    video id found; duplicates are logged and skipped.
    """
    encontrados = set()
    devuelve = []
    # BUG FIX: the original class [A-z0-9] also matched the punctuation
    # between 'Z' and 'a' ('[', '\', ']', '^', '_', '`'); video ids are
    # alphanumeric only.
    patronvideos = 'thevideo.me/(?:embed-|)([A-Za-z0-9]+)'
    logger.info("#" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        titulo = "[thevideo.me]"
        url = "http://thevideo.me/embed-" + match + ".html"
        if url not in encontrados:
            logger.info(" url=" + url)
            devuelve.append([titulo, url, 'thevideome'])
            encontrados.add(url)
        else:
            logger.info(" url duplicada=" + url)
    return devuelve
|
gpl-3.0
|
joshivineet/googletest
|
test/gtest_env_var_test.py
|
2408
|
3487
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
    """Asserts that 'expected' equals 'actual', printing both on mismatch.

    Raises:
      AssertionError: if the two values differ.
    """
    if expected != actual:
        # Function-call form prints identically under Python 2 (a single
        # parenthesized expression) and Python 3; the original used
        # Python 2-only print statements.
        print('Expected: %s' % (expected,))
        print(' Actual: %s' % (actual,))
        raise AssertionError
def SetEnvVar(env_var, value):
    """Sets the env variable to 'value'; unsets it when 'value' is None."""
    if value is None:
        # Remove the variable if present; no-op otherwise.
        environ.pop(env_var, None)
    else:
        environ[env_var] = value
def GetFlag(flag):
    """Runs gtest_env_var_test_ and returns its output.

    Args:
      flag: a command-line flag string to pass through, or None for none.
    """
    args = [COMMAND]
    if flag is not None:
        args += [flag]
    # Run with the module-level 'environ' copy so the binary sees the
    # variables set via SetEnvVar().
    return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
    """Verifies that the given flag is affected by the corresponding env var."""
    env_var = 'GTEST_' + flag.upper()
    # First with the env var set, then with it unset (expect the default).
    for value, expected in ((test_val, test_val), (None, default_val)):
        SetEnvVar(env_var, value)
        AssertEq(expected, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
    """Exercises every GTEST_* environment variable / flag pair."""

    def testEnvVarAffectsFlag(self):
        """Tests that environment variable should affect the corresponding flag."""
        TestFlag('break_on_failure', '1', '0')
        TestFlag('color', 'yes', 'auto')
        TestFlag('filter', 'FooTest.Bar', '*')
        TestFlag('output', 'xml:tmp/foo.xml', '')
        TestFlag('print_time', '0', '1')
        TestFlag('repeat', '999', '1')
        TestFlag('throw_on_failure', '1', '0')
        TestFlag('death_test_style', 'threadsafe', 'fast')
        TestFlag('catch_exceptions', '0', '1')
        # These flags are only meaningful on Linux.
        if IS_LINUX:
            TestFlag('death_test_use_fork', '1', '0')
            TestFlag('stack_trace_depth', '0', '100')
if __name__ == '__main__':
gtest_test_utils.Main()
|
bsd-3-clause
|
eduNEXT/edunext-platform
|
lms/djangoapps/instructor_task/tests/test_tasks.py
|
2
|
29965
|
"""
Unit tests for LMS instructor-initiated background tasks.
Runs tasks on answers to course problems to validate that code
paths actually work.
"""
import json
from functools import partial
from uuid import uuid4
import ddt
from celery.states import FAILURE, SUCCESS
from django.utils.translation import ugettext_noop
from mock import MagicMock, Mock, patch
from opaque_keys.edx.keys import i4xEncoder
from six.moves import range
from course_modes.models import CourseMode
from lms.djangoapps.courseware.models import StudentModule
from lms.djangoapps.courseware.tests.factories import StudentModuleFactory
from lms.djangoapps.instructor_task.exceptions import UpdateProblemModuleStateError
from lms.djangoapps.instructor_task.models import InstructorTask
from lms.djangoapps.instructor_task.tasks import (
delete_problem_state,
export_ora2_data,
generate_certificates,
override_problem_score,
rescore_problem,
reset_problem_attempts
)
from lms.djangoapps.instructor_task.tasks_helper.misc import upload_ora2_data
from lms.djangoapps.instructor_task.tests.factories import InstructorTaskFactory
from lms.djangoapps.instructor_task.tests.test_base import InstructorTaskModuleTestCase
from xmodule.modulestore.exceptions import ItemNotFoundError
PROBLEM_URL_NAME = "test_urlname"
class TestTaskFailure(Exception):
    """Raised by mocked celery hooks to simulate a failing task."""
class TestInstructorTasks(InstructorTaskModuleTestCase):
    """
    Ensure tasks behave as expected.

    Base class providing shared fixtures and generic task-running helpers;
    concrete task test classes below reuse the _test_* methods.
    """

    def setUp(self):
        super(TestInstructorTasks, self).setUp()
        self.initialize_course()
        self.instructor = self.create_instructor('instructor')
        # UsageKey of the test problem all helpers operate on.
        self.location = self.problem_location(PROBLEM_URL_NAME)

    def _create_input_entry(
            self, student_ident=None, use_problem_url=True, course_id=None, only_if_higher=False, score=None
    ):
        """Creates a InstructorTask entry for testing.

        Optional arguments are folded into the JSON task_input; course_id
        defaults to the test course.
        """
        task_id = str(uuid4())
        task_input = {'only_if_higher': only_if_higher}
        if use_problem_url:
            task_input['problem_url'] = self.location
        if student_ident is not None:
            task_input['student'] = student_ident
        if score is not None:
            task_input['score'] = score
        course_id = course_id or self.course.id
        instructor_task = InstructorTaskFactory.create(
            course_id=course_id,
            requester=self.instructor,
            # i4xEncoder serializes the UsageKey stored under 'problem_url'.
            task_input=json.dumps(task_input, cls=i4xEncoder),
            task_key='dummy value',
            task_id=task_id
        )
        return instructor_task

    def _get_xmodule_instance_args(self):
        """
        Calculate dummy values for parameters needed for instantiating xmodule instances.
        """
        return {
            'xqueue_callback_url_prefix': 'dummy_value',
            'request_info': {
                'username': 'dummy_username',
                'user_id': 'dummy_id',
            },
        }

    def _run_task_with_mock_celery(self, task_class, entry_id, task_id, expected_failure_message=None):
        """Submit a task and mock how celery provides a current_task.

        When expected_failure_message is given, the mocked update_state
        raises TestTaskFailure with that message to simulate task failure.
        """
        self.current_task = Mock()
        self.current_task.request = Mock()
        self.current_task.request.id = task_id
        self.current_task.update_state = Mock()
        if expected_failure_message is not None:
            self.current_task.update_state.side_effect = TestTaskFailure(expected_failure_message)
        task_args = [entry_id, self._get_xmodule_instance_args()]
        with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task') as mock_get_task:
            mock_get_task.return_value = self.current_task
            # .apply() runs the celery task synchronously in-process.
            return task_class.apply(task_args, task_id=task_id).get()

    def _test_missing_current_task(self, task_class):
        """Check that a task_class fails when celery doesn't provide a current_task."""
        task_entry = self._create_input_entry()
        with self.assertRaises(ValueError):
            task_class(task_entry.id, self._get_xmodule_instance_args())

    def _test_undefined_course(self, task_class):
        """Run with celery, but with no course defined."""
        task_entry = self._create_input_entry(course_id="bogus/course/id")
        with self.assertRaises(ItemNotFoundError):
            self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)

    def _test_undefined_problem(self, task_class):
        """Run with celery, but no problem defined."""
        task_entry = self._create_input_entry()
        with self.assertRaises(ItemNotFoundError):
            self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)

    def _test_run_with_task(self, task_class, action_name, expected_num_succeeded,
                            expected_num_skipped=0, expected_attempted=0, expected_total=0):
        """Run a task and check the number of StudentModules processed."""
        task_entry = self._create_input_entry()
        status = self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)
        # Attempted/total default to succeeded + skipped when not supplied.
        expected_attempted = expected_attempted \
            if expected_attempted else expected_num_succeeded + expected_num_skipped
        expected_total = expected_total \
            if expected_total else expected_num_succeeded + expected_num_skipped
        # check return value
        self.assertEqual(status.get('attempted'), expected_attempted)
        self.assertEqual(status.get('succeeded'), expected_num_succeeded)
        self.assertEqual(status.get('skipped'), expected_num_skipped)
        self.assertEqual(status.get('total'), expected_total)
        self.assertEqual(status.get('action_name'), action_name)
        self.assertGreater(status.get('duration_ms'), 0)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEqual(json.loads(entry.task_output), status)
        self.assertEqual(entry.task_state, SUCCESS)

    def _test_run_with_no_state(self, task_class, action_name):
        """Run with no StudentModules defined for the current problem."""
        self.define_option_problem(PROBLEM_URL_NAME)
        self._test_run_with_task(task_class, action_name, 0)

    def _create_students_with_state(self, num_students, state=None, grade=0, max_grade=1):
        """Create students, a problem, and StudentModule objects for testing"""
        self.define_option_problem(PROBLEM_URL_NAME)
        enrolled_students = self._create_and_enroll_students(num_students)
        for student in enrolled_students:
            StudentModuleFactory.create(
                course_id=self.course.id,
                module_state_key=self.location,
                student=student,
                grade=grade,
                max_grade=max_grade,
                state=state
            )
        return enrolled_students

    def _create_and_enroll_students(self, num_students, mode=CourseMode.DEFAULT_MODE_SLUG):
        """Create & enroll students for testing"""
        return [
            self.create_student(username='robot%d' % i, email='robot+test+%[email protected]' % i, mode=mode)
            for i in range(num_students)
        ]

    def _create_students_with_no_state(self, num_students):
        """Create students and a problem for testing"""
        self.define_option_problem(PROBLEM_URL_NAME)
        enrolled_students = self._create_and_enroll_students(num_students)
        return enrolled_students

    def _assert_num_attempts(self, students, num_attempts):
        """Check the number attempts for all students is the same"""
        for student in students:
            module = StudentModule.objects.get(course_id=self.course.id,
                                               student=student,
                                               module_state_key=self.location)
            state = json.loads(module.state)
            self.assertEqual(state['attempts'], num_attempts)

    def _test_run_with_failure(self, task_class, expected_message):
        """Run a task and trigger an artificial failure with the given message."""
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        with self.assertRaises(TestTaskFailure):
            self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEqual(entry.task_state, FAILURE)
        output = json.loads(entry.task_output)
        self.assertEqual(output['exception'], 'TestTaskFailure')
        self.assertEqual(output['message'], expected_message)

    def _test_run_with_long_error_msg(self, task_class):
        """
        Run with an error message that is so long it will require
        truncation (as well as the jettisoning of the traceback).
        """
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        expected_message = "x" * 1500
        with self.assertRaises(TestTaskFailure):
            self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEqual(entry.task_state, FAILURE)
        # 1023 appears to be the task_output storage limit -- the stored
        # output must fit under it.
        self.assertGreater(1023, len(entry.task_output))
        output = json.loads(entry.task_output)
        self.assertEqual(output['exception'], 'TestTaskFailure')
        # Message is truncated with a trailing ellipsis; traceback dropped.
        self.assertEqual(output['message'], expected_message[:len(output['message']) - 3] + "...")
        self.assertNotIn('traceback', output)

    def _test_run_with_short_error_msg(self, task_class):
        """
        Run with an error message that is short enough to fit
        in the output, but long enough that the traceback won't.
        Confirm that the traceback is truncated.
        """
        task_entry = self._create_input_entry()
        self.define_option_problem(PROBLEM_URL_NAME)
        expected_message = "x" * 900
        with self.assertRaises(TestTaskFailure):
            self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)
        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEqual(entry.task_state, FAILURE)
        self.assertGreater(1023, len(entry.task_output))
        output = json.loads(entry.task_output)
        self.assertEqual(output['exception'], 'TestTaskFailure')
        self.assertEqual(output['message'], expected_message)
        # The traceback itself is truncated with an ellipsis.
        self.assertEqual(output['traceback'][-3:], "...")
class TestOverrideScoreInstructorTask(TestInstructorTasks):
"""Tests instructor task to override learner's problem score"""
    def assert_task_output(self, output, **expected_output):
        """
        Check & compare output of the task.

        Compares each counter in *output* against the keyword-supplied
        expectations; duration_ms only has to exceed the given floor
        (default 0).
        """
        self.assertEqual(output.get('total'), expected_output.get('total'))
        self.assertEqual(output.get('attempted'), expected_output.get('attempted'))
        self.assertEqual(output.get('succeeded'), expected_output.get('succeeded'))
        self.assertEqual(output.get('skipped'), expected_output.get('skipped'))
        self.assertEqual(output.get('failed'), expected_output.get('failed'))
        self.assertEqual(output.get('action_name'), expected_output.get('action_name'))
        self.assertGreater(output.get('duration_ms'), expected_output.get('duration_ms', 0))
def get_task_output(self, task_id):
"""Get and load instructor task output"""
entry = InstructorTask.objects.get(id=task_id)
return json.loads(entry.task_output)
def test_override_missing_current_task(self):
self._test_missing_current_task(override_problem_score)
def test_override_undefined_course(self):
"""Tests that override problem score raises exception with undefined course"""
self._test_undefined_course(override_problem_score)
def test_override_undefined_problem(self):
"""Tests that override problem score raises exception with undefined problem"""
self._test_undefined_problem(override_problem_score)
def test_override_with_no_state(self):
"""Tests override score with no problem state in StudentModule"""
self._test_run_with_no_state(override_problem_score, 'overridden')
def test_override_with_failure(self):
self._test_run_with_failure(override_problem_score, 'We expected this to fail')
def test_override_with_long_error_msg(self):
self._test_run_with_long_error_msg(override_problem_score)
def test_override_with_short_error_msg(self):
self._test_run_with_short_error_msg(override_problem_score)
def test_overriding_non_scorable(self):
"""
Tests that override problem score raises an error if module descriptor has not `set_score` method.
"""
input_state = json.dumps({'done': True})
num_students = 1
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry(score=0)
mock_instance = MagicMock()
del mock_instance.set_score
with patch(
'lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal'
) as mock_get_module:
mock_get_module.return_value = mock_instance
with self.assertRaises(UpdateProblemModuleStateError):
self._run_task_with_mock_celery(override_problem_score, task_entry.id, task_entry.task_id)
# check values stored in table:
entry = InstructorTask.objects.get(id=task_entry.id)
output = json.loads(entry.task_output)
self.assertEqual(output['exception'], "UpdateProblemModuleStateError")
self.assertEqual(output['message'], "Scores cannot be overridden for this problem type.")
self.assertGreater(len(output['traceback']), 0)
def test_overriding_unaccessable(self):
"""
Tests score override for a problem in a course, for all students fails if user has answered a
problem to which user does not have access to.
"""
input_state = json.dumps({'done': True})
num_students = 1
self._create_students_with_state(num_students, input_state)
task_entry = self._create_input_entry(score=0)
with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal',
return_value=None):
self._run_task_with_mock_celery(override_problem_score, task_entry.id, task_entry.task_id)
self.assert_task_output(
output=self.get_task_output(task_entry.id),
total=num_students,
attempted=num_students,
succeeded=0,
skipped=0,
failed=num_students,
action_name='overridden'
)
def test_overriding_success(self):
"""
Tests score override for a problem in a course, for all students succeeds.
"""
mock_instance = MagicMock()
getattr(mock_instance, 'override_problem_score').return_value = None
num_students = 10
self._create_students_with_state(num_students)
task_entry = self._create_input_entry(score=0)
with patch(
'lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal'
) as mock_get_module:
mock_get_module.return_value = mock_instance
mock_instance.max_score = MagicMock(return_value=99999.0)
mock_instance.weight = 99999.0
self._run_task_with_mock_celery(override_problem_score, task_entry.id, task_entry.task_id)
self.assert_task_output(
output=self.get_task_output(task_entry.id),
total=num_students,
attempted=num_students,
succeeded=num_students,
skipped=0,
failed=0,
action_name='overridden'
)
def test_overriding_success_with_no_state(self):
"""
Tests that score override is successful for a learner when they have no state.
"""
num_students = 1
enrolled_students = self._create_students_with_no_state(num_students=num_students)
task_entry = self._create_input_entry(score=1, student_ident=enrolled_students[0].username)
self._run_task_with_mock_celery(override_problem_score, task_entry.id, task_entry.task_id)
self.assert_task_output(
output=self.get_task_output(task_entry.id),
total=num_students,
attempted=num_students,
succeeded=num_students,
skipped=0,
failed=0,
action_name='overridden'
)
@ddt.ddt
class TestRescoreInstructorTask(TestInstructorTasks):
    """Tests problem-rescoring instructor task."""

    # NOTE(review): assert_task_output/get_task_output duplicate the helpers on
    # TestOverrideScoreInstructorTask — consider extracting a shared mixin.
    def assert_task_output(self, output, **expected_output):
        """
        Check & compare output of the task
        """
        self.assertEqual(output.get('total'), expected_output.get('total'))
        self.assertEqual(output.get('attempted'), expected_output.get('attempted'))
        self.assertEqual(output.get('succeeded'), expected_output.get('succeeded'))
        self.assertEqual(output.get('skipped'), expected_output.get('skipped'))
        self.assertEqual(output.get('failed'), expected_output.get('failed'))
        self.assertEqual(output.get('action_name'), expected_output.get('action_name'))
        # Duration is nondeterministic; only require it above the floor.
        self.assertGreater(output.get('duration_ms'), expected_output.get('duration_ms', 0))

    def get_task_output(self, task_id):
        """Get and load instructor task output"""
        entry = InstructorTask.objects.get(id=task_id)
        return json.loads(entry.task_output)

    def test_rescore_missing_current_task(self):
        """Tests that rescoring fails when run outside a current task."""
        self._test_missing_current_task(rescore_problem)

    def test_rescore_undefined_course(self):
        """Tests that rescoring fails for an undefined course."""
        self._test_undefined_course(rescore_problem)

    def test_rescore_undefined_problem(self):
        """Tests that rescoring fails for an undefined problem."""
        self._test_undefined_problem(rescore_problem)

    def test_rescore_with_no_state(self):
        """Tests rescoring when students have no problem state."""
        self._test_run_with_no_state(rescore_problem, 'rescored')

    def test_rescore_with_failure(self):
        """Tests that a failure inside the task is recorded as FAILURE."""
        self._test_run_with_failure(rescore_problem, 'We expected this to fail')

    def test_rescore_with_long_error_msg(self):
        """Tests truncation of an over-long error message."""
        self._test_run_with_long_error_msg(rescore_problem)

    def test_rescore_with_short_error_msg(self):
        """Tests truncation of the traceback when only it overflows."""
        self._test_run_with_short_error_msg(rescore_problem)

    def test_rescoring_unrescorable(self):
        """Tests that rescoring a module type without rescore support raises."""
        input_state = json.dumps({'done': True})
        num_students = 1
        self._create_students_with_state(num_students, input_state)
        task_entry = self._create_input_entry()
        mock_instance = MagicMock()
        # Remove both the legacy and current rescore hooks so the module
        # reports itself as unrescorable via hasattr checks.
        del mock_instance.rescore_problem
        del mock_instance.rescore
        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal') as mock_get_module:
            mock_get_module.return_value = mock_instance
            with self.assertRaises(UpdateProblemModuleStateError):
                self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)
        # check values stored in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        output = json.loads(entry.task_output)
        self.assertEqual(output['exception'], "UpdateProblemModuleStateError")
        self.assertEqual(output['message'], u"Specified module {0} of type {1} does not support rescoring.".format(
            self.location,
            mock_instance.__class__,
        ))
        self.assertGreater(len(output['traceback']), 0)

    def test_rescoring_unaccessable(self):
        """
        Tests rescores a problem in a course, for all students fails if user has answered a
        problem to which user does not have access to.
        """
        input_state = json.dumps({'done': True})
        num_students = 1
        self._create_students_with_state(num_students, input_state)
        task_entry = self._create_input_entry()
        # None from get_module_for_descriptor_internal == no access.
        with patch('lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal', return_value=None):
            self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)

        self.assert_task_output(
            output=self.get_task_output(task_entry.id),
            total=num_students,
            attempted=num_students,
            succeeded=0,
            skipped=0,
            failed=num_students,
            action_name='rescored'
        )

    def test_rescoring_success(self):
        """
        Tests rescores a problem in a course, for all students succeeds.
        """
        mock_instance = MagicMock()
        # Equivalent to mock_instance.rescore.return_value = None
        getattr(mock_instance, 'rescore').return_value = None
        mock_instance.has_submitted_answer.return_value = True
        del mock_instance.done  # old CAPA code used to use this value so we delete it here to be sure
        num_students = 10
        self._create_students_with_state(num_students)
        task_entry = self._create_input_entry()
        with patch(
            'lms.djangoapps.instructor_task.tasks_helper.module_state.get_module_for_descriptor_internal'
        ) as mock_get_module:
            mock_get_module.return_value = mock_instance
            self._run_task_with_mock_celery(rescore_problem, task_entry.id, task_entry.task_id)

        self.assert_task_output(
            output=self.get_task_output(task_entry.id),
            total=num_students,
            attempted=num_students,
            succeeded=num_students,
            skipped=0,
            failed=0,
            action_name='rescored'
        )
class TestResetAttemptsInstructorTask(TestInstructorTasks):
    """Tests instructor task that resets problem attempts."""

    def test_reset_missing_current_task(self):
        """Tests that reset fails when run outside a current task."""
        self._test_missing_current_task(reset_problem_attempts)

    def test_reset_undefined_course(self):
        """Tests that reset fails for an undefined course."""
        self._test_undefined_course(reset_problem_attempts)

    def test_reset_undefined_problem(self):
        """Tests that reset fails for an undefined problem."""
        self._test_undefined_problem(reset_problem_attempts)

    def test_reset_with_no_state(self):
        """Tests reset when students have no problem state."""
        self._test_run_with_no_state(reset_problem_attempts, 'reset')

    def test_reset_with_failure(self):
        """Tests that a failure inside the task is recorded as FAILURE."""
        self._test_run_with_failure(reset_problem_attempts, 'We expected this to fail')

    def test_reset_with_long_error_msg(self):
        """Tests truncation of an over-long error message."""
        self._test_run_with_long_error_msg(reset_problem_attempts)

    def test_reset_with_short_error_msg(self):
        """Tests truncation of the traceback when only it overflows."""
        self._test_run_with_short_error_msg(reset_problem_attempts)

    def test_reset_with_some_state(self):
        """Tests that non-zero attempt counts are reset to zero."""
        initial_attempts = 3
        input_state = json.dumps({'attempts': initial_attempts})
        num_students = 10
        students = self._create_students_with_state(num_students, input_state)
        # check that entries were set correctly
        self._assert_num_attempts(students, initial_attempts)
        # run the task
        self._test_run_with_task(reset_problem_attempts, 'reset', num_students)
        # check that entries were reset
        self._assert_num_attempts(students, 0)

    def test_reset_with_zero_attempts(self):
        """Tests that students already at zero attempts are counted as skipped."""
        initial_attempts = 0
        input_state = json.dumps({'attempts': initial_attempts})
        num_students = 10
        students = self._create_students_with_state(num_students, input_state)
        # check that entries were set correctly
        self._assert_num_attempts(students, initial_attempts)
        # run the task: nothing to update, so everyone is skipped
        self._test_run_with_task(reset_problem_attempts, 'reset', 0, expected_num_skipped=num_students)
        # check that entries were reset
        self._assert_num_attempts(students, 0)

    def _test_reset_with_student(self, use_email):
        """Run a reset task for one student, with several StudentModules for the problem defined."""
        num_students = 10
        initial_attempts = 3
        input_state = json.dumps({'attempts': initial_attempts})
        students = self._create_students_with_state(num_students, input_state)
        # check that entries were set correctly
        for student in students:
            module = StudentModule.objects.get(course_id=self.course.id,
                                               student=student,
                                               module_state_key=self.location)
            state = json.loads(module.state)
            self.assertEqual(state['attempts'], initial_attempts)

        # Target the fourth student, identified either by email or username.
        if use_email:
            student_ident = students[3].email
        else:
            student_ident = students[3].username
        task_entry = self._create_input_entry(student_ident)

        status = self._run_task_with_mock_celery(reset_problem_attempts, task_entry.id, task_entry.task_id)
        # check return value
        self.assertEqual(status.get('attempted'), 1)
        self.assertEqual(status.get('succeeded'), 1)
        self.assertEqual(status.get('total'), 1)
        self.assertEqual(status.get('action_name'), 'reset')
        self.assertGreater(status.get('duration_ms'), 0)

        # compare with entry in table:
        entry = InstructorTask.objects.get(id=task_entry.id)
        self.assertEqual(json.loads(entry.task_output), status)
        self.assertEqual(entry.task_state, SUCCESS)
        # check that the correct entry was reset
        for index, student in enumerate(students):
            module = StudentModule.objects.get(course_id=self.course.id,
                                               student=student,
                                               module_state_key=self.location)
            state = json.loads(module.state)
            if index == 3:
                # Only the targeted student's attempts were cleared.
                self.assertEqual(state['attempts'], 0)
            else:
                self.assertEqual(state['attempts'], initial_attempts)

    def test_reset_with_student_username(self):
        """Tests single-student reset addressed by username."""
        self._test_reset_with_student(False)

    def test_reset_with_student_email(self):
        """Tests single-student reset addressed by email."""
        self._test_reset_with_student(True)
class TestDeleteStateInstructorTask(TestInstructorTasks):
    """Tests instructor task that deletes problem state."""

    def test_delete_missing_current_task(self):
        """Tests that delete fails when run outside a current task."""
        self._test_missing_current_task(delete_problem_state)

    def test_delete_undefined_course(self):
        """Tests that delete fails for an undefined course."""
        self._test_undefined_course(delete_problem_state)

    def test_delete_undefined_problem(self):
        """Tests that delete fails for an undefined problem."""
        self._test_undefined_problem(delete_problem_state)

    def test_delete_with_no_state(self):
        """Tests delete when students have no problem state."""
        self._test_run_with_no_state(delete_problem_state, 'deleted')

    def test_delete_with_failure(self):
        """Tests that a failure inside the task is recorded as FAILURE."""
        self._test_run_with_failure(delete_problem_state, 'We expected this to fail')

    def test_delete_with_long_error_msg(self):
        """Tests truncation of an over-long error message."""
        self._test_run_with_long_error_msg(delete_problem_state)

    def test_delete_with_short_error_msg(self):
        """Tests truncation of the traceback when only it overflows."""
        self._test_run_with_short_error_msg(delete_problem_state)

    def test_delete_with_some_state(self):
        """Tests that existing StudentModule rows are removed by the task."""
        # This will create StudentModule entries -- we don't have to worry about
        # the state inside them.
        num_students = 10
        students = self._create_students_with_state(num_students)
        # check that entries were created correctly
        for student in students:
            StudentModule.objects.get(course_id=self.course.id,
                                      student=student,
                                      module_state_key=self.location)
        self._test_run_with_task(delete_problem_state, 'deleted', num_students)
        # confirm that no state can be found anymore:
        for student in students:
            with self.assertRaises(StudentModule.DoesNotExist):
                StudentModule.objects.get(course_id=self.course.id,
                                          student=student,
                                          module_state_key=self.location)
# NOTE(review): class name has a typo — "Generationnstructor" is missing the
# "I" of "Instructor". Renaming is deferred since test-runner configs may
# reference the class by name; confirm before fixing.
class TestCertificateGenerationnstructorTask(TestInstructorTasks):
    """Tests instructor task that generates student certificates."""

    def test_generate_certificates_missing_current_task(self):
        """
        Test error is raised when certificate generation task run without current task
        """
        self._test_missing_current_task(generate_certificates)

    def test_generate_certificates_task_run(self):
        """
        Test certificate generation task run without any errors
        """
        self._test_run_with_task(
            generate_certificates,
            'certificates generated',
            0,
            0,
            expected_attempted=1,
            expected_total=1
        )
class TestOra2ResponsesInstructorTask(TestInstructorTasks):
    """Tests instructor task that fetches ora2 response data."""

    def test_ora2_missing_current_task(self):
        """Tests that the export fails when run outside a current task."""
        self._test_missing_current_task(export_ora2_data)

    def test_ora2_with_failure(self):
        """Tests that a failure inside the task is recorded as FAILURE."""
        self._test_run_with_failure(export_ora2_data, 'We expected this to fail')

    def test_ora2_with_long_error_msg(self):
        """Tests truncation of an over-long error message."""
        self._test_run_with_long_error_msg(export_ora2_data)

    def test_ora2_with_short_error_msg(self):
        """Tests truncation of the traceback when only it overflows."""
        self._test_run_with_short_error_msg(export_ora2_data)

    def test_ora2_runs_task(self):
        """Tests that the entry point delegates to run_main_task with the right args."""
        task_entry = self._create_input_entry()
        task_xmodule_args = self._get_xmodule_instance_args()
        with patch('lms.djangoapps.instructor_task.tasks.run_main_task') as mock_main_task:
            export_ora2_data(task_entry.id, task_xmodule_args)
            action_name = ugettext_noop('generated')
            assert mock_main_task.call_count == 1
            args = mock_main_task.call_args[0]
            # (entry_id, task_fn, action_name) positional signature.
            assert args[0] == task_entry.id
            assert callable(args[1])
            assert args[2] == action_name
|
agpl-3.0
|
ebewe/mangopay2-python-sdk
|
mangopaysdk/tools/authenticationHelper.py
|
2
|
1347
|
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2
from mangopaysdk.tools import enums
from mangopaysdk.configuration import Configuration
class AuthenticationHelper:
    """Builds the authentication object attached to MangoPay REST requests.

    Chooses between HTTP basic auth and OAuth2 bearer auth depending on
    whether strong authentication is required for the call.
    """

    # Root/parent MangoPayApi instance that holds the OAuthToken and
    # Configuration instance.
    _root = None

    def __init__(self, root):
        """Constructor.

        param MangoPayApi Root/parent instance that holds the OAuthToken and Configuration instance
        """
        self._root = root

    def GetRequestAuthObject(self, authRequired):
        """Get HTTP header value with authorization string.

        param authRequired - if False force basic auth
        return string Authorization string
        raises Exception when strong auth is required but no valid OAuth
        token is available
        """
        # Idiomatic truthiness test instead of `authRequired == False`;
        # also treats None/0 as "no auth required", matching caller intent.
        if not authRequired:  # or self._root.Config.AuthenticationType == enums.AuthenticationType.Basic:
            return HTTPBasicAuth(self._root.Config.ClientID, self._root.Config.ClientPassword)

        oauth = self._root.OAuthTokenManager.GetToken()
        # A token missing either field cannot authenticate the request.
        if not oauth or not oauth.access_token or not oauth.token_type:
            raise Exception('OAuthToken is not created (or is invalid) for strong authentication')
        token = {'access_token': oauth.access_token, 'token_type': oauth.token_type}
        return OAuth2(token=token, client_id=self._root.Config.ClientID)
|
mit
|
ogenstad/ansible
|
lib/ansible/modules/network/aireos/aireos_command.py
|
73
|
6929
|
#!/usr/bin/python
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aireos_command
version_added: "2.4"
author: "James Mighion (@jmighion)"
short_description: Run commands on remote devices running Cisco WLC
description:
- Sends arbitrary commands to an aireos node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(aireos_config) to configure WLC devices.
extends_documentation_fragment: aireos
options:
commands:
description:
- List of commands to send to the remote aireos device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
default: 1
"""
EXAMPLES = """
tasks:
- name: run show sysinfo on remote devices
aireos_command:
commands: show sysinfo
- name: run show sysinfo and check to see if output contains Cisco Controller
aireos_command:
commands: show sysinfo
wait_for: result[0] contains 'Cisco Controller'
- name: run multiple commands on remote nodes
aireos_command:
commands:
- show sysinfo
- show interface summary
- name: run multiple commands and evaluate the output
aireos_command:
commands:
- show sysinfo
- show interface summary
wait_for:
- result[0] contains Cisco Controller
- result[1] contains Loopback0
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.network.aireos.aireos import run_commands
from ansible.module_utils.network.aireos.aireos import aireos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
    """Yield each command response as a list of lines.

    String responses are split on newlines; anything already structured
    (e.g. a list) is passed through unchanged.
    """
    for response in stdout:
        if isinstance(response, string_types):
            response = str(response).split('\n')
        yield response
def parse_commands(module, warnings):
    """Normalize the ``commands`` parameter into command dictionaries.

    In check mode, non-``show`` commands produce a warning; commands that
    would enter configuration mode abort the module outright.
    """
    transform = ComplexList(dict(
        command=dict(key=True),
        prompt=dict(),
        answer=dict()
    ), module)
    commands = transform(module.params['commands'])
    for item in commands:
        cmd = item['command']
        if module.check_mode and not cmd.startswith('show'):
            warnings.append(
                'only show commands are supported when using check mode, not '
                'executing `%s`' % cmd
            )
        elif cmd.startswith('conf'):
            module.fail_json(
                msg='aireos_command does not support running config mode '
                    'commands. Please use aireos_config instead'
            )
    return commands
def main():
    """main entry point for module execution

    Builds the argument spec, runs the requested commands against the
    WLC device, and retries until the ``wait_for`` conditionals are
    satisfied (all or any, per ``match``) or the retry budget runs out.
    """
    argument_spec = dict(
        commands=dict(type='list', required=True),
        wait_for=dict(type='list', aliases=['waitfor']),
        match=dict(default='all', choices=['all', 'any']),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )

    # Merge in the shared aireos provider/connection arguments.
    argument_spec.update(aireos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    result = {'changed': False}

    warnings = list()
    check_args(module, warnings)
    commands = parse_commands(module, warnings)
    result['warnings'] = warnings

    wait_for = module.params['wait_for'] or list()
    conditionals = [Conditional(c) for c in wait_for]

    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']

    while retries > 0:
        responses = run_commands(module, commands)

        # Iterate over a copy so satisfied conditionals can be removed
        # from the live list mid-loop.
        for item in list(conditionals):
            if item(responses):
                if match == 'any':
                    # One satisfied conditional is enough in 'any' mode.
                    conditionals = list()
                    break
                conditionals.remove(item)

        if not conditionals:
            break

        time.sleep(interval)
        retries -= 1

    if conditionals:
        # Some conditionals never matched within the retry budget.
        failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)

    result.update({
        'changed': False,
        'stdout': responses,
        'stdout_lines': list(to_lines(responses))
    })

    module.exit_json(**result)
# Standard script entry point; Ansible also invokes main() this way.
if __name__ == '__main__':
    main()
|
gpl-3.0
|
cprogrammer1994/ModernGL
|
examples/ported/hello_program.py
|
1
|
1088
|
import numpy as np
import _example
class Example(_example.Example):
    # Renders a single solid-color triangle: the canonical "hello world"
    # for shader programs.
    title = 'Hello Program'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Pass-through vertex shader plus a constant-color fragment shader.
        self.prog = self.ctx.program(
            vertex_shader='''
                #version 330

                in vec2 in_vert;

                void main() {
                    gl_Position = vec4(in_vert, 0.0, 1.0);
                }
            ''',
            fragment_shader='''
                #version 330

                out vec4 f_color;

                void main() {
                    f_color = vec4(0.2, 0.4, 0.7, 1.0);
                }
            ''',
        )

        # Three 2D vertices (x, y pairs) forming one triangle in NDC.
        vertices = np.array([
            0.0, 0.8,
            -0.6, -0.8,
            0.6, -0.8,
        ])

        # GPU buffer expects 32-bit floats; vertex array binds it to in_vert.
        self.vbo = self.ctx.buffer(vertices.astype('f4').tobytes())
        self.vao = self.ctx.vertex_array(self.prog, self.vbo, 'in_vert')

    def render(self, time: float, frame_time: float):
        # Clear to white, then draw the triangle every frame.
        self.ctx.screen.clear(color=(1.0, 1.0, 1.0))
        self.vao.render()
# Launch the example window when executed directly.
if __name__ == '__main__':
    Example.run()
|
mit
|
ronert/cf-predict
|
setup.py
|
1
|
1175
|
#!/usr/bin/env python

"""Setup script for cf-predict."""

import setuptools

from cf_predict import __project__, __version__

try:
    # Use context managers so the file handles are closed deterministically
    # instead of leaking from bare open(...).read() calls.
    with open("README.rst") as readme_file:
        README = readme_file.read()
    with open("CHANGES.rst") as changes_file:
        CHANGES = changes_file.read()
except IOError:
    # Long description is optional; fall back when the docs are absent.
    DESCRIPTION = "<placeholder>"
else:
    DESCRIPTION = README + '\n' + CHANGES

with open("requirements.txt") as requirements_file:
    REQUIREMENTS = requirements_file.readlines()

setuptools.setup(
    name=__project__,
    version=__version__,

    description="Cloud Foundry Python Predictive API Boilerplate",
    url='https://github.com/ronert/cf-predict',
    author='Ronert Obst',
    author_email='[email protected]',

    packages=setuptools.find_packages(),

    entry_points={'console_scripts': []},

    long_description=(DESCRIPTION),
    license='MIT',
    classifiers=[
        # TODO: update this list to match your application: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 1 - Planning',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],

    install_requires=REQUIREMENTS,
)
|
mit
|
trenton3983/Fluent_Python
|
09-pythonic-obj/vector2d_v2_fmt_snippet.py
|
7
|
2700
|
"""
A 2-dimensional vector class
>>> v1 = Vector2d(3, 4)
>>> x, y = v1
>>> x, y
(3.0, 4.0)
>>> v1
Vector2d(3.0, 4.0)
>>> v1_clone = eval(repr(v1))
>>> v1 == v1_clone
True
>>> print(v1)
(3.0, 4.0)
>>> octets = bytes(v1)
>>> octets
b'\\x00\\x00\\x00\\x00\\x00\\x00\\x08@\\x00\\x00\\x00\\x00\\x00\\x00\\x10@'
>>> abs(v1)
5.0
>>> bool(v1), bool(Vector2d(0, 0))
(True, False)
Test of ``.frombytes()`` class method:
>>> v1_clone = Vector2d.frombytes(bytes(v1))
>>> v1_clone
Vector2d(3.0, 4.0)
>>> v1 == v1_clone
True
Tests of ``format()`` with Cartesian coordinates:
>>> format(v1)
'(3.0, 4.0)'
>>> format(v1, '.2f')
'(3.00, 4.00)'
>>> format(v1, '.3e')
'(3.000e+00, 4.000e+00)'
Tests of the ``angle`` method::
>>> Vector2d(0, 0).angle()
0.0
>>> Vector2d(1, 0).angle()
0.0
>>> epsilon = 10**-8
>>> abs(Vector2d(0, 1).angle() - math.pi/2) < epsilon
True
>>> abs(Vector2d(1, 1).angle() - math.pi/4) < epsilon
True
Tests of ``format()`` with polar coordinates:
>>> format(Vector2d(1, 1), 'p') # doctest:+ELLIPSIS
'<1.414213..., 0.785398...>'
>>> format(Vector2d(1, 1), '.3ep')
'<1.414e+00, 7.854e-01>'
>>> format(Vector2d(1, 1), '0.5fp')
'<1.41421, 0.78540>'
"""
from array import array
import math
class Vector2d:
    """A 2-dimensional vector with iteration, formatting (Cartesian and
    polar), byte serialization and equality support."""

    # array/struct type code used by __bytes__ and frombytes().
    typecode = 'd'

    def __init__(self, x, y):
        # Coerce eagerly so downstream arithmetic always sees floats.
        self.x = float(x)
        self.y = float(y)

    def __iter__(self):
        # Yield x then y, enabling unpacking: ``a, b = vec``.
        yield self.x
        yield self.y

    def __repr__(self):
        return '{}({!r}, {!r})'.format(type(self).__name__, self.x, self.y)

    def __str__(self):
        return str((self.x, self.y))

    def __bytes__(self):
        return bytes(array(Vector2d.typecode, [self.x, self.y]))

    def __eq__(self, other):
        # Compares component-wise; any 2-item iterable with equal values
        # compares equal, matching the original contract.
        return tuple(self) == tuple(other)

    def __abs__(self):
        return math.hypot(self.x, self.y)

    def __bool__(self):
        return abs(self) != 0.0

    def angle(self):
        """Return the angle of the vector in radians."""
        return math.atan2(self.y, self.x)

    def __format__(self, fmt_spec=''):
        """Format in Cartesian '(x, y)' form, or polar '<r, theta>' form
        when the spec ends with 'p'."""
        if fmt_spec.endswith('p'):
            inner_spec = fmt_spec[:-1]
            first, second = abs(self), self.angle()
            template = '<{}, {}>'
        else:
            inner_spec = fmt_spec
            first, second = self.x, self.y
            template = '({}, {})'
        return template.format(format(first, inner_spec),
                               format(second, inner_spec))

    @classmethod
    def frombytes(cls, octets):
        """Alternate constructor: rebuild a vector from __bytes__ output."""
        memv = memoryview(octets).cast(cls.typecode)
        return cls(*memv)
|
mit
|
endolith/numpy
|
numpy/distutils/fcompiler/__init__.py
|
6
|
39984
|
"""numpy.distutils.fcompiler
Contains FCompiler, an abstract base class that defines the interface
for the numpy.distutils Fortran compiler abstraction model.
Terminology:
To be consistent, where the term 'executable' is used, it means the single
file, like 'gcc', that is executed, and should be a string. In contrast,
'command' means the entire command line, like ['gcc', '-c', 'file.c'], and
should be a list.
But note that FCompiler.executables is actually a dictionary of commands.
"""
__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',
'dummy_fortran_file']
import os
import sys
import re
from numpy.compat import open_latin1
from distutils.sysconfig import get_python_lib
from distutils.fancy_getopt import FancyGetopt
from distutils.errors import DistutilsModuleError, \
DistutilsExecError, CompileError, LinkError, DistutilsPlatformError
from distutils.util import split_quoted, strtobool
from numpy.distutils.ccompiler import CCompiler, gen_lib_options
from numpy.distutils import log
from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
make_temp_file, get_shared_lib_extension
from numpy.distutils.exec_command import find_executable
from numpy.distutils import _shell_utils
from .environment import EnvironmentConfig
__metaclass__ = type
class CompilerNotFound(Exception):
    """Raised when no usable Fortran compiler can be located."""
def flaglist(s):
    """Split a whitespace/quote-delimited flag string into a list;
    pass non-string values through unchanged."""
    return split_quoted(s) if is_string(s) else s
def str2bool(s):
    """Coerce a distutils-style truth string ('yes', '0', ...) via
    strtobool, or any other value via bool()."""
    return strtobool(s) if is_string(s) else bool(s)
def is_sequence_of_strings(seq):
    """Return True when *seq* is a sequence whose items are all strings."""
    if not is_sequence(seq):
        return False
    return all_strings(seq)
class FCompiler(CCompiler):
"""Abstract base class to define the interface that must be implemented
by real Fortran compiler classes.
Methods that subclasses may redefine:
update_executables(), find_executables(), get_version()
get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
get_flags_arch_f90(), get_flags_debug_f90(),
get_flags_fix(), get_flags_linker_so()
DON'T call these methods (except get_version) after
constructing a compiler instance or inside any other method.
All methods, except update_executables() and find_executables(),
may call the get_version() method.
After constructing a compiler instance, always call customize(dist=None)
method that finalizes compiler construction and makes the following
attributes available:
compiler_f77
compiler_f90
compiler_fix
linker_so
archiver
ranlib
libraries
library_dirs
"""
# These are the environment variables and distutils keys used.
# Each configuration description is
# (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>, <append>)
# The hook names are handled by the self._environment_hook method.
# - names starting with 'self.' call methods in this class
# - names starting with 'exe.' return the key in the executables dict
# - names like 'flags.YYY' return self.get_flag_YYY()
# convert is either None or a function to convert a string to the
# appropriate type used.
distutils_vars = EnvironmentConfig(
distutils_section='config_fc',
noopt = (None, None, 'noopt', str2bool, False),
noarch = (None, None, 'noarch', str2bool, False),
debug = (None, None, 'debug', str2bool, False),
verbose = (None, None, 'verbose', str2bool, False),
)
command_vars = EnvironmentConfig(
distutils_section='config_fc',
compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False),
compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False),
compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False),
version_cmd = ('exe.version_cmd', None, None, None, False),
linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False),
linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False),
archiver = (None, 'AR', 'ar', None, False),
ranlib = (None, 'RANLIB', 'ranlib', None, False),
)
flag_vars = EnvironmentConfig(
distutils_section='config_fc',
f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True),
f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True),
free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True),
fix = ('flags.fix', None, None, flaglist, False),
opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),
opt_f77 = ('flags.opt_f77', None, None, flaglist, False),
opt_f90 = ('flags.opt_f90', None, None, flaglist, False),
arch = ('flags.arch', 'FARCH', 'arch', flaglist, False),
arch_f77 = ('flags.arch_f77', None, None, flaglist, False),
arch_f90 = ('flags.arch_f90', None, None, flaglist, False),
debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True),
debug_f77 = ('flags.debug_f77', None, None, flaglist, False),
debug_f90 = ('flags.debug_f90', None, None, flaglist, False),
flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True),
linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True),
linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True),
ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True),
)
language_map = {'.f': 'f77',
'.for': 'f77',
'.F': 'f77', # XXX: needs preprocessor
'.ftn': 'f77',
'.f77': 'f77',
'.f90': 'f90',
'.F90': 'f90', # XXX: needs preprocessor
'.f95': 'f90',
}
language_order = ['f90', 'f77']
# These will be set by the subclass
compiler_type = None
compiler_aliases = ()
version_pattern = None
possible_executables = []
executables = {
'version_cmd': ["f77", "-v"],
'compiler_f77': ["f77"],
'compiler_f90': ["f90"],
'compiler_fix': ["f90", "-fixed"],
'linker_so': ["f90", "-shared"],
'linker_exe': ["f90"],
'archiver': ["ar", "-cr"],
'ranlib': None,
}
# If compiler does not support compiling Fortran 90 then it can
# suggest using another compiler. For example, gnu would suggest
# gnu95 compiler type when there are F90 sources.
suggested_f90_compiler = None
compile_switch = "-c"
object_switch = "-o " # Ending space matters! It will be stripped
# but if it is missing then object_switch
# will be prefixed to object file name by
# string concatenation.
library_switch = "-o " # Ditto!
# Switch to specify where module files are created and searched
# for USE statement. Normally it is a string and also here ending
# space matters. See above.
module_dir_switch = None
# Switch to specify where module files are searched for USE statement.
module_include_switch = '-I'
pic_flags = [] # Flags to create position-independent code
src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR']
obj_extension = ".o"
shared_lib_extension = get_shared_lib_extension()
static_lib_extension = ".a" # or .lib
static_lib_format = "lib%s%s" # or %s%s
shared_lib_format = "%s%s"
exe_extension = ""
_exe_cache = {}
_executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe', 'archiver',
'ranlib']
# This will be set by new_fcompiler when called in
# command/{build_ext.py, build_clib.py, config.py} files.
c_compiler = None
# extra_{f77,f90}_compile_args are set by build_ext.build_extension method
extra_f77_compile_args = []
extra_f90_compile_args = []
def __init__(self, *args, **kw):
    # Delegate generic initialisation to distutils' CCompiler.
    CCompiler.__init__(self, *args, **kw)
    # Re-bind the environment/config lookup hooks to this instance.
    self.distutils_vars = self.distutils_vars.clone(self._environment_hook)
    self.command_vars = self.command_vars.clone(self._environment_hook)
    self.flag_vars = self.flag_vars.clone(self._environment_hook)
    # Copy the class-level dict so instance mutations do not leak into the
    # class (or other instances), and ensure every known key is present.
    self.executables = self.executables.copy()
    for e in self._executable_keys:
        if e not in self.executables:
            self.executables[e] = None

    # Some methods depend on .customize() being called first, so
    # this keeps track of whether that's happened yet.
    self._is_customised = False

def __copy__(self):
    # Shallow-copy the instance, then re-clone the per-instance state
    # (variable hooks and the executables dict) so the copy is independent.
    obj = self.__new__(self.__class__)
    obj.__dict__.update(self.__dict__)
    obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)
    obj.command_vars = obj.command_vars.clone(obj._environment_hook)
    obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)
    obj.executables = obj.executables.copy()
    return obj

def copy(self):
    # Public alias for copy.copy(self).
    return self.__copy__()

# Use properties for the attributes used by CCompiler. Setting them
# as attributes from the self.executables dictionary is error-prone,
# so we get them from there each time.
def _command_property(key):
    # NOTE: helper executed at class-definition time; `key` is the
    # executables-dict key, not `self`.
    def fget(self):
        # Commands are only meaningful after customize() has run.
        assert self._is_customised
        return self.executables[key]
    return property(fget=fget)
version_cmd = _command_property('version_cmd')
compiler_f77 = _command_property('compiler_f77')
compiler_f90 = _command_property('compiler_f90')
compiler_fix = _command_property('compiler_fix')
linker_so = _command_property('linker_so')
linker_exe = _command_property('linker_exe')
archiver = _command_property('archiver')
ranlib = _command_property('ranlib')
# Keep terminology consistent with CCompiler's set_executable API.
def set_executable(self, key, value):
    """Alias for :meth:`set_command`."""
    self.set_command(key, value)

def set_commands(self, **kw):
    """Set several commands at once, one keyword argument per command."""
    for name, cmd in kw.items():
        self.set_command(name, cmd)
def set_command(self, key, value):
    """Set the command for *key*, normalising a string into an argv list.

    Parameters
    ----------
    key : str
        One of the names in ``self._executable_keys``.
    value : str or sequence of str or None
        The command; a plain string is split with shell-style quoting.

    Raises
    ------
    ValueError
        If *key* is not a known executable name.
    """
    # `key not in` instead of the original `not key in` (PEP 8 idiom).
    if key not in self._executable_keys:
        raise ValueError(
            "unknown executable '%s' for class %s" %
            (key, self.__class__.__name__))
    if is_string(value):
        value = split_quoted(value)
    # After normalisation, everything past argv[0] must be strings.
    assert value is None or is_sequence_of_strings(value[1:]), (key, value)
    self.executables[key] = value
######################################################################
## Methods that subclasses may redefine. But don't call these methods!
## They are private to FCompiler class and may return unexpected
## results if used elsewhere. So, you have been warned..

def find_executables(self):
    """Go through the self.executables dictionary, and attempt to
    find and assign appropriate executables.

    Executable names are looked for in the environment (environment
    variables, the distutils.cfg, and command line), the 0th-element of
    the command list, and the self.possible_executables list.

    Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
    or the Fortran 90 compiler executable is used, unless overridden
    by an environment setting.

    Subclasses should call this if overridden.
    """
    assert self._is_customised
    # Shared cache so each executable is searched for on PATH only once.
    exe_cache = self._exe_cache
    def cached_find_executable(exe):
        if exe in exe_cache:
            return exe_cache[exe]
        fc_exe = find_executable(exe)
        # Cache under both the requested name and the resolved path.
        exe_cache[exe] = exe_cache[fc_exe] = fc_exe
        return fc_exe
    def verify_command_form(name, value):
        # NOTE(review): defined but not invoked anywhere in this method.
        if value is not None and not is_sequence_of_strings(value):
            raise ValueError(
                "%s value %r is invalid in class %s" %
                (name, value, self.__class__.__name__))
    def set_exe(exe_key, f77=None, f90=None):
        # Resolve argv[0] for self.executables[exe_key] in place; returns
        # the resolved path, or None (clearing the command) when nothing
        # suitable is found.
        cmd = self.executables.get(exe_key, None)
        if not cmd:
            return None
        # Note that we get cmd[0] here if the environment doesn't
        # have anything set
        exe_from_environ = getattr(self.command_vars, exe_key)
        if not exe_from_environ:
            possibles = [f90, f77] + self.possible_executables
        else:
            possibles = [exe_from_environ] + self.possible_executables

        # De-duplicate while preserving order, expanding the <F77>/<F90>
        # placeholders to the compilers found so far.
        seen = set()
        unique_possibles = []
        for e in possibles:
            if e == '<F77>':
                e = f77
            elif e == '<F90>':
                e = f90
            if not e or e in seen:
                continue
            seen.add(e)
            unique_possibles.append(e)

        for exe in unique_possibles:
            fc_exe = cached_find_executable(exe)
            if fc_exe:
                cmd[0] = fc_exe
                return fc_exe
        self.set_command(exe_key, None)
        return None

    ctype = self.compiler_type
    f90 = set_exe('compiler_f90')
    if not f90:
        # No F90 compiler: fall back to F77, or give up entirely.
        f77 = set_exe('compiler_f77')
        if f77:
            log.warn('%s: no Fortran 90 compiler found' % ctype)
        else:
            raise CompilerNotFound('%s: f90 nor f77' % ctype)
    else:
        f77 = set_exe('compiler_f77', f90=f90)
        if not f77:
            log.warn('%s: no Fortran 77 compiler found' % ctype)
    set_exe('compiler_fix', f90=f90)
    set_exe('linker_so', f77=f77, f90=f90)
    set_exe('linker_exe', f77=f77, f90=f90)
    set_exe('version_cmd', f77=f77, f90=f90)
    set_exe('archiver')
    set_exe('ranlib')

def update_executables(self):
    """Called at the beginning of customisation. Subclasses should
    override this if they need to set up the executables dictionary.

    Note that self.find_executables() is run afterwards, so the
    self.executables dictionary values can contain <F77> or <F90> as
    the command, which will be replaced by the found F77 or F90
    compiler.
    """
    pass
def get_flags(self):
"""List of flags common to all compiler types."""
return [] + self.pic_flags
def _get_command_flags(self, key):
cmd = self.executables.get(key, None)
if cmd is None:
return []
return cmd[1:]
def get_flags_f77(self):
    """List of Fortran 77 specific flags."""
    return self._get_command_flags('compiler_f77')
def get_flags_f90(self):
    """List of Fortran 90 specific flags."""
    return self._get_command_flags('compiler_f90')
def get_flags_free(self):
    """List of Fortran 90 free format specific flags."""
    return []
def get_flags_fix(self):
    """List of Fortran 90 fixed format specific flags."""
    return self._get_command_flags('compiler_fix')
def get_flags_linker_so(self):
    """List of linker flags to build a shared library."""
    return self._get_command_flags('linker_so')
def get_flags_linker_exe(self):
    """List of linker flags to build an executable."""
    return self._get_command_flags('linker_exe')
def get_flags_ar(self):
    """List of archiver flags. """
    return self._get_command_flags('archiver')
def get_flags_opt(self):
    """List of architecture independent compiler flags."""
    return []
def get_flags_arch(self):
    """List of architecture dependent compiler flags."""
    return []
def get_flags_debug(self):
    """List of compiler flags to compile with debugging information."""
    return []

# By default the opt/arch/debug flags are shared between the F77 and F90
# dialects; subclasses may override the per-dialect variants individually
# (customize() checks for such overrides).
get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt
get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch
get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug

def get_libraries(self):
    """List of compiler libraries (a copy)."""
    return self.libraries[:]
def get_library_dirs(self):
    """List of compiler library directories (a copy)."""
    return self.library_dirs[:]

def get_version(self, force=False, ok_status=[0]):
    """Return the compiler version string; requires customize() first.

    Raises CompilerNotFound when the version cannot be determined.
    NOTE(review): `ok_status` is a mutable default argument; it is only
    passed through, never mutated here.
    """
    assert self._is_customised
    version = CCompiler.get_version(self, force=force, ok_status=ok_status)
    if version is None:
        raise CompilerNotFound()
    return version
############################################################
## Public methods:

def customize(self, dist = None):
    """Customize Fortran compiler.

    This method gets Fortran compiler specific information from
    (i) class definition, (ii) environment, (iii) distutils config
    files, and (iv) command line (later overrides earlier).

    This method should be always called after constructing a
    compiler instance. But not in __init__ because Distribution
    instance is needed for (iii) and (iv).
    """
    log.info('customize %s' % (self.__class__.__name__))

    self._is_customised = True

    # Let distribution/config/command-line values override class defaults.
    self.distutils_vars.use_distribution(dist)
    self.command_vars.use_distribution(dist)
    self.flag_vars.use_distribution(dist)

    self.update_executables()

    # find_executables takes care of setting the compiler commands,
    # version_cmd, linker_so, linker_exe, ar, and ranlib
    self.find_executables()

    noopt = self.distutils_vars.get('noopt', False)
    noarch = self.distutils_vars.get('noarch', noopt)
    debug = self.distutils_vars.get('debug', False)

    f77 = self.command_vars.compiler_f77
    f90 = self.command_vars.compiler_f90

    f77flags = []
    f90flags = []
    freeflags = []
    fixflags = []

    if f77:
        f77 = _shell_utils.NativeParser.split(f77)
        f77flags = self.flag_vars.f77
    if f90:
        f90 = _shell_utils.NativeParser.split(f90)
        f90flags = self.flag_vars.f90
        freeflags = self.flag_vars.free
    # XXX Assuming that free format is default for f90 compiler.
    fix = self.command_vars.compiler_fix
    # NOTE: this and similar examples are probably just
    # excluding --coverage flag when F90 = gfortran --coverage
    # instead of putting that flag somewhere more appropriate
    # this and similar examples where a Fortran compiler
    # environment variable has been customized by CI or a user
    # should perhaps eventually be more thoroughly tested and more
    # robustly handled
    if fix:
        fix = _shell_utils.NativeParser.split(fix)
        fixflags = self.flag_vars.fix + f90flags

    oflags, aflags, dflags = [], [], []
    # examine get_flags_<tag>_<compiler> for extra flags
    # only add them if the method is different from get_flags_<tag>
    def get_flags(tag, flags):
        # note that self.flag_vars.<tag> calls self.get_flags_<tag>()
        flags.extend(getattr(self.flag_vars, tag))
        this_get = getattr(self, 'get_flags_' + tag)
        for name, c, flagvar in [('f77', f77, f77flags),
                                 ('f90', f90, f90flags),
                                 ('f90', fix, fixflags)]:
            t = '%s_%s' % (tag, name)
            if c and this_get is not getattr(self, 'get_flags_' + t):
                flagvar.extend(getattr(self.flag_vars, t))
    if not noopt:
        get_flags('opt', oflags)
        if not noarch:
            get_flags('arch', aflags)
    if debug:
        get_flags('debug', dflags)

    fflags = self.flag_vars.flags + dflags + oflags + aflags

    # Re-register the fully-flagged commands.
    if f77:
        self.set_commands(compiler_f77=f77+f77flags+fflags)
    if f90:
        self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
    if fix:
        self.set_commands(compiler_fix=fix+fixflags+fflags)

    #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
    linker_so = self.linker_so
    if linker_so:
        linker_so_flags = self.flag_vars.linker_so
        if sys.platform.startswith('aix'):
            # AIX requires linking through the ld_so_aix helper script.
            python_lib = get_python_lib(standard_lib=1)
            ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
            python_exp = os.path.join(python_lib, 'config', 'python.exp')
            linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
        self.set_commands(linker_so=linker_so+linker_so_flags)

    linker_exe = self.linker_exe
    if linker_exe:
        linker_exe_flags = self.flag_vars.linker_exe
        self.set_commands(linker_exe=linker_exe+linker_exe_flags)

    ar = self.command_vars.archiver
    if ar:
        arflags = self.flag_vars.ar
        self.set_commands(archiver=[ar]+arflags)

    self.set_library_dirs(self.get_library_dirs())
    self.set_libraries(self.get_libraries())
def dump_properties(self):
    """Print out the attributes of a compiler instance."""
    props = []
    for key in list(self.executables.keys()) + \
            ['version', 'libraries', 'library_dirs',
             'object_switch', 'compile_switch']:
        if hasattr(self, key):
            v = getattr(self, key)
            props.append((key, None, '= '+repr(v)))
    props.sort()

    pretty_printer = FancyGetopt(props)
    for l in pretty_printer.generate_help("%s instance properties:" \
                                          % (self.__class__.__name__)):
        # Strip FancyGetopt's "--" option prefix from each help line.
        if l[:4]=='  --':
            l = '  ' + l[4:]
        print(l)
###################

def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
    """Compile 'src' to product 'obj'.

    Chooses the compiler flavour from the source file itself:
      * fixed-form extension without an f90 mode header -> Fortran 77
      * free-format source                              -> Fortran 90 (free)
      * otherwise                                       -> Fortran 90 (fixed)
    Raises CompileError if the spawned compiler fails.
    """
    src_flags = {}
    if is_f_file(src) and not has_f90_header(src):
        flavor = ':f77'
        compiler = self.compiler_f77
        src_flags = get_f77flags(src)
        extra_compile_args = self.extra_f77_compile_args or []
    elif is_free_format(src):
        flavor = ':f90'
        compiler = self.compiler_f90
        if compiler is None:
            raise DistutilsExecError('f90 not supported by %s needed for %s'\
                  % (self.__class__.__name__, src))
        extra_compile_args = self.extra_f90_compile_args or []
    else:
        flavor = ':fix'
        compiler = self.compiler_fix
        if compiler is None:
            raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\
                  % (self.__class__.__name__, src))
        extra_compile_args = self.extra_f90_compile_args or []
    # A trailing space on object_switch means the object filename is a
    # separate argv element; otherwise it is concatenated to the switch.
    if self.object_switch[-1]==' ':
        o_args = [self.object_switch.strip(), obj]
    else:
        o_args = [self.object_switch.strip()+obj]

    assert self.compile_switch.strip()
    s_args = [self.compile_switch, src]

    if extra_compile_args:
        log.info('extra %s options: %r' \
                 % (flavor[1:], ' '.join(extra_compile_args)))

    # Per-source 'CF77FLAGS(<compiler type>)' overrides (see get_f77flags).
    extra_flags = src_flags.get(self.compiler_type, [])
    if extra_flags:
        log.info('using compile options from source: %r' \
                 % ' '.join(extra_flags))

    command = compiler + cc_args + extra_flags + s_args + o_args \
              + extra_postargs + extra_compile_args

    display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,
                          src)
    try:
        self.spawn(command, display=display)
    except DistutilsExecError as e:
        msg = str(e)
        raise CompileError(msg)
def module_options(self, module_dirs, module_build_dir):
    """Return the compiler options controlling Fortran module files.

    *module_build_dir* is where newly created module files go (via
    ``module_dir_switch``); *module_dirs* are extra directories searched
    for modules referenced by USE statements (``module_include_switch``).
    """
    opts = []
    if self.module_dir_switch is None:
        print('XXX: module_build_dir=%r option ignored' % (module_build_dir))
        print('XXX: Fix module_dir_switch for ', self.__class__.__name__)
    else:
        # A trailing space means the directory is a separate argument.
        if self.module_dir_switch[-1]==' ':
            opts += [self.module_dir_switch.strip(), module_build_dir]
        else:
            opts.append(self.module_dir_switch.strip()+module_build_dir)
    if self.module_include_switch is None:
        print('XXX: module_dirs=%r option ignored' % (module_dirs))
        print('XXX: Fix module_include_switch for ', self.__class__.__name__)
    else:
        for directory in [module_build_dir]+module_dirs:
            opts.append('%s%s' % (self.module_include_switch, directory))
    return opts
def library_option(self, lib):
    """Return the linker option that links in library *lib* (``-l<lib>``)."""
    return "-l%s" % (lib,)

def library_dir_option(self, dir):
    """Return the option adding *dir* to the library search path (``-L<dir>``)."""
    return "-L%s" % (dir,)
def link(self, target_desc, objects,
         output_filename, output_dir=None, libraries=None,
         library_dirs=None, runtime_library_dirs=None,
         export_symbols=None, debug=0, extra_preargs=None,
         extra_postargs=None, build_temp=None, target_lang=None):
    """Link *objects* into a shared library or executable.

    Follows the distutils ``CCompiler.link`` parameter contract; raises
    LinkError when the spawned linker fails.  Skips the link entirely
    when the output is already up to date.
    """
    objects, output_dir = self._fix_object_args(objects, output_dir)
    libraries, library_dirs, runtime_library_dirs = \
        self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)

    lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
                               libraries)
    if is_string(output_dir):
        output_filename = os.path.join(output_dir, output_filename)
    elif output_dir is not None:
        raise TypeError("'output_dir' must be a string or None")

    if self._need_link(objects, output_filename):
        # Trailing space on library_switch: output file is a separate
        # argv element (cf. object_switch handling in _compile).
        if self.library_switch[-1]==' ':
            o_args = [self.library_switch.strip(), output_filename]
        else:
            o_args = [self.library_switch.strip()+output_filename]

        if is_string(self.objects):
            ld_args = objects + [self.objects]
        else:
            ld_args = objects + self.objects
        ld_args = ld_args + lib_opts + o_args
        if debug:
            ld_args[:0] = ['-g']
        if extra_preargs:
            ld_args[:0] = extra_preargs
        if extra_postargs:
            ld_args.extend(extra_postargs)
        self.mkpath(os.path.dirname(output_filename))
        # Executables and shared objects use different linker commands.
        if target_desc == CCompiler.EXECUTABLE:
            linker = self.linker_exe[:]
        else:
            linker = self.linker_so[:]
        command = linker + ld_args
        try:
            self.spawn(command)
        except DistutilsExecError as e:
            msg = str(e)
            raise LinkError(msg)
    else:
        log.debug("skipping %s (up-to-date)", output_filename)
def _environment_hook(self, name, hook_name):
    """Resolve a hook specification from the *_vars machinery to a value.

    *hook_name* may be:
      * None           -> returns None (no hook)
      * 'self.<attr>'  -> calls method <attr> on self
      * 'exe.<key>'    -> argv[0] of self.executables[<key>] (or None)
      * 'flags.<tag>'  -> calls self.get_flags_<tag>()
      * a callable     -> called directly
    A string matching none of the prefixes falls through and returns None.
    """
    if hook_name is None:
        return None
    if is_string(hook_name):
        if hook_name.startswith('self.'):
            hook_name = hook_name[5:]
            hook = getattr(self, hook_name)
            return hook()
        elif hook_name.startswith('exe.'):
            hook_name = hook_name[4:]
            var = self.executables[hook_name]
            if var:
                return var[0]
            else:
                return None
        elif hook_name.startswith('flags.'):
            hook_name = hook_name[6:]
            hook = getattr(self, 'get_flags_' + hook_name)
            return hook()
    else:
        return hook_name()

def can_ccompiler_link(self, ccompiler):
    """
    Check if the given C compiler can link objects produced by
    this compiler.
    """
    # Default: assume compatibility; subclasses may override.
    return True

def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
    """
    Convert a set of object files that are not compatible with the default
    linker, to a file that is compatible.

    Parameters
    ----------
    objects : list
        List of object files to include.
    output_dir : str
        Output directory to place generated object files.
    extra_dll_dir : str
        Output directory to place extra DLL files that need to be
        included on Windows.

    Returns
    -------
    converted_objects : list of str
         List of converted object files.
         Note that the number of output files is not necessarily
         the same as inputs.
    """
    # Only meaningful in subclasses where can_ccompiler_link() is False.
    raise NotImplementedError()

## class FCompiler
# Ordered candidate compiler types per platform.  Keys are regex patterns
# matched against sys.platform and os.name (see
# available_fcompilers_for_platform).
_default_compilers = (
    # sys.platform mappings
    ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',
               'intelvem', 'intelem', 'flang')),
    ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),
    ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq',
                 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor')),
    ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')),
    ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),
    ('irix.*', ('mips', 'gnu', 'gnu95',)),
    ('aix.*', ('ibm', 'gnu', 'gnu95',)),
    # os.name mappings
    ('posix', ('gnu', 'gnu95',)),
    ('nt', ('gnu', 'gnu95',)),
    ('mac', ('gnu95', 'gnu', 'pg')),
    )

# Lazily populated by load_all_fcompiler_classes():
#   fcompiler_class:   compiler_type -> (compiler_type, class, description)
#   fcompiler_aliases: alias         -> same tuple
fcompiler_class = None
fcompiler_aliases = None
def load_all_fcompiler_classes():
    """Cache all the FCompiler classes found in modules in the
    numpy.distutils.fcompiler package.

    Idempotent: a second call returns immediately once the caches are
    populated.  Raises ValueError when two compiler classes declare the
    same alias.
    """
    from glob import glob
    global fcompiler_class, fcompiler_aliases
    if fcompiler_class is not None:
        return  # already loaded
    pys = os.path.join(os.path.dirname(__file__), '*.py')
    fcompiler_class = {}
    fcompiler_aliases = {}
    for fname in glob(pys):
        module_name, ext = os.path.splitext(os.path.basename(fname))
        module_name = 'numpy.distutils.fcompiler.' + module_name
        __import__ (module_name)
        module = sys.modules[module_name]
        if hasattr(module, 'compilers'):
            # Each submodule lists its FCompiler subclass names in
            # a `compilers` attribute.
            for cname in module.compilers:
                klass = getattr(module, cname)
                desc = (klass.compiler_type, klass, klass.description)
                fcompiler_class[klass.compiler_type] = desc
                for alias in klass.compiler_aliases:
                    if alias in fcompiler_aliases:
                        raise ValueError("alias %r defined for both %s and %s"
                                         % (alias, klass.__name__,
                                            fcompiler_aliases[alias][1].__name__))
                    fcompiler_aliases[alias] = desc
def _find_existing_fcompiler(compiler_types,
                             osname=None, platform=None,
                             requiref90=False,
                             c_compiler=None):
    """Return the first compiler type in *compiler_types* that is actually
    installed (i.e. its version can be queried), or None.

    When *requiref90* is true and a candidate lacks an f90 compiler, its
    ``suggested_f90_compiler`` is tried as a replacement.
    """
    from numpy.distutils.core import get_distribution
    dist = get_distribution(always=True)
    for compiler_type in compiler_types:
        v = None
        try:
            c = new_fcompiler(plat=platform, compiler=compiler_type,
                              c_compiler=c_compiler)
            c.customize(dist)
            v = c.get_version()
            if requiref90 and c.compiler_f90 is None:
                v = None
                # Fall back to the compiler this one suggests for f90.
                new_compiler = c.suggested_f90_compiler
                if new_compiler:
                    log.warn('Trying %r compiler as suggested by %r '
                             'compiler for f90 support.' % (compiler_type,
                                                            new_compiler))
                    c = new_fcompiler(plat=platform, compiler=new_compiler,
                                      c_compiler=c_compiler)
                    c.customize(dist)
                    v = c.get_version()
                    if v is not None:
                        compiler_type = new_compiler
            if requiref90 and c.compiler_f90 is None:
                raise ValueError('%s does not support compiling f90 codes, '
                                 'skipping.' % (c.__class__.__name__))
        except DistutilsModuleError:
            log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type)
        except CompilerNotFound:
            log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type)
        if v is not None:
            return compiler_type
    return None
def available_fcompilers_for_platform(osname=None, platform=None):
    """Return an ordered, de-duplicated list of compiler types that may be
    available for *osname* / *platform* (defaulting to the current system).
    """
    osname = os.name if osname is None else osname
    platform = sys.platform if platform is None else platform
    matches = []
    for pattern, compiler_types in _default_compilers:
        if not (re.match(pattern, platform) or re.match(pattern, osname)):
            continue
        for candidate in compiler_types:
            if candidate not in matches:
                matches.append(candidate)
    # Fall back to GNU Fortran when nothing matched at all.
    return matches or ['gnu']
def get_default_fcompiler(osname=None, platform=None, requiref90=False,
                          c_compiler=None):
    """Determine the default Fortran compiler to use for the given
    platform.

    Returns a compiler type string, or None when none of the candidates
    is installed.
    """
    matching_compiler_types = available_fcompilers_for_platform(osname,
                                                                platform)
    log.info("get_default_fcompiler: matching types: '%s'",
             matching_compiler_types)
    # Of the platform candidates, pick the first one actually installed.
    compiler_type = _find_existing_fcompiler(matching_compiler_types,
                                             osname=osname,
                                             platform=platform,
                                             requiref90=requiref90,
                                             c_compiler=c_compiler)
    return compiler_type

# Flag to avoid rechecking for Fortran compiler every time: a set of
# (platform, compiler) pairs that have already failed.
failed_fcompilers = set()
def new_fcompiler(plat=None,
                  compiler=None,
                  verbose=0,
                  dry_run=0,
                  force=0,
                  requiref90=False,
                  c_compiler = None):
    """Generate an instance of some FCompiler subclass for the supplied
    platform/compiler combination.

    Returns None (and remembers the failing combination) when no suitable
    compiler class is known.
    """
    global failed_fcompilers
    fcompiler_key = (plat, compiler)
    if fcompiler_key in failed_fcompilers:
        return None  # do not re-probe known failures

    load_all_fcompiler_classes()
    if plat is None:
        plat = os.name
    if compiler is None:
        compiler = get_default_fcompiler(plat, requiref90=requiref90,
                                         c_compiler=c_compiler)
    if compiler in fcompiler_class:
        module_name, klass, long_description = fcompiler_class[compiler]
    elif compiler in fcompiler_aliases:
        module_name, klass, long_description = fcompiler_aliases[compiler]
    else:
        msg = "don't know how to compile Fortran code on platform '%s'" % plat
        if compiler is not None:
            msg = msg + " with '%s' compiler." % compiler
            msg = msg + " Supported compilers are: %s)" \
                  % (','.join(fcompiler_class.keys()))
        log.warn(msg)
        failed_fcompilers.add(fcompiler_key)
        return None

    compiler = klass(verbose=verbose, dry_run=dry_run, force=force)
    compiler.c_compiler = c_compiler
    return compiler
def show_fcompilers(dist=None):
    """Print list of available compilers (used by the "--help-fcompiler"
    option to "config_fc").
    """
    if dist is None:
        # Build a minimal Distribution so config files and argv are honoured.
        from distutils.dist import Distribution
        from numpy.distutils.command.config_compiler import config_fc
        dist = Distribution()
        dist.script_name = os.path.basename(sys.argv[0])
        dist.script_args = ['config_fc'] + sys.argv[1:]
        try:
            dist.script_args.remove('--help-fcompiler')
        except ValueError:
            pass
        dist.cmdclass['config_fc'] = config_fc
        dist.parse_config_files()
        dist.parse_command_line()
    compilers = []      # found on this system
    compilers_na = []   # supported on this platform but not found
    compilers_ni = []   # not supported on this platform
    if not fcompiler_class:
        load_all_fcompiler_classes()
    platform_compilers = available_fcompilers_for_platform()
    for compiler in platform_compilers:
        v = None
        log.set_verbosity(-2)  # silence probing noise
        try:
            c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
            c.customize(dist)
            v = c.get_version()
        except (DistutilsModuleError, CompilerNotFound) as e:
            log.debug("show_fcompilers: %s not found" % (compiler,))
            log.debug(repr(e))

        if v is None:
            compilers_na.append(("fcompiler="+compiler, None,
                                 fcompiler_class[compiler][2]))
        else:
            c.dump_properties()
            compilers.append(("fcompiler="+compiler, None,
                              fcompiler_class[compiler][2] + ' (%s)' % v))

    compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))
    compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2])
                    for fc in compilers_ni]

    compilers.sort()
    compilers_na.sort()
    compilers_ni.sort()
    pretty_printer = FancyGetopt(compilers)
    pretty_printer.print_help("Fortran compilers found:")
    pretty_printer = FancyGetopt(compilers_na)
    pretty_printer.print_help("Compilers available for this "
                              "platform, but not found:")
    if compilers_ni:
        pretty_printer = FancyGetopt(compilers_ni)
        pretty_printer.print_help("Compilers not available on this platform:")
    print("For compiler details, run 'config_fc --verbose' setup command.")
def dummy_fortran_file():
    """Create a minimal fixed-form Fortran source file in a temp location;
    return its path without the '.f' extension."""
    fo, name = make_temp_file(suffix='.f')
    fo.write("      subroutine dummy()\n      end\n")
    fo.close()
    return name[:-2]  # strip the '.f' suffix


# Predicates over Fortran filenames / source text:
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match        # fixed-form file extension
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search  # '-*- fortran -*-' mode line
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search    # '-*- f90 -*-' mode line
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search    # '-*- fix -*-' mode line
_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match      # line begins like free-form code
def is_free_format(file):
    """Check if file is in free format Fortran."""
    # f90 allows both fixed and free format, assuming fixed unless
    # signs of free format are detected.
    result = 0
    f = open_latin1(file, 'r')
    line = f.readline()
    n = 10000 # the number of non-comment lines to scan for hints
    # An Emacs mode line on the first line decides immediately.
    if _has_f_header(line):
        n = 0
    elif _has_f90_header(line):
        n = 0
        result = 1
    while n>0 and line:
        line = line.rstrip()
        if line and line[0]!='!':
            n -= 1
            # Free-format hints: statement text beginning before column 6
            # (not a tab-indented line), or a trailing '&' continuation.
            if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
                result = 1
                break
        line = f.readline()
    f.close()
    return result
def has_f90_header(src):
    """Return a truthy value if the first line of *src* carries an
    Emacs-style ``-*- f90 -*-`` or ``-*- fix -*-`` mode marker.

    Parameters
    ----------
    src : str
        Path of the Fortran source file to inspect.
    """
    f = open_latin1(src, 'r')
    try:
        line = f.readline()
    finally:
        # Close the handle even if readline() raises; the original code
        # leaked the file object on that path.
        f.close()
    return _has_f90_header(line) or _has_fix_header(line)
# Matches 'CF77FLAGS(<compiler type>) = <flags>' lines (leading 'c' is the
# Fortran comment character and is optional).
_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
def get_f77flags(src):
    """
    Search the first 20 lines of fortran 77 code for line pattern
    `CF77FLAGS(<fcompiler type>)=<f77 flags>`
    Return a dictionary {<fcompiler type>:<f77 flags>}.
    """
    flags = {}
    f = open_latin1(src, 'r')
    i = 0
    for line in f:
        i += 1
        if i>20: break  # only the leading 20 lines are inspected
        m = _f77flags_re.match(line)
        if not m: continue
        fcname = m.group('fcname').strip()
        fflags = m.group('fflags').strip()
        # Flags are split with shell-style quoting.
        flags[fcname] = split_quoted(fflags)
    f.close()
    return flags
# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags

if __name__ == '__main__':
    # Manual entry point: list the Fortran compilers visible on this system.
    show_fcompilers()
|
bsd-3-clause
|
deepmind/open_spiel
|
open_spiel/python/algorithms/double_oracle_test.py
|
1
|
2223
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for open_spiel.python.algorithms.double_oracle."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import double_oracle
import pyspiel
class DoubleOracleTest(absltest.TestCase):
    # Each test builds a matrix game and checks the double-oracle solver's
    # equilibrium strategies, iteration count, and game value.

    def test_rock_paper_scissors(self):
        game = pyspiel.load_matrix_game("matrix_rps")
        solver = double_oracle.DoubleOracleSolver(game)
        solution, iteration, value = solver.solve(initial_strategies=[[0], [0]])
        # The RPS equilibrium is uniform over the three actions, value 0.
        np.testing.assert_allclose(solution[0], np.ones(3)/3.)
        np.testing.assert_allclose(solution[1], np.ones(3)/3.)
        self.assertEqual(iteration, 3)
        self.assertAlmostEqual(value, 0.0)

    def test_single_step(self):
        game = pyspiel.load_matrix_game("matrix_rps")
        solver = double_oracle.DoubleOracleSolver(game)
        # Restrict both players to action 0; one oracle step should then
        # return action 1 as the best response with utility 1.0 for each.
        solver.subgame_strategies = [[0], [0]]
        best_response, best_response_utility = solver.step()
        self.assertListEqual(best_response, [1, 1])
        self.assertListEqual(best_response_utility, [1.0, 1.0])

    def test_kuhn_poker(self):
        game = pyspiel.extensive_to_matrix_game(pyspiel.load_game("kuhn_poker"))
        solver = double_oracle.DoubleOracleSolver(game)
        solution, iteration, value = solver.solve(initial_strategies=[[0], [0]])
        # check if solution is Nash: no pure deviation may beat the
        # expected utility of the returned mixed strategies.
        exp_utilty = solution[0] @ solver.payoffs @ solution[1]
        self.assertAlmostEqual(max(solver.payoffs[0] @ solution[1]), exp_utilty[0])
        self.assertAlmostEqual(max(solution[0] @ solver.payoffs[1]), exp_utilty[1])
        self.assertEqual(iteration, 8)
        self.assertAlmostEqual(value, 0.0)


if __name__ == "__main__":
    absltest.main()
|
apache-2.0
|
sankhesh/VTK
|
ThirdParty/Twisted/twisted/trial/runner.py
|
28
|
26163
|
# -*- test-case-name: twisted.trial.test.test_runner -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A miscellany of code used to run Trial tests.
Maintainer: Jonathan Lange
"""
# Explicit public API of this module (keeps `import *` and the docs stable).
__all__ = [
    'TestSuite',

    'DestructiveTestSuite', 'DryRunVisitor', 'ErrorHolder', 'LoggedSuite',
    'TestHolder', 'TestLoader', 'TrialRunner', 'TrialSuite',

    'filenameToModule', 'isPackage', 'isPackageDirectory', 'isTestCase',
    'name', 'samefile', 'NOT_IN_TEST',
    ]
import os, types, warnings, sys, inspect, imp
import doctest, time
from twisted.python import reflect, log, failure, modules, filepath
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.python.versions import Version
from twisted.internet import defer
from twisted.trial import util, unittest
from twisted.trial.itrial import ITestCase
from twisted.trial.reporter import _ExitWrapper, UncleanWarningsReporterWrapper
# These are imported so that they remain in the public API for t.trial.runner
from twisted.trial.unittest import TestSuite
from zope.interface import implements
pyunit = __import__('unittest')
def isPackage(module):
    """
    Return C{True} if C{module} looks like a Python package: a module
    object whose source file is an C{__init__} file.
    """
    if not isinstance(module, types.ModuleType):
        return False
    filename = os.path.basename(module.__file__)
    stem = os.path.splitext(filename)[0]
    return stem == '__init__'
def isPackageDirectory(dirname):
    """Is the directory at path 'dirname' a Python package directory?
    Returns the name of the __init__ file (it may have a weird extension)
    if dirname is a package directory. Otherwise, returns False"""
    # NOTE(review): subscripting the result of zip() only works on
    # Python 2; this module predates Python 3 support.
    for ext in zip(*imp.get_suffixes())[0]:
        initFile = '__init__' + ext
        if os.path.exists(os.path.join(dirname, initFile)):
            return initFile
    return False
def samefile(filename1, filename2):
    """
    Fallback for C{os.path.samefile} on platforms that lack it: compares
    absolute paths only, so links and case differences are not detected.
    Used by L{filenameToModule}. Do not use this elsewhere.
    """
    absolute1 = os.path.abspath(filename1)
    absolute2 = os.path.abspath(filename2)
    return absolute1 == absolute2
def filenameToModule(fn):
    """
    Given a filename, do whatever possible to return a module object matching
    that file.

    If the file in question is a module in Python path, properly import and
    return that module. Otherwise, load the source manually.

    @param fn: A filename.
    @return: A module object.
    @raise ValueError: If C{fn} does not exist.
    """
    if not os.path.exists(fn):
        raise ValueError("%r doesn't exist" % (fn,))
    try:
        ret = reflect.namedAny(reflect.filenameToModuleName(fn))
    except (ValueError, AttributeError):
        # Couldn't find module. The file 'fn' is not in PYTHONPATH
        return _importFromFile(fn)

    # ensure that the loaded module matches the file
    retFile = os.path.splitext(ret.__file__)[0] + '.py'
    # not all platforms (e.g. win32) have os.path.samefile
    same = getattr(os.path, 'samefile', samefile)
    if os.path.isfile(fn) and not same(fn, retFile):
        # Name collision with a different file: drop the cached module
        # and load the requested file directly.
        del sys.modules[ret.__name__]
        ret = _importFromFile(fn)
    return ret


def _importFromFile(fn, moduleName=None):
    # Load 'fn' as a module outside the normal import machinery, reusing
    # an already-imported module of the same name when present.
    fn = _resolveDirectory(fn)
    if not moduleName:
        moduleName = os.path.splitext(os.path.split(fn)[-1])[0]
    if moduleName in sys.modules:
        return sys.modules[moduleName]
    fd = open(fn, 'r')
    try:
        module = imp.load_source(moduleName, fn, fd)
    finally:
        fd.close()
    return module


def _resolveDirectory(fn):
    # Map a package directory to its __init__ file; raise for non-package
    # directories; return plain file paths unchanged.
    if os.path.isdir(fn):
        initFile = isPackageDirectory(fn)
        if initFile:
            fn = os.path.join(fn, initFile)
        else:
            raise ValueError('%r is not a package directory' % (fn,))
    return fn
def _getMethodNameInClass(method):
    """
    Find the attribute name on the method's class which refers to the method.

    For some methods, notably decorators which have not had __name__ set correctly:

    getattr(method.im_class, method.__name__) != method
    """
    # (Python 2: im_class is the class the bound method belongs to.)
    if getattr(method.im_class, method.__name__, object()) != method:
        # __name__ is unreliable; scan the class for an attribute that
        # compares equal to the method itself.
        for alias in dir(method.im_class):
            if getattr(method.im_class, alias, object()) == method:
                return alias
    return method.__name__
class DestructiveTestSuite(TestSuite):
    """
    A test suite that discards each test as soon as it has run, keeping
    memory usage to a minimum.
    """

    def run(self, result):
        """
        Equivalent to L{TestSuite.run}, except that C{self._tests} is
        consumed (left empty) as the tests execute, and the run stops
        early when C{result.shouldStop} is set.
        """
        while self._tests and not result.shouldStop:
            self._tests.pop(0)(result)
        return result
# When an error occurs outside of any test, the user will see this string
# in place of a test's name (used as the TestHolder description by
# LoggedSuite below).
NOT_IN_TEST = "<not in test>"
class LoggedSuite(TestSuite):
    """
    Any errors logged in this suite will be reported to the L{TestResult}
    object.
    """

    def run(self, result):
        """
        Run the suite, storing all errors in C{result}. If an error is logged
        while no tests are running, then it will be added as an error to
        C{result}.

        @param result: A L{TestResult} object.
        """
        # Install trial's log observer for the duration of the run so
        # logged errors are captured.
        observer = unittest._logObserver
        observer._add()
        super(LoggedSuite, self).run(result)
        observer._remove()
        # Attribute errors logged outside any test to the placeholder name.
        for error in observer.getErrors():
            result.addError(TestHolder(NOT_IN_TEST), error)
        observer.flushErrors()
class TrialSuite(TestSuite):
    """
    Suite to wrap around every single test in a C{trial} run. Used internally
    by Trial to set up things necessary for Trial tests to work, regardless of
    what context they are run in.
    """

    def __init__(self, tests=(), forceGarbageCollection=False):
        if forceGarbageCollection:
            # Wrap each test so garbage collection is forced around it.
            newTests = []
            for test in tests:
                test = unittest.decorate(
                    test, unittest._ForceGarbageCollectionDecorator)
                newTests.append(test)
            tests = newTests
        # All tests run inside a LoggedSuite so logged errors are reported.
        suite = LoggedSuite(tests)
        super(TrialSuite, self).__init__([suite])


    def _bail(self):
        # Fire the reactor's shutdown event and wait for it to complete.
        from twisted.internet import reactor
        d = defer.Deferred()
        reactor.addSystemEventTrigger('after', 'shutdown',
                                      lambda: d.callback(None))
        reactor.fireSystemEvent('shutdown') # radix's suggestion

        # As long as TestCase does crap stuff with the reactor we need to
        # manually shutdown the reactor here, and that requires util.wait
        # :(

        # so that the shutdown event completes
        unittest.TestCase('mktemp')._wait(d)

    def run(self, result):
        try:
            TestSuite.run(self, result)
        finally:
            # Always shut the reactor down, even when the run failed.
            self._bail()
def name(thing):
    """
    @param thing: an object from modules (instance of PythonModule,
        PythonAttribute), a TestCase subclass, or an instance of a TestCase.
    """
    if isTestCase(thing):
        # TestCase subclass
        return reflect.qual(thing)
    # thing from trial, or thing from modules.  Prefer the test-style id(),
    # falling back to the plain name attribute so modules' objects do not
    # have to implement id(). -jml
    try:
        return thing.id()
    except AttributeError:
        return thing.name
def isTestCase(obj):
    """
    @return: C{True} if C{obj} is a class that contains test cases, C{False}
        otherwise. Used to find all the tests in a module.
    """
    try:
        result = issubclass(obj, pyunit.TestCase)
    except TypeError:
        # issubclass raises TypeError when obj is not a class at all.
        return False
    return result
class TestHolder(object):
    """
    Placeholder for a L{TestCase} inside a reporter. As far as a L{TestResult}
    is concerned, this looks exactly like a unit test.
    """
    implements(ITestCase)

    # A placeholder never fails, so there is no failure exception type.
    failureException = None

    def __init__(self, description):
        """
        @param description: A string to be displayed to the L{TestResult}.
        """
        self.description = description

    def __call__(self, result):
        return self.run(result)

    def id(self):
        return self.description

    def countTestCases(self):
        return 0

    def run(self, result):
        """
        This test is just a placeholder. Run the test successfully.

        @param result: The C{TestResult} to store the results in.
        @type result: L{twisted.trial.itrial.IReporter}.
        """
        result.startTest(self)
        result.addSuccess(self)
        result.stopTest(self)

    def shortDescription(self):
        return self.description
class ErrorHolder(TestHolder):
    """
    Used to insert arbitrary errors into a test suite run. Provides enough
    methods to look like a C{TestCase}, however, when it is run, it simply adds
    an error to the C{TestResult}. The most common use-case is for when a
    module fails to import.
    """

    def __init__(self, description, error):
        """
        @param description: A string used by C{TestResult}s to identify this
        error. Generally, this is the name of a module that failed to import.

        @param error: The error to be added to the result. Can be an `exc_info`
        tuple or a L{twisted.python.failure.Failure}.
        """
        super(ErrorHolder, self).__init__(description)
        # Normalize either representation to an exc_info-style tuple.
        self.error = util.excInfoOrFailureToExcInfo(error)

    def __repr__(self):
        return "<ErrorHolder description=%r error=%r>" % (
            self.description, self.error[1])

    def run(self, result):
        """
        Run the test, reporting the error.

        @param result: The C{TestResult} to store the results in.
        @type result: L{twisted.trial.itrial.IReporter}.
        """
        result.startTest(self)
        result.addError(self, self.error)
        result.stopTest(self)
class TestLoader(object):
    """
    I find tests inside function, modules, files -- whatever -- then return
    them wrapped inside a Test (either a L{TestSuite} or a L{TestCase}).

    @ivar methodPrefix: A string prefix. C{TestLoader} will assume that all the
    methods in a class that begin with C{methodPrefix} are test cases.

    @ivar modulePrefix: A string prefix. Every module in a package that begins
    with C{modulePrefix} is considered a module full of tests.

    @ivar forceGarbageCollection: A flag applied to each C{TestCase} loaded.
    See L{unittest.TestCase} for more information.

    @ivar sorter: A key function used to sort C{TestCase}s, test classes,
    modules and packages.

    @ivar suiteFactory: A callable which is passed a list of tests (which
    themselves may be suites of tests). Must return a test suite.
    """

    methodPrefix = 'test'
    modulePrefix = 'test_'

    def __init__(self):
        self.suiteFactory = TestSuite
        # The module-level name() function provides a stable, human-readable
        # sort key for tests, classes and modules.
        self.sorter = name
        self._importErrors = []

    def sort(self, xs):
        """
        Sort the given things using L{sorter}.

        @param xs: A list of test cases, class or modules.
        """
        return sorted(xs, key=self.sorter)

    def findTestClasses(self, module):
        """Given a module, return all Trial test classes"""
        classes = []
        # NB: the loop variable shadows the module-level name() function
        # inside this method.
        for name, val in inspect.getmembers(module):
            if isTestCase(val):
                classes.append(val)
        return self.sort(classes)

    def findByName(self, name):
        """
        Return a Python object given a string describing it.

        @param name: a string which may be either a filename or a
        fully-qualified Python name.

        @return: If C{name} is a filename, return the module. If C{name} is a
        fully-qualified Python name, return the object it refers to.
        """
        if os.path.exists(name):
            return filenameToModule(name)
        return reflect.namedAny(name)

    def loadModule(self, module):
        """
        Return a test suite with all the tests from a module.

        Included are TestCase subclasses and doctests listed in the module's
        __doctests__ module. If that's not good for you, put a function named
        either C{testSuite} or C{test_suite} in your module that returns a
        TestSuite, and I'll use the results of that instead.

        If C{testSuite} and C{test_suite} are both present, then I'll use
        C{testSuite}.
        """
        ## XXX - should I add an optional parameter to disable the check for
        ## a custom suite.
        ## OR, should I add another method
        if not isinstance(module, types.ModuleType):
            raise TypeError("%r is not a module" % (module,))
        # A module-provided suite function takes precedence over discovery.
        if hasattr(module, 'testSuite'):
            return module.testSuite()
        elif hasattr(module, 'test_suite'):
            return module.test_suite()
        suite = self.suiteFactory()
        for testClass in self.findTestClasses(module):
            suite.addTest(self.loadClass(testClass))
        if not hasattr(module, '__doctests__'):
            return suite
        docSuite = self.suiteFactory()
        for doctest in module.__doctests__:
            docSuite.addTest(self.loadDoctests(doctest))
        return self.suiteFactory([suite, docSuite])
    loadTestsFromModule = loadModule

    def loadClass(self, klass):
        """
        Given a class which contains test cases, return a sorted list of
        C{TestCase} instances.
        """
        # types.ClassType covers Python 2 old-style classes.
        if not (isinstance(klass, type) or isinstance(klass, types.ClassType)):
            raise TypeError("%r is not a class" % (klass,))
        if not isTestCase(klass):
            raise ValueError("%r is not a test case" % (klass,))
        names = self.getTestCaseNames(klass)
        tests = self.sort([self._makeCase(klass, self.methodPrefix+name)
                           for name in names])
        return self.suiteFactory(tests)
    loadTestsFromTestCase = loadClass

    def getTestCaseNames(self, klass):
        """
        Given a class that contains C{TestCase}s, return a list of names of
        methods that probably contain tests.
        """
        return reflect.prefixedMethodNames(klass, self.methodPrefix)

    def loadMethod(self, method):
        """
        Given a method of a C{TestCase} that represents a test, return a
        C{TestCase} instance for that test.
        """
        if not isinstance(method, types.MethodType):
            raise TypeError("%r not a method" % (method,))
        return self._makeCase(method.im_class, _getMethodNameInClass(method))

    def _makeCase(self, klass, methodName):
        # Instantiate the TestCase class for a single named test method.
        return klass(methodName)

    def loadPackage(self, package, recurse=False):
        """
        Load tests from a module object representing a package, and return a
        TestSuite containing those tests.

        Tests are only loaded from modules whose name begins with 'test_'
        (or whatever C{modulePrefix} is set to).

        @param package: a types.ModuleType object (or reasonable facsimilie
        obtained by importing) which may contain tests.

        @param recurse: A boolean. If True, inspect modules within packages
        within the given package (and so on), otherwise, only inspect modules
        in the package itself.

        @raise: TypeError if 'package' is not a package.

        @return: a TestSuite created with my suiteFactory, containing all the
        tests.
        """
        if not isPackage(package):
            raise TypeError("%r is not a package" % (package,))
        pkgobj = modules.getModule(package.__name__)
        if recurse:
            discovery = pkgobj.walkModules()
        else:
            discovery = pkgobj.iterModules()
        discovered = []
        for disco in discovery:
            if disco.name.split(".")[-1].startswith(self.modulePrefix):
                discovered.append(disco)
        suite = self.suiteFactory()
        for modinfo in self.sort(discovered):
            try:
                module = modinfo.load()
            except:
                # A module that fails to import becomes an ErrorHolder in the
                # suite instead of aborting the whole load.
                thingToAdd = ErrorHolder(modinfo.name, failure.Failure())
            else:
                thingToAdd = self.loadModule(module)
            suite.addTest(thingToAdd)
        return suite

    def loadDoctests(self, module):
        """
        Return a suite of tests for all the doctests defined in C{module}.

        @param module: A module object or a module name.
        """
        if isinstance(module, str):
            try:
                module = reflect.namedAny(module)
            except:
                return ErrorHolder(module, failure.Failure())
        if not inspect.ismodule(module):
            warnings.warn("trial only supports doctesting modules")
            return
        extraArgs = {}
        if sys.version_info > (2, 4):
            # Work around Python issue2604: DocTestCase.tearDown clobbers globs
            def saveGlobals(test):
                """
                Save C{test.globs} and replace it with a copy so that if
                necessary, the original will be available for the next test
                run.
                """
                test._savedGlobals = getattr(test, '_savedGlobals', test.globs)
                test.globs = test._savedGlobals.copy()
            extraArgs['setUp'] = saveGlobals
        return doctest.DocTestSuite(module, **extraArgs)

    def loadAnything(self, thing, recurse=False):
        """
        Given a Python object, return whatever tests that are in it. Whatever
        'in' might mean.

        @param thing: A Python object. A module, method, class or package.

        @param recurse: Whether or not to look in subpackages of packages.
        Defaults to False.

        @return: A C{TestCase} or C{TestSuite}.
        """
        if isinstance(thing, types.ModuleType):
            if isPackage(thing):
                return self.loadPackage(thing, recurse)
            return self.loadModule(thing)
        elif isinstance(thing, types.ClassType):
            # Python 2 old-style class.
            return self.loadClass(thing)
        elif isinstance(thing, type):
            return self.loadClass(thing)
        elif isinstance(thing, types.MethodType):
            return self.loadMethod(thing)
        raise TypeError("No loader for %r. Unrecognized type" % (thing,))

    def loadByName(self, name, recurse=False):
        """
        Given a string representing a Python object, return whatever tests
        are in that object.

        If C{name} is somehow inaccessible (e.g. the module can't be imported,
        there is no Python object with that name etc) then return an
        L{ErrorHolder}.

        @param name: The fully-qualified name of a Python object.
        """
        try:
            thing = self.findByName(name)
        except:
            return ErrorHolder(name, failure.Failure())
        return self.loadAnything(thing, recurse)
    loadTestsFromName = loadByName

    def loadByNames(self, names, recurse=False):
        """
        Construct a TestSuite containing all the tests found in 'names', where
        names is a list of fully qualified python names and/or filenames. The
        suite returned will have no duplicate tests, even if the same object
        is named twice.
        """
        things = []
        errors = []
        for name in names:
            try:
                things.append(self.findByName(name))
            except:
                errors.append(ErrorHolder(name, failure.Failure()))
        suites = [self.loadAnything(thing, recurse)
                  for thing in self._uniqueTests(things)]
        suites.extend(errors)
        return self.suiteFactory(suites)

    def _uniqueTests(self, things):
        """
        Gather unique suite objects from loaded things. This will guarantee
        uniqueness of inherited methods on TestCases which would otherwise hash
        to same value and collapse to one test unexpectedly if using simpler
        means: e.g. set().
        """
        seen = set()
        for thing in things:
            if isinstance(thing, types.MethodType):
                # Key bound methods by (method, class) so inherited methods
                # on different classes stay distinct.
                thing = (thing, thing.im_class)
            else:
                thing = (thing,)
            if thing not in seen:
                yield thing[0]
            seen.add(thing)
class DryRunVisitor(object):
    """
    A visitor that makes a reporter think that every test visited has run
    successfully.
    """
    # NOTE(review): per the original line order this call sits in the class
    # body; it registers a module-level deprecation for the DryRunVisitor
    # attribute of twisted.trial.runner (deprecated since Twisted 13.0).
    deprecatedModuleAttribute(
        Version("Twisted", 13, 0, 0),
        "Trial no longer has support for visitors",
        "twisted.trial.runner", "DryRunVisitor")

    def __init__(self, reporter):
        """
        @param reporter: A C{TestResult} object.
        """
        self.reporter = reporter

    def markSuccessful(self, testCase):
        """
        Convince the reporter that this test has been run successfully.
        """
        self.reporter.startTest(testCase)
        self.reporter.addSuccess(testCase)
        self.reporter.stopTest(testCase)
class TrialRunner(object):
    """
    A specialised runner that the trial front end uses.
    """

    DEBUG = 'debug'
    DRY_RUN = 'dry-run'

    def _setUpTestdir(self):
        # Stop logging to any open log file before changing directory, then
        # acquire (and lock) an unused working directory and chdir into it.
        # Returns the previous working directory for _tearDownTestdir.
        self._tearDownLogFile()
        currentDir = os.getcwd()
        base = filepath.FilePath(self.workingDirectory)
        testdir, self._testDirLock = util._unusedTestDirectory(base)
        os.chdir(testdir.path)
        return currentDir

    def _tearDownTestdir(self, oldDir):
        # Restore the previous working directory and release the directory
        # lock taken by _setUpTestdir.
        os.chdir(oldDir)
        self._testDirLock.unlock()

    _log = log

    def _makeResult(self):
        # Build the reporter, optionally wrapping it to stop on first error
        # and/or to aggregate unclean-reactor warnings.
        reporter = self.reporterFactory(self.stream, self.tbformat,
                                        self.rterrors, self._log)
        if self._exitFirst:
            reporter = _ExitWrapper(reporter)
        if self.uncleanWarnings:
            reporter = UncleanWarningsReporterWrapper(reporter)
        return reporter

    def __init__(self, reporterFactory,
                 mode=None,
                 logfile='test.log',
                 stream=sys.stdout,
                 profile=False,
                 tracebackFormat='default',
                 realTimeErrors=False,
                 uncleanWarnings=False,
                 workingDirectory=None,
                 forceGarbageCollection=False,
                 debugger=None,
                 exitFirst=False):
        self.reporterFactory = reporterFactory
        self.logfile = logfile
        self.mode = mode
        self.stream = stream
        self.tbformat = tracebackFormat
        self.rterrors = realTimeErrors
        self.uncleanWarnings = uncleanWarnings
        self._result = None
        self.workingDirectory = workingDirectory or '_trial_temp'
        self._logFileObserver = None
        self._logFileObject = None
        self._forceGarbageCollection = forceGarbageCollection
        self.debugger = debugger
        self._exitFirst = exitFirst
        if profile:
            # Transparently wrap run() so the whole test run is profiled.
            self.run = util.profiled(self.run, 'profile.data')

    def _tearDownLogFile(self):
        # Detach the log observer and close the log file, if any are open.
        if self._logFileObserver is not None:
            log.removeObserver(self._logFileObserver.emit)
            self._logFileObserver = None
        if self._logFileObject is not None:
            self._logFileObject.close()
            self._logFileObject = None

    def _setUpLogFile(self):
        self._tearDownLogFile()
        if self.logfile == '-':
            logFile = sys.stdout
        else:
            # NOTE(review): file() is Python 2 only; this module predates
            # Python 3 support.
            logFile = file(self.logfile, 'a')
        self._logFileObject = logFile
        self._logFileObserver = log.FileLogObserver(logFile)
        log.startLoggingWithObserver(self._logFileObserver.emit, 0)

    def run(self, test):
        """
        Run the test or suite and return a result object.
        """
        test = unittest.decorate(test, ITestCase)
        return self._runWithoutDecoration(test, self._forceGarbageCollection)

    def _runWithoutDecoration(self, test, forceGarbageCollection=False):
        """
        Private helper that runs the given test but doesn't decorate it.
        """
        result = self._makeResult()
        # decorate the suite with reactor cleanup and log starting
        # This should move out of the runner and be presumed to be
        # present
        suite = TrialSuite([test], forceGarbageCollection)
        startTime = time.time()
        if self.mode == self.DRY_RUN:
            # Dry run: report every test as an immediate success without
            # actually executing it.
            for single in unittest._iterateTests(suite):
                result.startTest(single)
                result.addSuccess(single)
                result.stopTest(single)
        else:
            if self.mode == self.DEBUG:
                run = lambda: self.debugger.runcall(suite.run, result)
            else:
                run = lambda: suite.run(result)

            # Run inside the locked working directory with file logging on;
            # teardown order (log file first, then directory) mirrors setup.
            oldDir = self._setUpTestdir()
            try:
                self._setUpLogFile()
                run()
            finally:
                self._tearDownLogFile()
                self._tearDownTestdir(oldDir)

        endTime = time.time()
        done = getattr(result, 'done', None)
        if done is None:
            # Legacy reporters without done(): emit the summary manually.
            warnings.warn(
                "%s should implement done() but doesn't. Falling back to "
                "printErrors() and friends." % reflect.qual(result.__class__),
                category=DeprecationWarning, stacklevel=3)
            result.printErrors()
            result.writeln(result.separator)
            result.writeln('Ran %d tests in %.3fs', result.testsRun,
                           endTime - startTime)
            result.write('\n')
            result.printSummary()
        else:
            result.done()
        return result

    def runUntilFailure(self, test):
        """
        Repeatedly run C{test} until it fails.
        """
        count = 0
        while True:
            count += 1
            self.stream.write("Test Pass %d\n" % (count,))
            # The first pass decorates the test; subsequent passes reuse the
            # already-decorated test to avoid re-wrapping it.
            if count == 1:
                result = self.run(test)
            else:
                result = self._runWithoutDecoration(test)
            if result.testsRun == 0:
                break
            if not result.wasSuccessful():
                break
        return result
|
bsd-3-clause
|
fabrice-lecuyer/QuantLib-SWIG
|
Python/examples/bermudan-swaption.py
|
2
|
7897
|
# Copyright (C) 2004, 2005, 2006, 2007 StatPro Italia srl
#
# This file is part of QuantLib, a free-software/open-source library
# for financial quantitative analysts and developers - http://quantlib.org/
#
# QuantLib is free software: you can redistribute it and/or modify it under the
# terms of the QuantLib license. You should have received a copy of the
# license along with this program; if not, please email
# <[email protected]>. The license is also available online at
# <http://quantlib.org/license.shtml>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the license for more details.
from QuantLib import *
# Market swaption volatility quotes used for model calibration:
# (option maturity, underlying swap length, Black volatility).
swaptionVols = [ # maturity, length, volatility
    (Period(1, Years), Period(5, Years), 0.1148),
    (Period(2, Years), Period(4, Years), 0.1108),
    (Period(3, Years), Period(3, Years), 0.1070),
    (Period(4, Years), Period(2, Years), 0.1021),
    (Period(5, Years), Period(1, Years), 0.1000) ]
def formatVol(v, digits = 2):
    """
    Format a fractional volatility as a percentage string.

    @param v: volatility as a fraction of one (e.g. 0.1148).
    @param digits: number of decimal places to show.
    @return: e.g. '11.48 %' for v=0.1148 with the default two digits.
    """
    # Build the printf-style template first ('%.2f %%' for digits=2).
    # The local is named fmt to avoid rebinding the builtin 'format'.
    fmt = '%%.%df %%%%' % digits
    return fmt % (v * 100)
def formatPrice(p, digits = 2):
    """
    Format a price with a fixed number of decimal places.

    @param p: the price value.
    @param digits: number of decimal places to show.
    @return: e.g. '3.14' for p=3.14159 with the default two digits.
    """
    # The local is named fmt to avoid rebinding the builtin 'format'.
    fmt = '%%.%df' % digits
    return fmt % p
def calibrate(model, helpers, l, name):
    """
    Calibrate *model* to the swaption *helpers* using a Simplex optimizer
    with step *l*, then print a report titled *name* comparing market and
    implied volatilities for each helper.
    """
    rowFmt = '%12s |%12s |%12s |%12s |%12s'
    header = rowFmt % ('maturity','length','volatility','implied','error')
    rule = '-' * len(header)
    dblrule = '=' * len(header)

    print('')
    print(dblrule)
    print(name)
    print(rule)

    model.calibrate(helpers, Simplex(l),
                    EndCriteria(1000, 250, 1e-7, 1e-7, 1e-7))

    print('Parameters: %s' % model.params())
    print(rule)
    print(header)
    print(rule)

    totalError = 0.0
    for (maturity, length, vol), helper in zip(swaptionVols, helpers):
        implied = helper.impliedVolatility(helper.modelValue(),
                                           1.0e-4, 1000, 0.05, 0.50)
        error = implied - vol
        totalError += abs(error)
        print(rowFmt % (maturity, length,
                        formatVol(vol,4), formatVol(implied,4),
                        formatVol(error,4)))
    print(rule)

    summaryFmt = '%%%ds' % len(header)
    averageError = totalError/len(helpers)
    print(summaryFmt % ('Average error: ' + formatVol(averageError,4)))
    print(dblrule)
# Global evaluation date for all QuantLib calculations.
todaysDate = Date(15,February,2002)
Settings.instance().evaluationDate = todaysDate
calendar = TARGET()
settlementDate = Date(19,February,2002);

# flat yield term structure implying 1x5 swap at 5%
rate = QuoteHandle(SimpleQuote(0.04875825))
termStructure = YieldTermStructureHandle(
    FlatForward(settlementDate,rate,Actual365Fixed()))

# define the ATM/OTM/ITM swaps
swapEngine = DiscountingSwapEngine(termStructure)

# Leg conventions for a standard 1Y-forward 5Y EUR swap.
fixedLegFrequency = Annual
fixedLegTenor = Period(1,Years)
fixedLegConvention = Unadjusted
floatingLegConvention = ModifiedFollowing
fixedLegDayCounter = Thirty360(Thirty360.European);
floatingLegFrequency = Semiannual
floatingLegTenor = Period(6,Months)
payFixed = VanillaSwap.Payer
fixingDays = 2
index = Euribor6M(termStructure)
floatingLegDayCounter = index.dayCounter()

# Swap runs from 1 year after settlement for 5 years.
swapStart = calendar.advance(settlementDate,1,Years,floatingLegConvention)
swapEnd = calendar.advance(swapStart,5,Years,floatingLegConvention)

fixedSchedule = Schedule(swapStart, swapEnd,
                         fixedLegTenor, calendar,
                         fixedLegConvention, fixedLegConvention,
                         DateGeneration.Forward, False)
floatingSchedule = Schedule(swapStart, swapEnd,
                            floatingLegTenor, calendar,
                            floatingLegConvention, floatingLegConvention,
                            DateGeneration.Forward, False)

# Throwaway swap (zero fixed rate) used only to back out the fair ATM rate.
dummy = VanillaSwap(payFixed, 100.0,
                    fixedSchedule, 0.0, fixedLegDayCounter,
                    floatingSchedule, index, 0.0,
                    floatingLegDayCounter)
dummy.setPricingEngine(swapEngine)
atmRate = dummy.fairRate()

# Payer swaps struck at 100% (ATM), 120% (OTM) and 80% (ITM) of the ATM rate.
atmSwap = VanillaSwap(payFixed, 1000.0,
                      fixedSchedule, atmRate, fixedLegDayCounter,
                      floatingSchedule, index, 0.0,
                      floatingLegDayCounter)
otmSwap = VanillaSwap(payFixed, 1000.0,
                      fixedSchedule, atmRate*1.2, fixedLegDayCounter,
                      floatingSchedule, index, 0.0,
                      floatingLegDayCounter)
itmSwap = VanillaSwap(payFixed, 1000.0,
                      fixedSchedule, atmRate*0.8, fixedLegDayCounter,
                      floatingSchedule, index, 0.0,
                      floatingLegDayCounter)
atmSwap.setPricingEngine(swapEngine)
otmSwap.setPricingEngine(swapEngine)
itmSwap.setPricingEngine(swapEngine)
# One calibration helper per market swaption quote.
helpers = [ SwaptionHelper(maturity, length,
                           QuoteHandle(SimpleQuote(vol)),
                           index, index.tenor(), index.dayCounter(),
                           index.dayCounter(), termStructure)
            for maturity, length, vol in swaptionVols ]

# Collect the distinct relevant times of all helpers (dict used as an
# ordered-by-sort set) to build a shared tree time grid.
times = {}
for h in helpers:
    for t in h.times():
        times[t] = 1
times = sorted(times.keys())
grid = TimeGrid(times, 30)

# Candidate short-rate models, all built on the same flat term structure.
G2model = G2(termStructure)
HWmodel = HullWhite(termStructure)
HWmodel2 = HullWhite(termStructure)
BKmodel = BlackKarasinski(termStructure)

print("Calibrating...")

# Calibrate each model with its appropriate swaption engine: analytic
# engines for G2 and the first Hull-White, tree engines for the rest.
for h in helpers:
    h.setPricingEngine(G2SwaptionEngine(G2model,6.0,16))
calibrate(G2model, helpers, 0.05, "G2 (analytic formulae)")

for h in helpers:
    h.setPricingEngine(JamshidianSwaptionEngine(HWmodel))
calibrate(HWmodel, helpers, 0.05, "Hull-White (analytic formulae)")

for h in helpers:
    h.setPricingEngine(TreeSwaptionEngine(HWmodel2,grid))
calibrate(HWmodel2, helpers, 0.05, "Hull-White (numerical calibration)")

for h in helpers:
    h.setPricingEngine(TreeSwaptionEngine(BKmodel,grid))
calibrate(BKmodel, helpers, 0.05, "Black-Karasinski (numerical calibration)")
# price Bermudan swaptions on defined swaps

# Exercise is allowed on every fixed-leg schedule date except the last.
bermudanDates = [ d for d in fixedSchedule ][:-1]
exercise = BermudanExercise(bermudanDates)

# Report table layout.
format = '%17s |%17s |%17s |%17s'
header = format % ('model', 'in-the-money', 'at-the-money', 'out-of-the-money')
rule = '-' * len(header)
dblrule = '=' * len(header)

print('')
print(dblrule)
print('Pricing Bermudan swaptions...')
print(rule)
print(header)
print(rule)

atmSwaption = Swaption(atmSwap, exercise)
otmSwaption = Swaption(otmSwap, exercise)
itmSwaption = Swaption(itmSwap, exercise)

# Price the three swaptions under each calibrated model on a 50-step tree.
atmSwaption.setPricingEngine(TreeSwaptionEngine(G2model, 50))
otmSwaption.setPricingEngine(TreeSwaptionEngine(G2model, 50))
itmSwaption.setPricingEngine(TreeSwaptionEngine(G2model, 50))

print(format % ('G2 analytic', formatPrice(itmSwaption.NPV()),
                formatPrice(atmSwaption.NPV()), formatPrice(otmSwaption.NPV())))

atmSwaption.setPricingEngine(TreeSwaptionEngine(HWmodel, 50))
otmSwaption.setPricingEngine(TreeSwaptionEngine(HWmodel, 50))
itmSwaption.setPricingEngine(TreeSwaptionEngine(HWmodel, 50))

print(format % ('HW analytic', formatPrice(itmSwaption.NPV()),
                formatPrice(atmSwaption.NPV()), formatPrice(otmSwaption.NPV())))

atmSwaption.setPricingEngine(TreeSwaptionEngine(HWmodel2, 50))
otmSwaption.setPricingEngine(TreeSwaptionEngine(HWmodel2, 50))
itmSwaption.setPricingEngine(TreeSwaptionEngine(HWmodel2, 50))

print(format % ('HW numerical', formatPrice(itmSwaption.NPV()),
                formatPrice(atmSwaption.NPV()), formatPrice(otmSwaption.NPV())))

atmSwaption.setPricingEngine(TreeSwaptionEngine(BKmodel, 50))
otmSwaption.setPricingEngine(TreeSwaptionEngine(BKmodel, 50))
itmSwaption.setPricingEngine(TreeSwaptionEngine(BKmodel, 50))

print(format % ('BK numerical', formatPrice(itmSwaption.NPV()),
                formatPrice(atmSwaption.NPV()), formatPrice(otmSwaption.NPV())))

print(dblrule)
|
bsd-3-clause
|
jamespcole/home-assistant
|
homeassistant/components/cloud/__init__.py
|
1
|
6414
|
"""Component to integrate the Home Assistant cloud."""
import logging
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.alexa import smart_home as alexa_sh
from homeassistant.components.google_assistant import const as ga_c
from homeassistant.const import (
CONF_MODE, CONF_NAME, CONF_REGION, EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entityfilter
from homeassistant.loader import bind_hass
from homeassistant.util.aiohttp import MockRequest
from . import http_api
from .const import (
CONF_ACME_DIRECTORY_SERVER, CONF_ALEXA, CONF_ALIASES,
CONF_CLOUDHOOK_CREATE_URL, CONF_COGNITO_CLIENT_ID, CONF_ENTITY_CONFIG,
CONF_FILTER, CONF_GOOGLE_ACTIONS, CONF_GOOGLE_ACTIONS_SYNC_URL,
CONF_RELAYER, CONF_REMOTE_API_URL, CONF_SUBSCRIPTION_INFO_URL,
CONF_USER_POOL_ID, DOMAIN, MODE_DEV, MODE_PROD)
from .prefs import CloudPreferences
REQUIREMENTS = ['hass-nabucasa==0.11']
DEPENDENCIES = ['http']

_LOGGER = logging.getLogger(__name__)

DEFAULT_MODE = MODE_PROD

# Service names registered by async_setup for remote UI control.
SERVICE_REMOTE_CONNECT = 'remote_connect'
SERVICE_REMOTE_DISCONNECT = 'remote_disconnect'

# Per-entity options accepted under the Alexa section of the cloud config.
ALEXA_ENTITY_SCHEMA = vol.Schema({
    vol.Optional(alexa_sh.CONF_DESCRIPTION): cv.string,
    vol.Optional(alexa_sh.CONF_DISPLAY_CATEGORIES): cv.string,
    vol.Optional(alexa_sh.CONF_NAME): cv.string,
})

# Per-entity options accepted under the Google Actions section.
GOOGLE_ENTITY_SCHEMA = vol.Schema({
    vol.Optional(CONF_NAME): cv.string,
    vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(ga_c.CONF_ROOM_HINT): cv.string,
})

# Options common to both voice assistants (entity include/exclude filter).
ASSISTANT_SCHEMA = vol.Schema({
    vol.Optional(CONF_FILTER, default=dict): entityfilter.FILTER_SCHEMA,
})

ALEXA_SCHEMA = ASSISTANT_SCHEMA.extend({
    vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ALEXA_ENTITY_SCHEMA}
})

GACTIONS_SCHEMA = ASSISTANT_SCHEMA.extend({
    vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: GOOGLE_ENTITY_SCHEMA},
})

# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_MODE, default=DEFAULT_MODE):
            vol.In([MODE_DEV, MODE_PROD]),
        # Change to optional when we include real servers
        vol.Optional(CONF_COGNITO_CLIENT_ID): str,
        vol.Optional(CONF_USER_POOL_ID): str,
        vol.Optional(CONF_REGION): str,
        vol.Optional(CONF_RELAYER): str,
        vol.Optional(CONF_GOOGLE_ACTIONS_SYNC_URL): vol.Url(),
        vol.Optional(CONF_SUBSCRIPTION_INFO_URL): vol.Url(),
        vol.Optional(CONF_CLOUDHOOK_CREATE_URL): vol.Url(),
        vol.Optional(CONF_REMOTE_API_URL): vol.Url(),
        vol.Optional(CONF_ACME_DIRECTORY_SERVER): vol.Url(),
        vol.Optional(CONF_ALEXA): ALEXA_SCHEMA,
        vol.Optional(CONF_GOOGLE_ACTIONS): GACTIONS_SCHEMA,
    }),
}, extra=vol.ALLOW_EXTRA)
# Raised by the cloudhook and remote-UI helpers below when the cloud
# integration is not set up or the user is not logged in.
class CloudNotAvailable(HomeAssistantError):
    """Raised when an action requires the cloud but it's not available."""
@bind_hass
@callback
def async_is_logged_in(hass) -> bool:
    """Test if user is logged in."""
    # False when the cloud integration has not been set up at all.
    cloud = hass.data.get(DOMAIN)
    return cloud is not None and cloud.is_logged_in
@bind_hass
@callback
def async_active_subscription(hass) -> bool:
    """Test if user has an active subscription."""
    if not async_is_logged_in(hass):
        return False
    return not hass.data[DOMAIN].subscription_expired
@bind_hass
async def async_create_cloudhook(hass, webhook_id: str) -> str:
    """Create a cloudhook."""
    if not async_is_logged_in(hass):
        raise CloudNotAvailable

    cloudhooks = hass.data[DOMAIN].cloudhooks
    hook = await cloudhooks.async_create(webhook_id, True)
    return hook['cloudhook_url']
@bind_hass
async def async_delete_cloudhook(hass, webhook_id: str) -> None:
    """Delete a cloudhook."""
    if DOMAIN not in hass.data:
        raise CloudNotAvailable

    cloudhooks = hass.data[DOMAIN].cloudhooks
    await cloudhooks.async_delete(webhook_id)
@bind_hass
@callback
def async_remote_ui_url(hass) -> str:
    """Get the remote UI URL."""
    if not async_is_logged_in(hass):
        raise CloudNotAvailable

    instance_domain = hass.data[DOMAIN].remote.instance_domain
    return "https://" + instance_domain
def is_cloudhook_request(request):
    """Test if a request came from a cloudhook.

    Async friendly.
    """
    # Cloudhook-forwarded requests arrive as MockRequest objects rather
    # than real aiohttp requests.
    return isinstance(request, MockRequest)
async def async_setup(hass, config):
    """Initialize the Home Assistant cloud."""
    # Imported lazily so the heavy cloud dependencies are only loaded when
    # the component is actually set up.
    from hass_nabucasa import Cloud
    from .client import CloudClient

    # Process configs
    if DOMAIN in config:
        kwargs = dict(config[DOMAIN])
    else:
        kwargs = {CONF_MODE: DEFAULT_MODE}

    # Alexa/Google custom config; fall back to the schema defaults when the
    # user provided no assistant-specific section.
    alexa_conf = kwargs.pop(CONF_ALEXA, None) or ALEXA_SCHEMA({})
    google_conf = kwargs.pop(CONF_GOOGLE_ACTIONS, None) or GACTIONS_SCHEMA({})

    # Cloud settings
    prefs = CloudPreferences(hass)
    await prefs.async_initialize()

    # Cloud user: created once as an admin system user and persisted in prefs.
    if not prefs.cloud_user:
        user = await hass.auth.async_create_system_user(
            'Home Assistant Cloud', [GROUP_ID_ADMIN])
        await prefs.async_update(cloud_user=user.id)

    # Initialize Cloud
    websession = hass.helpers.aiohttp_client.async_get_clientsession()
    client = CloudClient(hass, prefs, websession, alexa_conf, google_conf)
    cloud = hass.data[DOMAIN] = Cloud(client, **kwargs)

    async def _startup(event):
        """Startup event."""
        await cloud.start()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _startup)

    async def _shutdown(event):
        """Shutdown event."""
        await cloud.stop()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)

    async def _service_handler(service):
        """Handle service for cloud."""
        # One handler serves both services; dispatch on the service name and
        # keep the remote_enabled preference in sync with the action taken.
        if service.service == SERVICE_REMOTE_CONNECT:
            await cloud.remote.connect()
            await prefs.async_update(remote_enabled=True)
        elif service.service == SERVICE_REMOTE_DISCONNECT:
            await cloud.remote.disconnect()
            await prefs.async_update(remote_enabled=False)

    hass.services.async_register(
        DOMAIN, SERVICE_REMOTE_CONNECT, _service_handler)
    hass.services.async_register(
        DOMAIN, SERVICE_REMOTE_DISCONNECT, _service_handler)

    await http_api.async_setup(hass)

    hass.async_create_task(hass.helpers.discovery.async_load_platform(
        'binary_sensor', DOMAIN, {}, config))

    return True
|
apache-2.0
|
ODM2/ODM2StreamingDataLoader
|
src/wizard/view/clsDataConfigPanel.py
|
1
|
7327
|
import sys
import wx
#import wx.lib.agw.ultimatelistctrl as ULC
from ObjectListView import ObjectListView, ColumnDefn
import wx.lib.mixins.gridlabelrenderer as glr
from src.wizard.controller.frmVirtualList import VirtualList
from src.wizard.controller.frmVirtualGrid import VirtualGrid, GridBase
#from lib.ObjectListView.ObjectListView import VirtualObjectListView
class MyGrid(VirtualGrid, glr.GridWithLabelRenderersMixin):
    """A VirtualGrid with support for custom row/column label renderers."""

    def __init__(self, *args, **kw):
        # Both bases are initialised explicitly: the mixin takes no
        # constructor arguments and does not cooperate with super().
        VirtualGrid.__init__(self, *args, **kw)
        glr.GridWithLabelRenderersMixin.__init__(self)
class MyColLabelRenderer(glr.GridLabelRenderer):
    """Column-label renderer that paints a solid background colour before
    drawing the standard border and label text."""

    def __init__(self, bgcolor):
        # Background colour used to fill the label rectangle.
        self._bgcolor = bgcolor

    def Draw(self, grid, dc, rect, col):
        # Fill the background with no outline (transparent pen), then let
        # the base helpers draw the border and aligned label text on top.
        dc.SetBrush(wx.Brush(self._bgcolor))
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.DrawRectangleRect(rect)
        hAlign, vAlign = grid.GetColLabelAlignment()
        text = grid.GetColLabelValue(col)
        self.DrawBorder(grid, dc, rect)
        self.DrawText(grid, dc, rect, text, hAlign, vAlign)
class DataConfigPanelView(wx.Panel):
def __init__( self, parent ):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 972,569 ), style = wx.TAB_TRAVERSAL )
bSizerMain = wx.BoxSizer( wx.VERTICAL )
bSizerTop = wx.BoxSizer( wx.HORIZONTAL )
bSizerTopLeft = wx.BoxSizer( wx.VERTICAL )
sbSizerTime = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Date Time Column:" ), wx.VERTICAL )
choiceTimeColChoices = []
self.choiceTimeCol = wx.Choice( sbSizerTime.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 160,-1 ), choiceTimeColChoices, 0 )
self.choiceTimeCol.SetSelection( 0 )
sbSizerTime.Add( self.choiceTimeCol, 0, wx.ALL, 10 )
bSizerUTC = wx.BoxSizer(wx.HORIZONTAL)
lblUTC = wx.StaticText(sbSizerTime.GetStaticBox(), wx.ID_ANY,u"UTC Offset", wx.DefaultPosition, wx.DefaultSize, 0)
bSizerUTC.Add(lblUTC, 0, wx.ALL, 10)
bSizerUTC.AddSpacer((0, 0), 1, wx.EXPAND)
self.spinUTCOffset = wx.SpinCtrl( sbSizerTime.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.SP_ARROW_KEYS, 0, 10, 0 )
self.spinUTCOffset.SetMinSize( wx.Size( 50,-1 ) )
self.spinUTCOffset.SetRange(-12,14)
bSizerUTC.Add(self.spinUTCOffset, 0, wx.ALL, 10)
sbSizerTime.Add(bSizerUTC, 0, wx.EXPAND)
bSizerTopLeft.Add( sbSizerTime, 0, wx.EXPAND, 10 )
sbSizerSpacing = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Intended Time Spacing:" ), wx.VERTICAL )
self.spinTimeSpacing = wx.SpinCtrl( sbSizerSpacing.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.SP_ARROW_KEYS, 0, 10, 0 )
self.spinTimeSpacing.SetMinSize( wx.Size( 160,-1 ) )
self.spinTimeSpacing.SetRange(0,100)
sbSizerSpacing.Add( self.spinTimeSpacing, 0, wx.ALL, 10 )
bSizerUnit = wx.BoxSizer(wx.HORIZONTAL)
lblUnitID = wx.StaticText(sbSizerSpacing.GetStaticBox(), wx.ID_ANY,u"Unit", wx.DefaultPosition, wx.DefaultSize, 0)
bSizerUnit.Add(lblUnitID, 0, wx.ALL, 10)
bSizerUnit.AddSpacer((0, 0), 1, wx.EXPAND)
self.choiceUnitID = wx.Choice( sbSizerSpacing.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 120,-1 ), choiceTimeColChoices, 0)
self.choiceUnitID.SetSelection(0)
bSizerUnit.Add(self.choiceUnitID, 0, wx.ALL, 10)
sbSizerSpacing.Add(bSizerUnit, 0, wx.EXPAND)
bSizerTopLeft.Add( sbSizerSpacing, 0, wx.EXPAND, 10)
bSizerTop.Add(bSizerTopLeft, 0, wx.EXPAND)
sbSizerData = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Data Columns:" ), wx.VERTICAL )
#self.m_listCtrl1 = VirtualGrid(self, id=wx.ID_ANY,
# pos=wx.DefaultPosition, size=wx.Size(-1, 250))
if sys.platform == 'darwin':
self.m_listCtrl1 = MyGrid(self, id=wx.ID_ANY,
pos=wx.DefaultPosition, size=wx.Size(820, 250))
else:
self.m_listCtrl1 = MyGrid(self, id=wx.ID_ANY,
pos=wx.DefaultPosition, size=wx.Size(-1, 250))
sbSizerData.Add(self.m_listCtrl1, 0, wx.ALL, 10)
bSizerTop.Add( sbSizerData, 1, wx.EXPAND, 10 )
bSizerMain.Add( bSizerTop, 0, wx.EXPAND|wx.ALL, 10 )
bSizerBottom = wx.BoxSizer( wx.VERTICAL )
sbSizerMappings = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Mappings:" ), wx.VERTICAL )
# ObjectListView table.
self.lstMappings = \
ObjectListView(sbSizerMappings.GetStaticBox(),
id=wx.ID_ANY,
pos=wx.DefaultPosition,
size=wx.Size(-1,200),
style=wx.LC_REPORT|wx.SUNKEN_BORDER)
# Customize the list control's message
# when it is empty.
self.lstMappings.oddRowsBackColor = wx.Colour(255, 248, 229)
self.lstMappings.evenRowsBackColor = wx.Colour(204, 229, 255)
self.lstMappings.SetEmptyListMsg(\
"No columns mapped")
self.lstMappings.SetObjects(None)
self.lstMappings.SetColumns([
ColumnDefn('Data Column','left',150,'variableName'),
ColumnDefn('Result ID','left',70,'resultID'),
ColumnDefn('Samp. Feat. Code','left',110,'samplingFeatureCode'),
ColumnDefn('Samp. Feat. Name','left',110,'samplingFeatureName'),
ColumnDefn('Variable Code','left',100,'variableCode'),
ColumnDefn('Variable Name','left',100,'variableNameCV'),
ColumnDefn('Units Name','left',80,'unitsName'),
ColumnDefn('Method Code','left',100,'methodCode'),
ColumnDefn('Method Name','left',100,'methodName'),
ColumnDefn('Proc. Level Code','left',110,'processingLevelCode'),])
sbSizerMappings.Add(self.lstMappings, 1, wx.EXPAND | wx.ALL)
bSizerBottom.Add( sbSizerMappings, 0, wx.EXPAND|wx.ALL)
bSizerMain.Add( bSizerBottom, 1, wx.EXPAND|wx.ALL, 10 )
self.SetSizer( bSizerMain )
self.Layout()
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onColClick, self.m_listCtrl1)
self.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, self.onCellClick, self.m_listCtrl1)
self.Bind(wx.grid.EVT_GRID_CELL_RIGHT_CLICK, self.onCellClick, self.m_listCtrl1)
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_DCLICK, self.onColDoubleClick, self.m_listCtrl1)
self.Bind(wx.EVT_CHOICE, self.onTimeChoice, self.choiceTimeCol)
self.Bind(wx.EVT_CHOICE, self.onSelectUnit, self.choiceUnitID)
    def onSelectUnit(self, event):
        """Handle EVT_CHOICE on the unit choice control; stub to override in a derived class."""
        event.Skip()
    def onAddNew(self, event):
        """Event-handler stub; no binding visible in this chunk — presumably wired elsewhere (TODO confirm). Override in a derived class."""
        event.Skip()
    def onColClick(self, event):
        """Handle EVT_GRID_LABEL_LEFT_CLICK on the data grid; stub to override in a derived class."""
        event.Skip()
    def onCellClick(self, event):
        """Handle EVT_GRID_CELL_LEFT_CLICK / EVT_GRID_CELL_RIGHT_CLICK on the data grid; stub to override."""
        event.Skip()
    def onTimeSelect(self, event):
        """Event-handler stub; no binding visible in this chunk — presumably wired elsewhere (TODO confirm). Override in a derived class."""
        event.Skip()
    def onTimeChoice(self, event):
        """Handle EVT_CHOICE on the date/time column choice; stub to override in a derived class."""
        event.Skip()
    def onColDoubleClick(self, event):
        """Handle EVT_GRID_LABEL_LEFT_DCLICK on the data grid; stub to override in a derived class."""
        event.Skip()
|
bsd-3-clause
|
amosonn/distributed
|
distributed/bokeh/workers/main.py
|
2
|
2796
|
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
import json
import os
from bokeh.io import curdoc
from bokeh.layouts import column, row
from toolz import valmap
from tornado import gen
from distributed.core import rpc
from distributed.bokeh.worker_monitor import (worker_table_plot,
worker_table_update, processing_plot, processing_update)
from distributed.utils import log_errors
import distributed.bokeh
SIZING_MODE = 'scale_width'
WIDTH = 600
messages = distributed.bokeh.messages # global message store
doc = curdoc()
# Per-user dask directory holding the optional web-UI options file.
dask_dir = os.path.join(os.path.expanduser('~'), '.dask')
options_path = os.path.join(dask_dir, '.dask-web-ui.json')
if os.path.exists(options_path):
    with open(options_path, 'r') as f:
        options = json.load(f)
else:
    # No options file: fall back to the default local scheduler addresses.
    options = {'host': '127.0.0.1',
               'tcp-port': 8786,
               'http-port': 9786}
# RPC handle to the scheduler, used for broadcast-style queries.
scheduler = rpc(ip=options['host'], port=options['tcp-port'])
worker_source, [mem, table] = worker_table_plot(width=WIDTH)
def worker_update():
    """Refresh the worker table from the most recent 'workers' message, if any."""
    with log_errors():
        recent = messages['workers']['deque']
        try:
            latest = recent[-1]
        except IndexError:
            # No worker messages received yet; nothing to show.
            return
        worker_table_update(worker_source, latest)
doc.add_periodic_callback(worker_update, messages['workers']['interval'])
"""
def f(_, old, new):
host = worker_source.data['host']
hosts = [host[i] for i in new['1d']['indices']]
@gen.coroutine
def _():
results = yield scheduler.broadcast(hosts=hosts, msg={'op': 'health'})
text = json.dumps(results, indent=2)
paragraph.text = text
doc.add_next_tick_callback(_)
worker_source.on_change('selected', f)
"""
processing_source, processing_plot = processing_plot(width=WIDTH, height=150)
def processing_plot_update():
    """Update the task-processing plot from the latest 'processing' message.

    Also nudges the plot's x_range to track the data: the range jumps
    outward immediately when data falls outside it, but walks back slowly
    (95/5 exponential step) when the data shrinks, to avoid jitter.
    """
    with log_errors():
        msg = messages['processing']
        if not msg['ncores']:
            return  # no workers known yet; nothing to plot
        data = processing_update(msg)
        x_range = processing_plot.x_range
        max_right = max(data['right'])
        # NOTE(review): last element of data['left'] is excluded here —
        # presumably a special bar appended by processing_update; confirm.
        min_left = min(data['left'][:-1])
        cores = max(data['ncores'])
        if min_left < x_range.start:  # not out there enough, jump ahead
            x_range.start = min_left - 2
        elif x_range.start < 2 * min_left - cores:  # way out there, walk back
            x_range.start = x_range.start * 0.95 + min_left * 0.05
        if x_range.end < max_right:
            x_range.end = max_right + 2
        elif x_range.end > 2 * max_right + cores:  # way out there, walk back
            x_range.end = x_range.end * 0.95 + max_right * 0.05
        processing_source.data.update(data)
doc.add_periodic_callback(processing_plot_update, 200)
layout = column(
processing_plot,
mem,
table,
sizing_mode=SIZING_MODE
)
doc.add_root(layout)
|
bsd-3-clause
|
tedi3231/openerp
|
build/lib/openerp/addons/google_base_account/wizard/__init__.py
|
62
|
1076
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import google_login
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
saquiba2/numpytry
|
numpy/distutils/fcompiler/__init__.py
|
152
|
38302
|
"""numpy.distutils.fcompiler
Contains FCompiler, an abstract base class that defines the interface
for the numpy.distutils Fortran compiler abstraction model.
Terminology:
To be consistent, where the term 'executable' is used, it means the single
file, like 'gcc', that is executed, and should be a string. In contrast,
'command' means the entire command line, like ['gcc', '-c', 'file.c'], and
should be a list.
But note that FCompiler.executables is actually a dictionary of commands.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',
'dummy_fortran_file']
import os
import sys
import re
import types
try:
set
except NameError:
from sets import Set as set
from numpy.compat import open_latin1
from distutils.sysconfig import get_python_lib
from distutils.fancy_getopt import FancyGetopt
from distutils.errors import DistutilsModuleError, \
DistutilsExecError, CompileError, LinkError, DistutilsPlatformError
from distutils.util import split_quoted, strtobool
from numpy.distutils.ccompiler import CCompiler, gen_lib_options
from numpy.distutils import log
from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
make_temp_file, get_shared_lib_extension
from numpy.distutils.environment import EnvironmentConfig
from numpy.distutils.exec_command import find_executable
from numpy.distutils.compat import get_exception
__metaclass__ = type
class CompilerNotFound(Exception):
    """Raised when no usable Fortran compiler, or its version, can be found."""
    pass
def flaglist(s):
    """Convert a quoted/whitespace-delimited flag string into a list of flags.

    Non-string values (typically an existing list) are returned unchanged.
    """
    if not is_string(s):
        return s
    return split_quoted(s)
def str2bool(s):
    """Coerce *s* to a truth value; strings go through distutils' strtobool
    (which returns 0/1), everything else through bool()."""
    return strtobool(s) if is_string(s) else bool(s)
def is_sequence_of_strings(seq):
    """Return True if *seq* is a sequence whose elements are all strings."""
    return is_sequence(seq) and all_strings(seq)
class FCompiler(CCompiler):
"""Abstract base class to define the interface that must be implemented
by real Fortran compiler classes.
Methods that subclasses may redefine:
update_executables(), find_executables(), get_version()
get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
get_flags_arch_f90(), get_flags_debug_f90(),
get_flags_fix(), get_flags_linker_so()
DON'T call these methods (except get_version) after
constructing a compiler instance or inside any other method.
All methods, except update_executables() and find_executables(),
may call the get_version() method.
After constructing a compiler instance, always call customize(dist=None)
method that finalizes compiler construction and makes the following
attributes available:
compiler_f77
compiler_f90
compiler_fix
linker_so
archiver
ranlib
libraries
library_dirs
"""
# These are the environment variables and distutils keys used.
# Each configuration descripition is
# (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>)
# The hook names are handled by the self._environment_hook method.
# - names starting with 'self.' call methods in this class
# - names starting with 'exe.' return the key in the executables dict
# - names like 'flags.YYY' return self.get_flag_YYY()
# convert is either None or a function to convert a string to the
# appropiate type used.
distutils_vars = EnvironmentConfig(
distutils_section='config_fc',
noopt = (None, None, 'noopt', str2bool),
noarch = (None, None, 'noarch', str2bool),
debug = (None, None, 'debug', str2bool),
verbose = (None, None, 'verbose', str2bool),
)
command_vars = EnvironmentConfig(
distutils_section='config_fc',
compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None),
compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None),
compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None),
version_cmd = ('exe.version_cmd', None, None, None),
linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None),
linker_exe = ('exe.linker_exe', 'LD', 'ld', None),
archiver = (None, 'AR', 'ar', None),
ranlib = (None, 'RANLIB', 'ranlib', None),
)
flag_vars = EnvironmentConfig(
distutils_section='config_fc',
f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist),
f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist),
free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist),
fix = ('flags.fix', None, None, flaglist),
opt = ('flags.opt', 'FOPT', 'opt', flaglist),
opt_f77 = ('flags.opt_f77', None, None, flaglist),
opt_f90 = ('flags.opt_f90', None, None, flaglist),
arch = ('flags.arch', 'FARCH', 'arch', flaglist),
arch_f77 = ('flags.arch_f77', None, None, flaglist),
arch_f90 = ('flags.arch_f90', None, None, flaglist),
debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist),
debug_f77 = ('flags.debug_f77', None, None, flaglist),
debug_f90 = ('flags.debug_f90', None, None, flaglist),
flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist),
linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist),
linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist),
ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist),
)
language_map = {'.f': 'f77',
'.for': 'f77',
'.F': 'f77', # XXX: needs preprocessor
'.ftn': 'f77',
'.f77': 'f77',
'.f90': 'f90',
'.F90': 'f90', # XXX: needs preprocessor
'.f95': 'f90',
}
language_order = ['f90', 'f77']
# These will be set by the subclass
compiler_type = None
compiler_aliases = ()
version_pattern = None
possible_executables = []
executables = {
'version_cmd': ["f77", "-v"],
'compiler_f77': ["f77"],
'compiler_f90': ["f90"],
'compiler_fix': ["f90", "-fixed"],
'linker_so': ["f90", "-shared"],
'linker_exe': ["f90"],
'archiver': ["ar", "-cr"],
'ranlib': None,
}
# If compiler does not support compiling Fortran 90 then it can
# suggest using another compiler. For example, gnu would suggest
# gnu95 compiler type when there are F90 sources.
suggested_f90_compiler = None
compile_switch = "-c"
object_switch = "-o " # Ending space matters! It will be stripped
# but if it is missing then object_switch
# will be prefixed to object file name by
# string concatenation.
library_switch = "-o " # Ditto!
# Switch to specify where module files are created and searched
# for USE statement. Normally it is a string and also here ending
# space matters. See above.
module_dir_switch = None
# Switch to specify where module files are searched for USE statement.
module_include_switch = '-I'
pic_flags = [] # Flags to create position-independent code
src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR']
obj_extension = ".o"
shared_lib_extension = get_shared_lib_extension()
static_lib_extension = ".a" # or .lib
static_lib_format = "lib%s%s" # or %s%s
shared_lib_format = "%s%s"
exe_extension = ""
_exe_cache = {}
_executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe', 'archiver',
'ranlib']
# This will be set by new_fcompiler when called in
# command/{build_ext.py, build_clib.py, config.py} files.
c_compiler = None
# extra_{f77,f90}_compile_args are set by build_ext.build_extension method
extra_f77_compile_args = []
extra_f90_compile_args = []
    def __init__(self, *args, **kw):
        """Initialize, cloning the class-level config tables per instance."""
        CCompiler.__init__(self, *args, **kw)
        # Clone the EnvironmentConfig tables so hook resolution is bound to
        # this instance's _environment_hook.
        self.distutils_vars = self.distutils_vars.clone(self._environment_hook)
        self.command_vars = self.command_vars.clone(self._environment_hook)
        self.flag_vars = self.flag_vars.clone(self._environment_hook)
        # Per-instance copy so set_command() cannot mutate the class default.
        self.executables = self.executables.copy()
        for e in self._executable_keys:
            if e not in self.executables:
                self.executables[e] = None
        # Some methods depend on .customize() being called first, so
        # this keeps track of whether that's happened yet.
        self._is_customised = False
    def __copy__(self):
        """Shallow-copy this compiler, rebinding per-instance state to the copy."""
        obj = self.__new__(self.__class__)
        obj.__dict__.update(self.__dict__)
        # Re-clone the config tables and executables dict so the copy and the
        # original cannot affect each other through shared objects.
        obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)
        obj.command_vars = obj.command_vars.clone(obj._environment_hook)
        obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)
        obj.executables = obj.executables.copy()
        return obj
    def copy(self):
        """Return a copy of this compiler instance (same as copy.copy(self))."""
        return self.__copy__()
# Use properties for the attributes used by CCompiler. Setting them
# as attributes from the self.executables dictionary is error-prone,
# so we get them from there each time.
def _command_property(key):
def fget(self):
assert self._is_customised
return self.executables[key]
return property(fget=fget)
version_cmd = _command_property('version_cmd')
compiler_f77 = _command_property('compiler_f77')
compiler_f90 = _command_property('compiler_f90')
compiler_fix = _command_property('compiler_fix')
linker_so = _command_property('linker_so')
linker_exe = _command_property('linker_exe')
archiver = _command_property('archiver')
ranlib = _command_property('ranlib')
# Make our terminology consistent.
    def set_executable(self, key, value):
        """Alias for set_command() (CCompiler-compatible terminology)."""
        self.set_command(key, value)
    def set_commands(self, **kw):
        """Set several executable commands at once via keyword arguments."""
        for k, v in kw.items():
            self.set_command(k, v)
def set_command(self, key, value):
if not key in self._executable_keys:
raise ValueError(
"unknown executable '%s' for class %s" %
(key, self.__class__.__name__))
if is_string(value):
value = split_quoted(value)
assert value is None or is_sequence_of_strings(value[1:]), (key, value)
self.executables[key] = value
######################################################################
## Methods that subclasses may redefine. But don't call these methods!
## They are private to FCompiler class and may return unexpected
## results if used elsewhere. So, you have been warned..
def find_executables(self):
"""Go through the self.executables dictionary, and attempt to
find and assign appropiate executables.
Executable names are looked for in the environment (environment
variables, the distutils.cfg, and command line), the 0th-element of
the command list, and the self.possible_executables list.
Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
or the Fortran 90 compiler executable is used, unless overridden
by an environment setting.
Subclasses should call this if overriden.
"""
assert self._is_customised
exe_cache = self._exe_cache
def cached_find_executable(exe):
if exe in exe_cache:
return exe_cache[exe]
fc_exe = find_executable(exe)
exe_cache[exe] = exe_cache[fc_exe] = fc_exe
return fc_exe
def verify_command_form(name, value):
if value is not None and not is_sequence_of_strings(value):
raise ValueError(
"%s value %r is invalid in class %s" %
(name, value, self.__class__.__name__))
def set_exe(exe_key, f77=None, f90=None):
cmd = self.executables.get(exe_key, None)
if not cmd:
return None
# Note that we get cmd[0] here if the environment doesn't
# have anything set
exe_from_environ = getattr(self.command_vars, exe_key)
if not exe_from_environ:
possibles = [f90, f77] + self.possible_executables
else:
possibles = [exe_from_environ] + self.possible_executables
seen = set()
unique_possibles = []
for e in possibles:
if e == '<F77>':
e = f77
elif e == '<F90>':
e = f90
if not e or e in seen:
continue
seen.add(e)
unique_possibles.append(e)
for exe in unique_possibles:
fc_exe = cached_find_executable(exe)
if fc_exe:
cmd[0] = fc_exe
return fc_exe
self.set_command(exe_key, None)
return None
ctype = self.compiler_type
f90 = set_exe('compiler_f90')
if not f90:
f77 = set_exe('compiler_f77')
if f77:
log.warn('%s: no Fortran 90 compiler found' % ctype)
else:
raise CompilerNotFound('%s: f90 nor f77' % ctype)
else:
f77 = set_exe('compiler_f77', f90=f90)
if not f77:
log.warn('%s: no Fortran 77 compiler found' % ctype)
set_exe('compiler_fix', f90=f90)
set_exe('linker_so', f77=f77, f90=f90)
set_exe('linker_exe', f77=f77, f90=f90)
set_exe('version_cmd', f77=f77, f90=f90)
set_exe('archiver')
set_exe('ranlib')
def update_executables(elf):
"""Called at the beginning of customisation. Subclasses should
override this if they need to set up the executables dictionary.
Note that self.find_executables() is run afterwards, so the
self.executables dictionary values can contain <F77> or <F90> as
the command, which will be replaced by the found F77 or F90
compiler.
"""
pass
    def get_flags(self):
        """List of flags common to all compiler types."""
        # Return a fresh list so callers may extend it safely.
        return [] + self.pic_flags
def _get_command_flags(self, key):
cmd = self.executables.get(key, None)
if cmd is None:
return []
return cmd[1:]
def get_flags_f77(self):
"""List of Fortran 77 specific flags."""
return self._get_command_flags('compiler_f77')
def get_flags_f90(self):
"""List of Fortran 90 specific flags."""
return self._get_command_flags('compiler_f90')
def get_flags_free(self):
"""List of Fortran 90 free format specific flags."""
return []
def get_flags_fix(self):
"""List of Fortran 90 fixed format specific flags."""
return self._get_command_flags('compiler_fix')
def get_flags_linker_so(self):
"""List of linker flags to build a shared library."""
return self._get_command_flags('linker_so')
def get_flags_linker_exe(self):
"""List of linker flags to build an executable."""
return self._get_command_flags('linker_exe')
def get_flags_ar(self):
"""List of archiver flags. """
return self._get_command_flags('archiver')
def get_flags_opt(self):
"""List of architecture independent compiler flags."""
return []
def get_flags_arch(self):
"""List of architecture dependent compiler flags."""
return []
def get_flags_debug(self):
"""List of compiler flags to compile with debugging information."""
return []
get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt
get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch
get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug
def get_libraries(self):
"""List of compiler libraries."""
return self.libraries[:]
def get_library_dirs(self):
"""List of compiler library directories."""
return self.library_dirs[:]
def get_version(self, force=False, ok_status=[0]):
assert self._is_customised
version = CCompiler.get_version(self, force=force, ok_status=ok_status)
if version is None:
raise CompilerNotFound()
return version
############################################################
## Public methods:
def customize(self, dist = None):
"""Customize Fortran compiler.
This method gets Fortran compiler specific information from
(i) class definition, (ii) environment, (iii) distutils config
files, and (iv) command line (later overrides earlier).
This method should be always called after constructing a
compiler instance. But not in __init__ because Distribution
instance is needed for (iii) and (iv).
"""
log.info('customize %s' % (self.__class__.__name__))
self._is_customised = True
self.distutils_vars.use_distribution(dist)
self.command_vars.use_distribution(dist)
self.flag_vars.use_distribution(dist)
self.update_executables()
# find_executables takes care of setting the compiler commands,
# version_cmd, linker_so, linker_exe, ar, and ranlib
self.find_executables()
noopt = self.distutils_vars.get('noopt', False)
noarch = self.distutils_vars.get('noarch', noopt)
debug = self.distutils_vars.get('debug', False)
f77 = self.command_vars.compiler_f77
f90 = self.command_vars.compiler_f90
f77flags = []
f90flags = []
freeflags = []
fixflags = []
if f77:
f77flags = self.flag_vars.f77
if f90:
f90flags = self.flag_vars.f90
freeflags = self.flag_vars.free
# XXX Assuming that free format is default for f90 compiler.
fix = self.command_vars.compiler_fix
if fix:
fixflags = self.flag_vars.fix + f90flags
oflags, aflags, dflags = [], [], []
# examine get_flags_<tag>_<compiler> for extra flags
# only add them if the method is different from get_flags_<tag>
def get_flags(tag, flags):
# note that self.flag_vars.<tag> calls self.get_flags_<tag>()
flags.extend(getattr(self.flag_vars, tag))
this_get = getattr(self, 'get_flags_' + tag)
for name, c, flagvar in [('f77', f77, f77flags),
('f90', f90, f90flags),
('f90', fix, fixflags)]:
t = '%s_%s' % (tag, name)
if c and this_get is not getattr(self, 'get_flags_' + t):
flagvar.extend(getattr(self.flag_vars, t))
if not noopt:
get_flags('opt', oflags)
if not noarch:
get_flags('arch', aflags)
if debug:
get_flags('debug', dflags)
fflags = self.flag_vars.flags + dflags + oflags + aflags
if f77:
self.set_commands(compiler_f77=[f77]+f77flags+fflags)
if f90:
self.set_commands(compiler_f90=[f90]+freeflags+f90flags+fflags)
if fix:
self.set_commands(compiler_fix=[fix]+fixflags+fflags)
#XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
linker_so = self.linker_so
if linker_so:
linker_so_flags = self.flag_vars.linker_so
if sys.platform.startswith('aix'):
python_lib = get_python_lib(standard_lib=1)
ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
python_exp = os.path.join(python_lib, 'config', 'python.exp')
linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
self.set_commands(linker_so=linker_so+linker_so_flags)
linker_exe = self.linker_exe
if linker_exe:
linker_exe_flags = self.flag_vars.linker_exe
self.set_commands(linker_exe=linker_exe+linker_exe_flags)
ar = self.command_vars.archiver
if ar:
arflags = self.flag_vars.ar
self.set_commands(archiver=[ar]+arflags)
self.set_library_dirs(self.get_library_dirs())
self.set_libraries(self.get_libraries())
def dump_properties(self):
"""Print out the attributes of a compiler instance."""
props = []
for key in list(self.executables.keys()) + \
['version', 'libraries', 'library_dirs',
'object_switch', 'compile_switch']:
if hasattr(self, key):
v = getattr(self, key)
props.append((key, None, '= '+repr(v)))
props.sort()
pretty_printer = FancyGetopt(props)
for l in pretty_printer.generate_help("%s instance properties:" \
% (self.__class__.__name__)):
if l[:4]==' --':
l = ' ' + l[4:]
print(l)
###################
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
src_flags = {}
if is_f_file(src) and not has_f90_header(src):
flavor = ':f77'
compiler = self.compiler_f77
src_flags = get_f77flags(src)
extra_compile_args = self.extra_f77_compile_args or []
elif is_free_format(src):
flavor = ':f90'
compiler = self.compiler_f90
if compiler is None:
raise DistutilsExecError('f90 not supported by %s needed for %s'\
% (self.__class__.__name__, src))
extra_compile_args = self.extra_f90_compile_args or []
else:
flavor = ':fix'
compiler = self.compiler_fix
if compiler is None:
raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\
% (self.__class__.__name__, src))
extra_compile_args = self.extra_f90_compile_args or []
if self.object_switch[-1]==' ':
o_args = [self.object_switch.strip(), obj]
else:
o_args = [self.object_switch.strip()+obj]
assert self.compile_switch.strip()
s_args = [self.compile_switch, src]
if extra_compile_args:
log.info('extra %s options: %r' \
% (flavor[1:], ' '.join(extra_compile_args)))
extra_flags = src_flags.get(self.compiler_type, [])
if extra_flags:
log.info('using compile options from source: %r' \
% ' '.join(extra_flags))
command = compiler + cc_args + extra_flags + s_args + o_args \
+ extra_postargs + extra_compile_args
display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,
src)
try:
self.spawn(command, display=display)
except DistutilsExecError:
msg = str(get_exception())
raise CompileError(msg)
def module_options(self, module_dirs, module_build_dir):
options = []
if self.module_dir_switch is not None:
if self.module_dir_switch[-1]==' ':
options.extend([self.module_dir_switch.strip(), module_build_dir])
else:
options.append(self.module_dir_switch.strip()+module_build_dir)
else:
print('XXX: module_build_dir=%r option ignored' % (module_build_dir))
print('XXX: Fix module_dir_switch for ', self.__class__.__name__)
if self.module_include_switch is not None:
for d in [module_build_dir]+module_dirs:
options.append('%s%s' % (self.module_include_switch, d))
else:
print('XXX: module_dirs=%r option ignored' % (module_dirs))
print('XXX: Fix module_include_switch for ', self.__class__.__name__)
return options
    def library_option(self, lib):
        """Return the linker option to link against library *lib* (e.g. '-lfoo')."""
        return "-l" + lib
    def library_dir_option(self, dir):
        """Return the linker option adding *dir* to the library search path."""
        return "-L" + dir
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
libraries, library_dirs, runtime_library_dirs = \
self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if is_string(output_dir):
output_filename = os.path.join(output_dir, output_filename)
elif output_dir is not None:
raise TypeError("'output_dir' must be a string or None")
if self._need_link(objects, output_filename):
if self.library_switch[-1]==' ':
o_args = [self.library_switch.strip(), output_filename]
else:
o_args = [self.library_switch.strip()+output_filename]
if is_string(self.objects):
ld_args = objects + [self.objects]
else:
ld_args = objects + self.objects
ld_args = ld_args + lib_opts + o_args
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
command = linker + ld_args
try:
self.spawn(command)
except DistutilsExecError:
msg = str(get_exception())
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def _environment_hook(self, name, hook_name):
if hook_name is None:
return None
if is_string(hook_name):
if hook_name.startswith('self.'):
hook_name = hook_name[5:]
hook = getattr(self, hook_name)
return hook()
elif hook_name.startswith('exe.'):
hook_name = hook_name[4:]
var = self.executables[hook_name]
if var:
return var[0]
else:
return None
elif hook_name.startswith('flags.'):
hook_name = hook_name[6:]
hook = getattr(self, 'get_flags_' + hook_name)
return hook()
else:
return hook_name()
## class FCompiler
_default_compilers = (
# sys.platform mappings
('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',
'intelvem', 'intelem')),
('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),
('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq',
'intele', 'intelem', 'gnu', 'g95', 'pathf95')),
('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')),
('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),
('irix.*', ('mips', 'gnu', 'gnu95',)),
('aix.*', ('ibm', 'gnu', 'gnu95',)),
# os.name mappings
('posix', ('gnu', 'gnu95',)),
('nt', ('gnu', 'gnu95',)),
('mac', ('gnu95', 'gnu', 'pg')),
)
fcompiler_class = None
fcompiler_aliases = None
def load_all_fcompiler_classes():
"""Cache all the FCompiler classes found in modules in the
numpy.distutils.fcompiler package.
"""
from glob import glob
global fcompiler_class, fcompiler_aliases
if fcompiler_class is not None:
return
pys = os.path.join(os.path.dirname(__file__), '*.py')
fcompiler_class = {}
fcompiler_aliases = {}
for fname in glob(pys):
module_name, ext = os.path.splitext(os.path.basename(fname))
module_name = 'numpy.distutils.fcompiler.' + module_name
__import__ (module_name)
module = sys.modules[module_name]
if hasattr(module, 'compilers'):
for cname in module.compilers:
klass = getattr(module, cname)
desc = (klass.compiler_type, klass, klass.description)
fcompiler_class[klass.compiler_type] = desc
for alias in klass.compiler_aliases:
if alias in fcompiler_aliases:
raise ValueError("alias %r defined for both %s and %s"
% (alias, klass.__name__,
fcompiler_aliases[alias][1].__name__))
fcompiler_aliases[alias] = desc
def _find_existing_fcompiler(compiler_types,
osname=None, platform=None,
requiref90=False,
c_compiler=None):
from numpy.distutils.core import get_distribution
dist = get_distribution(always=True)
for compiler_type in compiler_types:
v = None
try:
c = new_fcompiler(plat=platform, compiler=compiler_type,
c_compiler=c_compiler)
c.customize(dist)
v = c.get_version()
if requiref90 and c.compiler_f90 is None:
v = None
new_compiler = c.suggested_f90_compiler
if new_compiler:
log.warn('Trying %r compiler as suggested by %r '
'compiler for f90 support.' % (compiler_type,
new_compiler))
c = new_fcompiler(plat=platform, compiler=new_compiler,
c_compiler=c_compiler)
c.customize(dist)
v = c.get_version()
if v is not None:
compiler_type = new_compiler
if requiref90 and c.compiler_f90 is None:
raise ValueError('%s does not support compiling f90 codes, '
'skipping.' % (c.__class__.__name__))
except DistutilsModuleError:
log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type)
except CompilerNotFound:
log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type)
if v is not None:
return compiler_type
return None
def available_fcompilers_for_platform(osname=None, platform=None):
    """Return an ordered list of compiler types matching *osname*/*platform*.

    Patterns in _default_compilers are matched against both names; order is
    preserved and duplicates removed.  Falls back to ['gnu'] when nothing
    matches.
    """
    osname = os.name if osname is None else osname
    platform = sys.platform if platform is None else platform
    matches = []
    for pattern, compiler_types in _default_compilers:
        if re.match(pattern, platform) or re.match(pattern, osname):
            for ctype in compiler_types:
                if ctype not in matches:
                    matches.append(ctype)
    return matches or ['gnu']
def get_default_fcompiler(osname=None, platform=None, requiref90=False,
                          c_compiler=None):
    """Determine the default Fortran compiler to use for the given
    platform."""
    # Candidates for this platform, then the first one actually installed.
    matching_compiler_types = available_fcompilers_for_platform(osname,
                                                                platform)
    compiler_type = _find_existing_fcompiler(matching_compiler_types,
                                             osname=osname,
                                             platform=platform,
                                             requiref90=requiref90,
                                             c_compiler=c_compiler)
    # May be None when no candidate compiler was found.
    return compiler_type
# Flag to avoid rechecking for Fortran compiler every time
failed_fcompilers = set()
def new_fcompiler(plat=None,
                  compiler=None,
                  verbose=0,
                  dry_run=0,
                  force=0,
                  requiref90=False,
                  c_compiler = None):
    """Generate an instance of some FCompiler subclass for the supplied
    platform/compiler combination.

    Returns None (and records the failure in ``failed_fcompilers``) when
    no suitable compiler class can be resolved.
    """
    global failed_fcompilers
    # NOTE(review): the cache key is built from the *raw* arguments,
    # before the plat/compiler defaults below are filled in, so
    # (None, None) and an explicit (os.name, <default>) are cached as
    # distinct entries — confirm this is intended.
    fcompiler_key = (plat, compiler)
    if fcompiler_key in failed_fcompilers:
        return None
    load_all_fcompiler_classes()
    if plat is None:
        plat = os.name
    if compiler is None:
        compiler = get_default_fcompiler(plat, requiref90=requiref90,
                                         c_compiler=c_compiler)
    # Resolve the compiler type name via the primary registry first,
    # then the alias table.
    if compiler in fcompiler_class:
        module_name, klass, long_description = fcompiler_class[compiler]
    elif compiler in fcompiler_aliases:
        module_name, klass, long_description = fcompiler_aliases[compiler]
    else:
        msg = "don't know how to compile Fortran code on platform '%s'" % plat
        if compiler is not None:
            msg = msg + " with '%s' compiler." % compiler
        msg = msg + " Supported compilers are: %s)" \
              % (','.join(fcompiler_class.keys()))
        log.warn(msg)
        # Remember the failure so later calls return quickly.
        failed_fcompilers.add(fcompiler_key)
        return None
    # Rebind 'compiler' from the type-name string to an actual instance.
    compiler = klass(verbose=verbose, dry_run=dry_run, force=force)
    compiler.c_compiler = c_compiler
    return compiler
def show_fcompilers(dist=None):
    """Print list of available compilers (used by the "--help-fcompiler"
    option to "config_fc").
    """
    if dist is None:
        # Build a minimal Distribution so compiler customization can read
        # the usual config files and command-line flags.
        from distutils.dist import Distribution
        from numpy.distutils.command.config_compiler import config_fc
        dist = Distribution()
        dist.script_name = os.path.basename(sys.argv[0])
        dist.script_args = ['config_fc'] + sys.argv[1:]
        try:
            dist.script_args.remove('--help-fcompiler')
        except ValueError:
            pass
        dist.cmdclass['config_fc'] = config_fc
        dist.parse_config_files()
        dist.parse_command_line()
    compilers = []     # found and working on this machine
    compilers_na = []  # valid for this platform, but not found
    compilers_ni = []  # not available on this platform at all
    if not fcompiler_class:
        load_all_fcompiler_classes()
    platform_compilers = available_fcompilers_for_platform()
    for compiler in platform_compilers:
        v = None
        log.set_verbosity(-2)  # silence log output while probing
        try:
            c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
            c.customize(dist)
            v = c.get_version()
        except (DistutilsModuleError, CompilerNotFound):
            e = get_exception()
            log.debug("show_fcompilers: %s not found" % (compiler,))
            log.debug(repr(e))
        # v is None either when probing raised or when no version could
        # be determined — both count as "not found".
        if v is None:
            compilers_na.append(("fcompiler="+compiler, None,
                                 fcompiler_class[compiler][2]))
        else:
            c.dump_properties()
            compilers.append(("fcompiler="+compiler, None,
                              fcompiler_class[compiler][2] + ' (%s)' % v))
    compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))
    compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2])
                    for fc in compilers_ni]
    compilers.sort()
    compilers_na.sort()
    compilers_ni.sort()
    # FancyGetopt pretty-prints the (name, alias, description) triples.
    pretty_printer = FancyGetopt(compilers)
    pretty_printer.print_help("Fortran compilers found:")
    pretty_printer = FancyGetopt(compilers_na)
    pretty_printer.print_help("Compilers available for this "
                              "platform, but not found:")
    if compilers_ni:
        pretty_printer = FancyGetopt(compilers_ni)
        pretty_printer.print_help("Compilers not available on this platform:")
    print("For compiler details, run 'config_fc --verbose' setup command.")
def dummy_fortran_file():
    """Write a trivial Fortran subroutine to a temporary '.f' file and
    return its path with the '.f' suffix stripped.
    """
    handle, path = make_temp_file(suffix='.f')
    handle.write(" subroutine dummy()\n end\n")
    handle.close()
    return path[:-2]
# Predicate: filename carries a fixed-form Fortran extension (case-insensitive).
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match
# Emacs-style mode cookies ("-*- fortran -*-", "-*- f90 -*-", "-*- fix -*-")
# searched for in the first line of a source file.
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search
# Matches a line that looks like free-form code: first column is not a
# fixed-form comment marker (c, *, !) and significant text follows.
_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match
def is_free_format(file):
    """Check if file is in free format Fortran.

    Returns 1 (truthy int) for free format, 0 for fixed format.
    """
    # f90 allows both fixed and free format, assuming fixed unless
    # signs of free format are detected.
    result = 0
    f = open_latin1(file, 'r')
    # try/finally so the file handle is released even if scanning raises
    # (the original leaked the handle on any exception).
    try:
        line = f.readline()
        n = 10000  # the number of non-comment lines to scan for hints
        if _has_f_header(line):
            # Explicit "-*- fortran -*-" cookie: fixed format, stop scanning.
            n = 0
        elif _has_f90_header(line):
            # Explicit "-*- f90 -*-" cookie: free format, stop scanning.
            n = 0
            result = 1
        while n > 0 and line:
            line = line.rstrip()
            if line and line[0] != '!':
                n -= 1
                # Free-format hints: code starting outside the fixed-form
                # label columns, or a trailing '&' continuation.
                if (line[0] != '\t' and _free_f90_start(line[:5])) or \
                        line[-1:] == '&':
                    result = 1
                    break
            line = f.readline()
    finally:
        f.close()
    return result
def has_f90_header(src):
    """Return a truthy match if the first line of ``src`` carries an
    Emacs-style "-*- f90 -*-" or "-*- fix -*-" mode cookie.
    """
    f = open_latin1(src, 'r')
    # Close the handle even if readline raises (the original leaked it).
    try:
        line = f.readline()
    finally:
        f.close()
    return _has_f90_header(line) or _has_fix_header(line)
# Matches "CF77FLAGS(<fcompiler type>) = <flags>" annotations (the leading
# 'c' comment marker is optional), case-insensitively; used by get_f77flags.
_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
def get_f77flags(src):
    """
    Search the first 20 lines of fortran 77 code for line pattern
    `CF77FLAGS(<fcompiler type>)=<f77 flags>`
    Return a dictionary {<fcompiler type>:<f77 flags>}.
    """
    flags = {}
    f = open_latin1(src, 'r')
    # try/finally so the file handle is released even if a line fails to
    # decode or parse (the original leaked the handle on exceptions).
    try:
        for i, line in enumerate(f):
            if i >= 20:
                # Flags are only honoured near the top of the file.
                break
            m = _f77flags_re.match(line)
            if not m:
                continue
            fcname = m.group('fcname').strip()
            fflags = m.group('fflags').strip()
            flags[fcname] = split_quoted(fflags)
    finally:
        f.close()
    return flags
# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
# Script entry point: print the Fortran compilers detected on this machine.
if __name__ == '__main__':
    show_fcompilers()
|
bsd-3-clause
|
clemkoa/scikit-learn
|
sklearn/covariance/graph_lasso_.py
|
7
|
27246
|
"""GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..exceptions import ConvergenceWarning
from ..utils.validation import check_random_state, check_array
from ..utils import deprecated
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..model_selection import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
    """Evaluate the graph-lasso objective function.

    The objective is a shifted, scaled version of the normalized
    negative log-likelihood plus an l1 penalty on the off-diagonal
    entries of the precision matrix (diagonal entries are not penalized).
    """
    n_features = precision_.shape[0]
    neg_log_lik = (- 2. * log_likelihood(mle, precision_)
                   + n_features * np.log(2 * np.pi))
    off_diagonal_l1 = (np.abs(precision_).sum()
                       - np.abs(np.diag(precision_)).sum())
    return neg_log_lik + alpha * off_diagonal_l1
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
    """Find the maximum alpha for which there are some non-zeros
    off-diagonal.

    Parameters
    ----------
    emp_cov : 2D array, (n_features, n_features)
        The sample covariance matrix

    Notes
    -----
    This results from the bound for the all the Lasso that are solved
    in GraphLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
    """
    # Work on a copy so the caller's matrix is left untouched; zero the
    # diagonal so only off-diagonal magnitudes are considered.
    off_diag = np.abs(np.copy(emp_cov))
    np.fill_diagonal(off_diag, 0)
    return off_diag.max()
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
                enet_tol=1e-4, max_iter=100, verbose=False,
                return_costs=False, eps=np.finfo(np.float64).eps,
                return_n_iter=False):
    """l1-penalized covariance estimator

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    emp_cov : 2D ndarray, shape (n_features, n_features)
        Empirical covariance from which to compute the covariance estimate.
    alpha : positive float
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
    cov_init : 2D array (n_features, n_features), optional
        The initial guess for the covariance.
    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.
    max_iter : integer, optional
        The maximum number of iterations.
    verbose : boolean, optional
        If verbose is True, the objective function and dual gap are
        printed at each iteration.
    return_costs : boolean, optional
        If return_costs is True, the objective function and dual gap
        at each iteration are returned.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    return_n_iter : bool, optional
        Whether or not to return the number of iterations.

    Returns
    -------
    covariance : 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrix.
    precision : 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrix.
    costs : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.
    n_iter : int
        Number of iterations. Returned only if `return_n_iter` is set to True.

    See Also
    --------
    GraphLasso, GraphLassoCV

    Notes
    -----
    The algorithm employed to solve this problem is the GLasso algorithm,
    from the Friedman 2008 Biostatistics paper. It is the same algorithm
    as in the R `glasso` package.
    One possible difference with the `glasso` R package is that the
    diagonal coefficients are not penalized.
    """
    _, n_features = emp_cov.shape
    # alpha == 0: no penalization — the maximum-likelihood estimate
    # (inverse of the empirical covariance) is returned directly.
    if alpha == 0:
        if return_costs:
            precision_ = linalg.inv(emp_cov)
            cost = - 2. * log_likelihood(emp_cov, precision_)
            cost += n_features * np.log(2 * np.pi)
            d_gap = np.sum(emp_cov * precision_) - n_features
            if return_n_iter:
                return emp_cov, precision_, (cost, d_gap), 0
            else:
                return emp_cov, precision_, (cost, d_gap)
        else:
            if return_n_iter:
                return emp_cov, linalg.inv(emp_cov), 0
            else:
                return emp_cov, linalg.inv(emp_cov)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: This is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows. Beside, this takes
    # conservative stand-point on the initial conditions, and it tends to
    # make the convergence go faster.
    covariance_ *= 0.95
    diagonal = emp_cov.flat[::n_features + 1]
    covariance_.flat[::n_features + 1] = diagonal
    # pinvh: (pseudo-)inverse suited to the symmetric starting covariance.
    precision_ = linalg.pinvh(covariance_)
    indices = np.arange(n_features)
    costs = list()
    # The different l1 regression solver have different numerical errors
    if mode == 'cd':
        errors = dict(over='raise', invalid='ignore')
    else:
        errors = dict(invalid='raise')
    try:
        # be robust to the max_iter=0 edge case, see:
        # https://github.com/scikit-learn/scikit-learn/issues/4134
        d_gap = np.inf
        # set a sub_covariance buffer
        sub_covariance = np.ascontiguousarray(covariance_[1:, 1:])
        for i in range(max_iter):
            for idx in range(n_features):
                # To keep the contiguous matrix `sub_covariance` equal to
                # covariance_[indices != idx].T[indices != idx]
                # we only need to update 1 column and 1 line when idx changes
                if idx > 0:
                    di = idx - 1
                    sub_covariance[di] = covariance_[di][indices != idx]
                    sub_covariance[:, di] = covariance_[:, di][indices != idx]
                else:
                    sub_covariance[:] = covariance_[1:, 1:]
                row = emp_cov[idx, indices != idx]
                with np.errstate(**errors):
                    if mode == 'cd':
                        # Use coordinate descent
                        # The 1000 * eps ridge guards against division by a
                        # vanishing diagonal entry of the precision matrix.
                        coefs = -(precision_[indices != idx, idx]
                                  / (precision_[idx, idx] + 1000 * eps))
                        coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
                            coefs, alpha, 0, sub_covariance, row, row,
                            max_iter, enet_tol, check_random_state(None), False)
                    else:
                        # Use LARS
                        _, _, coefs = lars_path(
                            sub_covariance, row, Xy=row, Gram=sub_covariance,
                            alpha_min=alpha / (n_features - 1), copy_Gram=True,
                            eps=eps, method='lars', return_path=False)
                # Update the precision matrix
                precision_[idx, idx] = (
                    1. / (covariance_[idx, idx]
                          - np.dot(covariance_[indices != idx, idx], coefs)))
                precision_[indices != idx, idx] = (- precision_[idx, idx]
                                                   * coefs)
                precision_[idx, indices != idx] = (- precision_[idx, idx]
                                                   * coefs)
                coefs = np.dot(sub_covariance, coefs)
                covariance_[idx, indices != idx] = coefs
                covariance_[indices != idx, idx] = coefs
            d_gap = _dual_gap(emp_cov, precision_, alpha)
            cost = _objective(emp_cov, precision_, alpha)
            if verbose:
                print(
                    '[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
                    % (i, cost, d_gap))
            if return_costs:
                costs.append((cost, d_gap))
            if np.abs(d_gap) < tol:
                break
            if not np.isfinite(cost) and i > 0:
                raise FloatingPointError('Non SPD result: the system is '
                                         'too ill-conditioned for this solver')
        else:
            # for-else: reached only when the dual gap never fell below tol.
            warnings.warn('graph_lasso: did not converge after %i iteration:'
                          ' dual gap: %.3e' % (max_iter, d_gap),
                          ConvergenceWarning)
    except FloatingPointError as e:
        e.args = (e.args[0]
                  + '. The system is too ill-conditioned for this solver',)
        raise e
    if return_costs:
        if return_n_iter:
            return covariance_, precision_, costs, i + 1
        else:
            return covariance_, precision_, costs
    else:
        if return_n_iter:
            return covariance_, precision_, i + 1
        else:
            return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
    """Sparse inverse covariance estimation with an l1-penalized estimator.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    alpha : positive float, default 0.01
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
    mode : {'cd', 'lars'}, default 'cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    tol : positive float, default 1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.
    max_iter : integer, default 100
        The maximum number of iterations.
    verbose : boolean, default False
        If verbose is True, the objective function and dual gap are
        plotted at each iteration.
    assume_centered : boolean, default False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    covariance_ : array-like, shape (n_features, n_features)
        Estimated covariance matrix
    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
    n_iter_ : int
        Number of iterations run.

    See Also
    --------
    graph_lasso, GraphLassoCV
    """

    def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
                 max_iter=100, verbose=False, assume_centered=False):
        # Store hyper-parameters unmodified (scikit-learn estimator
        # convention: validation happens in fit, not __init__).
        super(GraphLasso, self).__init__(assume_centered=assume_centered)
        self.alpha = alpha
        self.mode = mode
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.verbose = verbose

    def fit(self, X, y=None):
        """Fits the GraphLasso model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate
        y : (ignored)
        """
        # Covariance does not make sense for a single feature
        X = check_array(X, ensure_min_features=2, ensure_min_samples=2,
                        estimator=self)
        if self.assume_centered:
            # Data assumed centered: fix the location (mean) to zero.
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)
        # Delegate the actual optimization to the graph_lasso solver.
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
            enet_tol=self.enet_tol, max_iter=self.max_iter,
            verbose=self.verbose, return_n_iter=True)
        return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
                     tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
    """l1-penalized covariance estimator along a path of decreasing alphas

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    X : 2D ndarray, shape (n_samples, n_features)
        Data from which to compute the covariance estimate.
    alphas : list of positive floats
        The list of regularization parameters, decreasing order.
    X_test : 2D array, shape (n_test_samples, n_features), optional
        Optional test matrix to measure generalisation error.
    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.
    max_iter : integer, optional
        The maximum number of iterations.
    verbose : integer, optional
        The higher the verbosity flag, the more information is printed
        during the fitting.

    Returns
    -------
    covariances_ : List of 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrices.
    precisions_ : List of 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrices.
    scores_ : List of float
        The generalisation error (log-likelihood) on the test data.
        Returned only if test data is passed.
    """
    inner_verbose = max(0, verbose - 1)
    emp_cov = empirical_covariance(X)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init
    covariances_ = list()
    precisions_ = list()
    scores_ = list()
    if X_test is not None:
        test_emp_cov = empirical_covariance(X_test)
    for alpha in alphas:
        try:
            # Capture the errors, and move on
            # Warm start: reuse the covariance estimated at the previous
            # (larger) alpha as the initialization for this one.
            covariance_, precision_ = graph_lasso(
                emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
                enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
            covariances_.append(covariance_)
            precisions_.append(precision_)
            if X_test is not None:
                this_score = log_likelihood(test_emp_cov, precision_)
        except FloatingPointError:
            # Solver failed for this alpha: record NaN placeholders so
            # the output lists stay aligned with `alphas`.
            this_score = -np.inf
            covariances_.append(np.nan)
            precisions_.append(np.nan)
        if X_test is not None:
            if not np.isfinite(this_score):
                this_score = -np.inf
            scores_.append(this_score)
        if verbose == 1:
            sys.stderr.write('.')
        elif verbose > 1:
            if X_test is not None:
                print('[graph_lasso_path] alpha: %.2e, score: %.2e'
                      % (alpha, this_score))
            else:
                print('[graph_lasso_path] alpha: %.2e' % alpha)
    if X_test is not None:
        return covariances_, precisions_, scores_
    return covariances_, precisions_
class GraphLassoCV(GraphLasso):
    """Sparse inverse covariance w/ cross-validated choice of the l1 penalty

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    alphas : integer, or list positive float, optional
        If an integer is given, it fixes the number of points on the
        grids of alpha to be used. If a list is given, it gives the
        grid to be used. See the notes in the class docstring for
        more details.
    n_refinements : strictly positive integer
        The number of times the grid is refined. Not used if explicit
        values of alphas are passed.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.
    max_iter : integer, optional
        Maximum number of iterations.
    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where number of features is greater
        than number of samples. Elsewhere prefer cd which is more numerically
        stable.
    n_jobs : int, optional
        number of jobs to run in parallel (default 1).
    verbose : boolean, optional
        If verbose is True, the objective function and duality gap are
        printed at each iteration.
    assume_centered : Boolean
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    covariance_ : numpy.ndarray, shape (n_features, n_features)
        Estimated covariance matrix.
    precision_ : numpy.ndarray, shape (n_features, n_features)
        Estimated precision matrix (inverse covariance).
    alpha_ : float
        Penalization parameter selected.
    cv_alphas_ : list of float
        All penalization parameters explored.
    grid_scores_ : 2D numpy.ndarray (n_alphas, n_folds)
        Log-likelihood score on left-out data across folds.
    n_iter_ : int
        Number of iterations run for the optimal alpha.

    See Also
    --------
    graph_lasso, GraphLasso

    Notes
    -----
    The search for the optimal penalization parameter (alpha) is done on an
    iteratively refined grid: first the cross-validated scores on a grid are
    computed, then a new refined grid is centered around the maximum, and so
    on.
    One of the challenges which is faced here is that the solvers can
    fail to converge to a well-conditioned estimate. The corresponding
    values of alpha then come out as missing values, but the optimum may
    be close to these missing values.
    """

    def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
                 enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
                 verbose=False, assume_centered=False):
        super(GraphLassoCV, self).__init__(
            mode=mode, tol=tol, verbose=verbose, enet_tol=enet_tol,
            max_iter=max_iter, assume_centered=assume_centered)
        self.alphas = alphas
        self.n_refinements = n_refinements
        self.cv = cv
        self.n_jobs = n_jobs

    # Deprecated alias kept for backward compatibility with the old
    # (un-underscored) attribute name.
    @property
    @deprecated("Attribute grid_scores was deprecated in version 0.19 and "
                "will be removed in 0.21. Use ``grid_scores_`` instead")
    def grid_scores(self):
        return self.grid_scores_

    def fit(self, X, y=None):
        """Fits the GraphLasso covariance model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate
        y : (ignored)
        """
        # Covariance does not make sense for a single feature
        X = check_array(X, ensure_min_features=2, estimator=self)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)
        cv = check_cv(self.cv, y, classifier=False)
        # List of (alpha, scores, covs)
        path = list()
        n_alphas = self.alphas
        inner_verbose = max(0, self.verbose - 1)
        # NOTE(review): collections.Sequence lives in collections.abc on
        # Python 3.3+ and the bare alias was removed in 3.10 — confirm the
        # Python versions this module must support.
        if isinstance(n_alphas, collections.Sequence):
            # Explicit grid of alphas: no iterative refinement.
            alphas = self.alphas
            n_refinements = 1
        else:
            # Start from a coarse logarithmic grid spanning two decades
            # below the largest alpha with non-zero off-diagonals.
            n_refinements = self.n_refinements
            alpha_1 = alpha_max(emp_cov)
            alpha_0 = 1e-2 * alpha_1
            alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
                                 n_alphas)[::-1]
        t0 = time.time()
        for i in range(n_refinements):
            with warnings.catch_warnings():
                # No need to see the convergence warnings on this grid:
                # they will always be points that will not converge
                # during the cross-validation
                warnings.simplefilter('ignore', ConvergenceWarning)
                # Compute the cross-validated loss on the current grid
                # NOTE: Warm-restarting graph_lasso_path has been tried, and
                # this did not allow to gain anything (same execution time with
                # or without).
                # Only 10% of max_iter is spent per CV fit to keep the grid
                # search cheap; the final refit below uses the full budget.
                this_path = Parallel(
                    n_jobs=self.n_jobs,
                    verbose=self.verbose
                )(delayed(graph_lasso_path)(X[train], alphas=alphas,
                                            X_test=X[test], mode=self.mode,
                                            tol=self.tol,
                                            enet_tol=self.enet_tol,
                                            max_iter=int(.1 * self.max_iter),
                                            verbose=inner_verbose)
                  for train, test in cv.split(X, y))
            # Little danse to transform the list in what we need
            covs, _, scores = zip(*this_path)
            covs = zip(*covs)
            scores = zip(*scores)
            path.extend(zip(alphas, scores, covs))
            path = sorted(path, key=operator.itemgetter(0), reverse=True)
            # Find the maximum (avoid using built in 'max' function to
            # have a fully-reproducible selection of the smallest alpha
            # in case of equality)
            best_score = -np.inf
            last_finite_idx = 0
            for index, (alpha, scores, _) in enumerate(path):
                this_score = np.mean(scores)
                # Absurdly large scores indicate a degenerate fit: treat
                # them as missing.
                if this_score >= .1 / np.finfo(np.float64).eps:
                    this_score = np.nan
                if np.isfinite(this_score):
                    last_finite_idx = index
                if this_score >= best_score:
                    best_score = this_score
                    best_index = index
            # Refine the grid
            if best_index == 0:
                # We do not need to go back: we have chosen
                # the highest value of alpha for which there are
                # non-zero coefficients
                alpha_1 = path[0][0]
                alpha_0 = path[1][0]
            elif (best_index == last_finite_idx
                    and not best_index == len(path) - 1):
                # We have non-converged models on the upper bound of the
                # grid, we need to refine the grid there
                alpha_1 = path[best_index][0]
                alpha_0 = path[best_index + 1][0]
            elif best_index == len(path) - 1:
                alpha_1 = path[best_index][0]
                alpha_0 = 0.01 * path[best_index][0]
            else:
                alpha_1 = path[best_index - 1][0]
                alpha_0 = path[best_index + 1][0]
            if not isinstance(n_alphas, collections.Sequence):
                # New grid between the refined bounds; drop the endpoints
                # (already evaluated in the previous round).
                alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
                                     n_alphas + 2)
                alphas = alphas[1:-1]
            if self.verbose and n_refinements > 1:
                print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
                      % (i + 1, n_refinements, time.time() - t0))
        path = list(zip(*path))
        grid_scores = list(path[1])
        alphas = list(path[0])
        # Finally, compute the score with alpha = 0
        alphas.append(0)
        grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
                                           cv=cv, n_jobs=self.n_jobs,
                                           verbose=inner_verbose))
        self.grid_scores_ = np.array(grid_scores)
        best_alpha = alphas[best_index]
        self.alpha_ = best_alpha
        self.cv_alphas_ = alphas
        # Finally fit the model with the selected alpha
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
            enet_tol=self.enet_tol, max_iter=self.max_iter,
            verbose=inner_verbose, return_n_iter=True)
        return self
|
bsd-3-clause
|
nicememory/pie
|
pyglet/tests/unit/media/test_riff.py
|
1
|
2225
|
"""
Test the internal RIFF reader.
"""
import os
import unittest
from pyglet.media.sources.riff import WaveSource
# Absolute path to the shared media test fixtures (../../data/media
# relative to this test module).
test_data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'media'))
class RiffTest(unittest.TestCase):
    """Unit tests for the internal RIFF (WAV) reader."""

    def _load(self, file_name):
        # Build a WaveSource for a fixture in the shared media data dir.
        return WaveSource(os.path.join(test_data_dir, file_name))

    def test_pcm_16_11025_1ch(self):
        source = self._load('alert_pcm_16_11025_1ch.wav')
        self._check_audio_format(source, 1, 16, 11025)
        self._check_data(source, 11584, 0.525)

    def test_pcm_16_22050_1ch(self):
        source = self._load('alert_pcm_16_22050_1ch.wav')
        self._check_audio_format(source, 1, 16, 22050)
        self._check_data(source, 23166, 0.525)

    def test_pcm_8_22050_1ch(self):
        source = self._load('alert_pcm_8_22050_1ch.wav')
        self._check_audio_format(source, 1, 8, 22050)
        self._check_data(source, 11583, 0.525)

    def test_seek(self):
        source = self._load('alert_pcm_16_22050_1ch.wav')
        seek_time = 0.3
        seek_bytes = seek_time * source.audio_format.bytes_per_second
        source.seek(seek_time)
        self._check_data(source, 23166-seek_bytes, 0.225)

    def _check_audio_format(self, source, channels, sample_size, sample_rate):
        # Compare the decoded header fields against expectations.
        fmt = source.audio_format
        self.assertEqual(channels, fmt.channels)
        self.assertEqual(sample_size, fmt.sample_size)
        self.assertEqual(sample_rate, fmt.sample_rate)

    def _check_data(self, source, expected_bytes, expected_duration):
        # Drain the source in 1 KiB chunks, tallying bytes and duration.
        total_bytes = 0
        total_seconds = 0.
        chunk = source.get_audio_data(1024)
        while chunk is not None:
            self.assertEqual(chunk.length, len(chunk.data))
            total_bytes += chunk.length
            total_seconds += chunk.duration
            chunk = source.get_audio_data(1024)
        self.assertAlmostEqual(expected_duration, total_seconds, places=3)
        self.assertAlmostEqual(expected_bytes, total_bytes, delta=5)
|
apache-2.0
|
pemryan/DAKOTA
|
packages/plplot/examples/python/x08.py
|
1
|
1883
|
#!/usr/bin/env python
#
# x08c.c
#
# 3-d plot demo.
from Numeric import *
import math
#import pl
import sys
import os
module_dir = "@MODULE_DIR@"
if module_dir[0] == '@':
module_dir = os.getcwd ()
sys.path.insert (0, module_dir)
XPTS = 35 # Data points in x
YPTS = 46 # Data points in y
opt = [1, 2, 3, 3]
alt = [60.0, 20.0, 60.0, 60.0]
az = [30.0, 60.0, 120.0, 160.0]
title = ["#frPLplot Example 8 - Alt=60, Az=30, Opt=1",
"#frPLplot Example 8 - Alt=20, Az=60, Opt=2",
"#frPLplot Example 8 - Alt=60, Az=120, Opt=3",
"#frPLplot Example 8 - Alt=60, Az=160, Opt=3"]
# main
#
# Does a series of 3-d plots for a given data set, with different
# viewing options in each plot.
def main(w):
    """Draw four 3-d surface plots of the same damped-cosine data with
    different viewing altitudes, azimuths and plot options.
    """
    x = zeros(XPTS, 'd')
    y = zeros(YPTS, 'd')
    z = reshape(zeros(XPTS * YPTS, 'd'), (XPTS, YPTS))

    # Scaled coordinates (integer half-widths mirror the original
    # Python-2 division behaviour).
    half_x = XPTS / 2
    for i in range(XPTS):
        x[i] = float(i - half_x) / float(half_x)
    half_y = YPTS / 2
    for j in range(YPTS):
        y[j] = float(j - half_y) / float(half_y)

    # Radially symmetric damped cosine surface.
    for i in range(XPTS):
        xx = x[i]
        for j in range(YPTS):
            yy = y[j]
            r = math.sqrt(xx * xx + yy * yy)
            z[i, j] = math.exp(-r * r) * math.cos(2.0 * math.pi * r)

    # One page per (altitude, azimuth, option, title) combination.
    for view_alt, view_az, view_opt, view_title in zip(alt, az, opt, title):
        w.pladv(0)
        w.plvpor(0.0, 1.0, 0.0, 0.9)
        w.plwind(-1.0, 1.0, -0.9, 1.1)
        w.plcol(1)
        w.plw3d(1.0, 1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0,
                view_alt, view_az)
        w.plbox3("bnstu", "x axis", 0.0, 0,
                 "bnstu", "y axis", 0.0, 0,
                 "bcdmnstuv", "z axis", 0.0, 0)
        w.plcol(2)
        w.plot3d(x, y, z, view_opt, 1)
        w.plcol(3)
        w.plmtex("t", 1.0, 0.5, 0.5, view_title)
        w.pleop()
## pl.end()
##
##main()
|
lgpl-2.1
|
MaxMorgenstern/EmeraldAI
|
EmeraldAI/Pipelines/ScopeAnalyzer/AnalyzeScope.py
|
1
|
4673
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from EmeraldAI.Logic.Singleton import Singleton
from EmeraldAI.Logic.NLP.SentenceResolver import SentenceResolver
from EmeraldAI.Entities.ContextParameter import ContextParameter
from EmeraldAI.Entities.User import User
from EmeraldAI.Config.Config import Config
from EmeraldAI.Logic.Logger import FileLogger
class AnalyzeScope(object):
__metaclass__ = Singleton
    def __init__(self):
        """Load sentence-pruning switches from the Pipeline.ScopeAnalyzer
        config section (trailing comments note the expected defaults)."""
        self.__RemoveBeforeRequirementCalculation = Config().GetBoolean("Pipeline.ScopeAnalyzer", "RemoveLowPrioritySentencesBeforeRequirement") #False
        self.__RemoveAfterRequirementCalculation = Config().GetBoolean("Pipeline.ScopeAnalyzer", "RemoveLowPrioritySentencesAfterRequirement") #True
        self.__RemoveStopwordOnlySentences = Config().GetBoolean("Pipeline.ScopeAnalyzer", "RemoveStopwordOnlySentences") #True
def Process(self, PipelineArgs):
FileLogger().Info("AnalyzeScope, Process()")
sentenceList = {}
wordParameterList = []
user = User().LoadObject()
for word in PipelineArgs.WordList:
if(len(word.SynonymList) > 0):
wordList = "'{0}'".format("', '".join(word.SynonymList))
sentenceList = SentenceResolver().GetSentencesByKeyword(sentenceList, wordList, word.NormalizedWord, word.Language, True, (user.Admin or user.Trainer))
sentenceList = SentenceResolver().GetSentencesByKeyword(sentenceList, "'{0}'".format(word.NormalizedWord), word.NormalizedWord, word.Language, False, (user.Admin or user.Trainer))
wordParameterList += list(set(word.ParameterList) - set(wordParameterList))
sentenceList = SentenceResolver().GetSentencesByParameter(sentenceList, wordParameterList, PipelineArgs.Language, (user.Admin or user.Trainer))
contextParameter = ContextParameter().LoadObject(240)
contextParameter.UpdateParameter("Wordtype", wordParameterList)
contextParameter.SaveObject()
if self.__RemoveStopwordOnlySentences:
sentenceList = SentenceResolver().RemoveStopwordOnlySentences(sentenceList)
if contextParameter.InteractionName is not None:
sentenceList = SentenceResolver().GetSentenceByInteraction(sentenceList, contextParameter.InteractionName, PipelineArgs.Language, (user.Admin or user.Trainer))
else:
SentenceResolver().AddInteractionBonus(sentenceList)
sentenceParameterList = None
for sentenceID in sentenceList.iterkeys():
if sentenceList[sentenceID].HasInteraction():
for word in PipelineArgs.WordList:
for parameter in word.ParameterList:
interactionName = "{0}{1}".format(sentenceList[sentenceID].InteractionName, parameter)
contextParameter.InteractionData[interactionName.title()] = word.Word
if sentenceParameterList is None:
sentenceParameterList = PipelineArgs.GetInputSentenceParameter()
for sentenceParameter in sentenceParameterList:
interactionName = "{0}{1}".format(sentenceList[sentenceID].InteractionName, sentenceParameter)
dateObject = PipelineArgs.GetParsedStentenceDate()
formatter = "%d.%m.%Y H:%M:%S"
if sentenceParameter is "date":
formatter = "%d.%m.%Y"
if sentenceParameter is "time":
formatter = "%H:%M"
contextParameter.InteractionData[interactionName.title()] = dateObject.strftime(formatter)
contextParameter.SaveObject()
if self.__RemoveBeforeRequirementCalculation:
sentenceList = SentenceResolver().RemoveLowPrioritySentences(sentenceList)
contextParameterDict = contextParameter.GetParameterDictionary()
calculationResult = SentenceResolver().CalculateRequirement(sentenceList, contextParameterDict)
sentenceList = calculationResult["sentenceList"]
if self.__RemoveAfterRequirementCalculation:
sentenceList = SentenceResolver().RemoveLowPrioritySentences(sentenceList, True)
sentenceList = SentenceResolver().AddActionBonus(sentenceList)
sentenceList = SentenceResolver().AddSentencePriority(sentenceList)
sentenceList = SentenceResolver().CalculateCategory(sentenceList, contextParameterDict["Category"])
PipelineArgs.SentenceList = sentenceList
FileLogger().Info("AnalyzeScope, Process(), SentenceList: {0}".format(PipelineArgs.SentenceList))
return PipelineArgs
|
apache-2.0
|
google/llvm-propeller
|
lldb/third_party/Python/module/pexpect-4.6/pexpect/popen_spawn.py
|
16
|
6161
|
"""Provides an interface like pexpect.spawn interface using subprocess.Popen
"""
import os
import threading
import subprocess
import sys
import time
import signal
import shlex
try:
from queue import Queue, Empty # Python 3
except ImportError:
from Queue import Queue, Empty # Python 2
from .spawnbase import SpawnBase, PY3
from .exceptions import EOF
from .utils import string_types
class PopenSpawn(SpawnBase):
    """A pexpect.spawn-like interface built on subprocess.Popen (no PTY).

    The child's stdout and stderr are merged into one pipe; a daemon thread
    drains that pipe into a queue so read_nonblocking() can honour timeouts.
    """
    def __init__(self, cmd, timeout=30, maxread=2000, searchwindowsize=None,
                 logfile=None, cwd=None, env=None, encoding=None,
                 codec_errors='strict', preexec_fn=None):
        super(PopenSpawn, self).__init__(timeout=timeout, maxread=maxread,
                                         searchwindowsize=searchwindowsize, logfile=logfile,
                                         encoding=encoding, codec_errors=codec_errors)
        # Note that `SpawnBase` initializes `self.crlf` to `\r\n`
        # because the default behaviour for a PTY is to convert
        # incoming LF to `\r\n` (see the `onlcr` flag and
        # https://stackoverflow.com/a/35887657/5397009). Here we set
        # it to `os.linesep` because that is what the spawned
        # application outputs by default and `popen` doesn't translate
        # anything.
        if encoding is None:
            self.crlf = os.linesep.encode ("ascii")
        else:
            self.crlf = self.string_type (os.linesep)
        kwargs = dict(bufsize=0, stdin=subprocess.PIPE,
                      stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
                      cwd=cwd, preexec_fn=preexec_fn, env=env)
        if sys.platform == 'win32':
            # Hide the console window and allow CTRL_*_EVENT delivery via
            # a separate process group (see kill() below).
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            kwargs['startupinfo'] = startupinfo
            kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
        # On POSIX a command string is tokenised shell-style; on Windows
        # Popen handles the raw string itself.
        if isinstance(cmd, string_types) and sys.platform != 'win32':
            cmd = shlex.split(cmd, posix=os.name == 'posix')
        self.proc = subprocess.Popen(cmd, **kwargs)
        self.pid = self.proc.pid
        self.closed = False
        self._buf = self.string_type()
        # The reader thread feeds raw byte chunks (or None at EOF) in here.
        self._read_queue = Queue()
        self._read_thread = threading.Thread(target=self._read_incoming)
        self._read_thread.setDaemon(True)
        self._read_thread.start()
    # Set once the reader thread has delivered its EOF marker (None).
    _read_reached_eof = False
    def read_nonblocking(self, size, timeout):
        """Read up to *size* units, waiting at most *timeout* seconds.

        timeout=-1 means use self.timeout; timeout=None means wait
        (effectively) forever. Raises EOF once the child's output is
        exhausted and the internal buffer is empty.
        """
        buf = self._buf
        if self._read_reached_eof:
            # We have already finished reading. Use up any buffered data,
            # then raise EOF
            if buf:
                self._buf = buf[size:]
                return buf[:size]
            else:
                self.flag_eof = True
                raise EOF('End Of File (EOF).')
        if timeout == -1:
            timeout = self.timeout
        elif timeout is None:
            timeout = 1e6
        t0 = time.time()
        while (time.time() - t0) < timeout and size and len(buf) < size:
            try:
                incoming = self._read_queue.get_nowait()
            except Empty:
                break
            else:
                if incoming is None:
                    self._read_reached_eof = True
                    break
                buf += self._decoder.decode(incoming, final=False)
        r, self._buf = buf[:size], buf[size:]
        self._log(r, 'read')
        return r
    def _read_incoming(self):
        """Run in a thread to move output from a pipe to a queue."""
        fileno = self.proc.stdout.fileno()
        while 1:
            buf = b''
            try:
                buf = os.read(fileno, 1024)
            except OSError as e:
                self._log(e, 'read')
            if not buf:
                # This indicates we have reached EOF
                self._read_queue.put(None)
                return
            self._read_queue.put(buf)
    def write(self, s):
        '''This is similar to send() except that there is no return value.
        '''
        self.send(s)
    def writelines(self, sequence):
        '''This calls write() for each element in the sequence.
        The sequence can be any iterable object producing strings, typically a
        list of strings. This does not add line separators. There is no return
        value.
        '''
        for s in sequence:
            self.send(s)
    def send(self, s):
        '''Send data to the subprocess' stdin.
        Returns the number of bytes written.
        '''
        s = self._coerce_send_string(s)
        self._log(s, 'send')
        b = self._encoder.encode(s, final=False)
        if PY3:
            return self.proc.stdin.write(b)
        else:
            # On Python 2, .write() returns None, so we return the length of
            # bytes written ourselves. This assumes they all got written.
            self.proc.stdin.write(b)
            return len(b)
    def sendline(self, s=''):
        '''Wraps send(), sending string ``s`` to child process, with os.linesep
        automatically appended. Returns number of bytes written. '''
        n = self.send(s)
        return n + self.send(self.linesep)
    def wait(self):
        '''Wait for the subprocess to finish.
        Returns the exit code.
        '''
        status = self.proc.wait()
        # Negative status means the child was killed by a signal.
        if status >= 0:
            self.exitstatus = status
            self.signalstatus = None
        else:
            self.exitstatus = None
            self.signalstatus = -status
        self.terminated = True
        return status
    def kill(self, sig):
        '''Sends a Unix signal to the subprocess.
        Use constants from the :mod:`signal` module to specify which signal.
        '''
        if sys.platform == 'win32':
            # Windows can only deliver console control events; map the
            # closest POSIX signals, fall back to termination.
            if sig in [signal.SIGINT, signal.CTRL_C_EVENT]:
                sig = signal.CTRL_C_EVENT
            elif sig in [signal.SIGBREAK, signal.CTRL_BREAK_EVENT]:
                sig = signal.CTRL_BREAK_EVENT
            else:
                sig = signal.SIGTERM
        os.kill(self.proc.pid, sig)
    def sendeof(self):
        '''Closes the stdin pipe from the writing end.'''
        self.proc.stdin.close()
|
apache-2.0
|
apyrgio/ganeti
|
test/py/cmdlib/testsupport/utils_mock.py
|
11
|
1826
|
#
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Support for mocking the utils module"""
from cmdlib.testsupport.util import patchModule
# pylint: disable=C0103
def patchUtils(module_under_test):
  """Return a decorator patching L{ganeti.utils} for the given module's tests.

  @type module_under_test: string
  @param module_under_test: the module within cmdlib which is tested; the
    "ganeti.cmdlib" prefix is optional

  """
  patched_name = "utils"
  return patchModule(module_under_test, patched_name)
|
bsd-2-clause
|
hperala/kontuwikibot
|
tests/upload_tests.py
|
1
|
1395
|
# -*- coding: utf-8 -*-
"""
Site upload test.
These tests write to the wiki.
"""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 4d1b1ea2f42aee542722240c9618cc72b4f1bcbb $'
import os
import pywikibot
from tests import _images_dir
from tests.aspects import unittest, TestCase
class TestUpload(TestCase):

    """Upload tests against the test wiki (these write to the wiki)."""

    write = True
    family = 'wikipedia'
    code = 'test'

    def test_png(self):
        """Test uploading a png using Site.upload."""
        target = pywikibot.FilePage(self.site, 'MP_sounds-pwb.png')
        source_path = os.path.join(_images_dir, 'MP_sounds.png')
        self.site.upload(target, source_filename=source_path,
                         comment='pywikibot test',
                         ignore_warnings=True)

    def test_png_chunked(self):
        """Test uploading a png in two chunks using Site.upload."""
        target = pywikibot.FilePage(self.site, 'MP_sounds-pwb-chunked.png')
        source_path = os.path.join(_images_dir, 'MP_sounds.png')
        self.site.upload(target, source_filename=source_path,
                         comment='pywikibot test',
                         ignore_warnings=True, chunk_size=1024)
if __name__ == '__main__':
    # Swallow SystemExit so unittest.main() cannot abort an enclosing runner.
    try:
        unittest.main()
    except SystemExit:
        pass
|
mit
|
zlorb/mitmproxy
|
mitmproxy/net/http/url.py
|
4
|
3398
|
import urllib.parse
from typing import Sequence
from typing import Tuple
from mitmproxy.net import check
def parse(url):
    """
    URL-parsing function that checks that
    - port is an integer 0-65535
    - host is a valid IDNA-encoded hostname with no null-bytes
    - path is valid ASCII
    Args:
        A URL (as bytes or as unicode)
    Returns:
        A (scheme, host, port, path) tuple
    Raises:
        ValueError, if the URL is not properly formatted.
    """
    parsed = urllib.parse.urlparse(url)
    if not parsed.hostname:
        raise ValueError("No hostname given")
    if isinstance(url, bytes):
        host = parsed.hostname
        # this should not raise a ValueError,
        # but we try to be very forgiving here and accept just everything.
    else:
        host = parsed.hostname.encode("idna")
        # Convert the str-based ParseResult into its bytes counterpart so
        # path/params/query/fragment are uniformly bytes below.
        if isinstance(parsed, urllib.parse.ParseResult):
            parsed = parsed.encode("ascii")
    port = parsed.port  # Returns None if port number invalid in Py3.5. Will throw ValueError in Py3.6
    if not port:
        # Default to the scheme's well-known port when none is given.
        port = 443 if parsed.scheme == b"https" else 80
    # Rebuild everything after the netloc as one path+query+fragment blob.
    full_path = urllib.parse.urlunparse(
        (b"", b"", parsed.path, parsed.params, parsed.query, parsed.fragment)
    )
    if not full_path.startswith(b"/"):
        full_path = b"/" + full_path
    if not check.is_valid_host(host):
        raise ValueError("Invalid Host")
    return parsed.scheme, host, port, full_path
def unparse(scheme, host, port, path=""):
    """
    Returns a URL string, constructed from the specified components.

    Args:
        All args must be str.
    """
    # "*" is the HTTP asterisk-form request target, not a real path.
    if path == "*":
        path = ""
    netloc = hostport(scheme, host, port)
    return "%s://%s%s" % (scheme, netloc, path)
def encode(s: Sequence[Tuple[str, str]], similar_to: str=None) -> str:
    """
    Takes a list of (key, value) tuples and returns a urlencoded string.
    If similar_to is passed, the output is formatted similar to the provided urlencoded string.
    """
    # Mirror the reference string's style: if it contains bare keys
    # (no "="), strip the "=" from empty values in our output too.
    strip_empty_values = bool(similar_to) and any(
        "=" not in param for param in similar_to.split("&")
    )
    encoded = urllib.parse.urlencode(s, False, errors="surrogateescape")
    if encoded and strip_empty_values:
        encoded = encoded.replace("=&", "&")
        if encoded.endswith("="):
            encoded = encoded[:-1]
    return encoded
def decode(s):
    """
    Takes a urlencoded string and returns a list of surrogate-escaped (key, value) tuples.
    """
    return urllib.parse.parse_qsl(
        s, keep_blank_values=True, errors="surrogateescape"
    )
def quote(b: str, safe: str="/") -> str:
    """
    Percent-encode *b*, leaving characters in *safe* untouched.

    Returns:
        An ascii-encodable str.
    """
    return urllib.parse.quote(b, safe=safe, errors="surrogateescape")
def unquote(s: str) -> str:
    """
    Undo percent-encoding.

    Args:
        s: A surrogate-escaped str
    Returns:
        A surrogate-escaped str
    """
    return urllib.parse.unquote(s, errors="surrogateescape")
def hostport(scheme, host, port):
    """
    Returns the host component, with a port specification if needed.
    """
    # The default port for the scheme is omitted (str and bytes schemes).
    defaults = {("http", 80), ("https", 443), (b"http", 80), (b"https", 443)}
    if (scheme, port) in defaults:
        return host
    if isinstance(host, bytes):
        return b"%s:%d" % (host, port)
    return "%s:%d" % (host, port)
|
mit
|
aospcus/android_kernel_htc_flounder
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
5411
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window that renders scheduler traces as coloured rectangles
    on a zoomable, scrollable timeline (one row per traced entity).

    NOTE(review): this file is Python 2 (see the ``raise ImportError, ...``
    syntax above) -- the ``/`` divisions below rely on integer division.
    """
    # Pixel geometry: top margin, row height, inter-row gap, and the height
    # of the thin event-marker strip drawn at the top of a row.
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5
    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)
        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        # Traced time interval, in microseconds.
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()
        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Fit()
        self.Fit()
        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
        self.txt = None
        self.Show(True)
    def us_to_px(self, val):
        # Microseconds -> pixels at the current zoom level.
        return val / (10 ** 3) * self.zoom
    def px_to_us(self, val):
        # Pixels -> microseconds at the current zoom level.
        return (val / self.zoom) * (10 ** 3)
    def scroll_start(self):
        # Current scroll origin, in pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)
    def scroll_start_us(self):
        # Current horizontal scroll origin, in microseconds.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)
    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one rectangle on row *nr* covering [start, end] microseconds;
        *top_color*, when given, paints the thin event-marker strip first."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)
        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT
        dc = self.dc
        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH
        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
    def update_rectangles(self, dc, start, end):
        # Ask the tracer to (re)draw the rectangles in the visible interval.
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)
    def on_paint(self, event):
        """Repaint only the currently visible time interval."""
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc
        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)
    def rect_from_ypixel(self, y):
        # Map a y pixel position to its row index; -1 when the click falls
        # above the rows, past the last row, or in the gap between rows.
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1
        return rect
    def update_summary(self, txt):
        # Replace the summary text shown below the drawing area.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
    def on_mouse_down(self, event):
        """Forward a click to the tracer as (row, timestamp-in-us)."""
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return
        t = self.px_to_us(x) + self.ts_start
        self.sched_tracer.mouse_down(rect, t)
    def update_width_virtual(self):
        # Total drawable width (pixels) for the whole traced interval.
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
    def __zoom(self, x):
        # Re-apply scrollbar geometry after a zoom change, keeping the time
        # position *x* (in us) at the viewport origin.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()
    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)
    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)
    def on_key_press(self, event):
        """Keyboard handling: '+'/'-' zoom, arrow keys scroll."""
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return
        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
|
gpl-2.0
|
linjoahow/2015cdaa-w11
|
static/Brython3.1.1-20150328-091302/Lib/sys.py
|
408
|
4998
|
# hack to return special attributes
from _sys import *
from javascript import JSObject
# Capability flags and metadata exposed by the Brython (JavaScript) runtime.
has_local_storage=__BRYTHON__.has_local_storage
has_session_storage = __BRYTHON__.has_session_storage
has_json=__BRYTHON__.has_json
brython_debug_mode = __BRYTHON__.debug
# CPython sys stand-ins: there is no real interpreter binary, so the
# Brython install path plays the role of the various prefixes.
argv = ['__main__']
base_exec_prefix = __BRYTHON__.brython_path
base_prefix = __BRYTHON__.brython_path
builtin_module_names=__BRYTHON__.builtin_module_names
byteorder='little'
def exc_info():
    """Return (type, value, traceback) for the most recent exception."""
    last = __BRYTHON__.exception_stack[-1]
    return (last.__class__, last, last.traceback)
# More sys stand-ins: brython.js acts as the "interpreter executable".
exec_prefix = __BRYTHON__.brython_path
executable = __BRYTHON__.brython_path+'/brython.js'
def exit(i=None):
    """Terminate the script by raising SystemExit; *i* is accepted but ignored."""
    raise SystemExit('')
class flag_class:
    """Stand-in for sys.flags: every flag off except hash randomization."""
    def __init__(self):
        for flag_name in ('debug', 'inspect', 'interactive', 'optimize',
                          'dont_write_bytecode', 'no_user_site', 'no_site',
                          'ignore_environment', 'verbose', 'bytes_warning',
                          'quiet'):
            setattr(self, flag_name, 0)
        self.hash_randomization = 1
flags = flag_class()
def getfilesystemencoding(*args,**kw):
    """Return the encoding used for filenames; Brython always uses UTF-8.

    Any arguments are accepted and ignored, mirroring the original shim.
    """
    return 'utf-8'
# Fixed CPython-compatible limits and the module search path.
maxsize=2147483647
maxunicode=1114111
path = __BRYTHON__.path
#path_hooks = list(JSObject(__BRYTHON__.path_hooks))
meta_path=__BRYTHON__.meta_path
platform="brython"
prefix = __BRYTHON__.brython_path
# Human-readable version banner assembled from the runtime's version_info.
version = '.'.join(str(x) for x in __BRYTHON__.version_info[:3])
version += " (default, %s) \n[Javascript 1.5] on Brython" % __BRYTHON__.compiled_date
hexversion = 0x03000000 # python 3.0
class __version_info(object):
    """Tuple-like wrapper around Brython's version information.

    Mirrors sys.version_info: indexable and comparable against plain
    tuples on the (major, minor, micro) prefix.
    """
    def __init__(self, version_info):
        self.version_info = version_info
        self.major = version_info[0]
        self.minor = version_info[1]
        self.micro = version_info[2]
        self.releaselevel = version_info[3]
        self.serial = version_info[4]
    def __getitem__(self, index):
        # Lists are converted so indexed items look like immutable tuples.
        if isinstance(self.version_info[index], list):
            return tuple(self.version_info[index])
        return self.version_info[index]
    def hexversion(self):
        try:
            return '0%d0%d0%d' % (self.major, self.minor, self.micro)
        except (TypeError, ValueError):
            # minor/micro may contain non-numeric parts (rc, etc.); fall
            # back to encoding only the major version.
            # BUG FIX: the original used 'finally', whose return value
            # unconditionally overrode the successful return above.
            return '0%d0000' % (self.major)
    def __str__(self):
        _s="sys.version(major=%d, minor=%d, micro=%d, releaselevel='%s', serial=%d)"
        return _s % (self.major, self.minor, self.micro,
            self.releaselevel, self.serial)
    # BUG FIX: the comparison methods raised via the undefined name 'Error'
    # (a NameError at runtime); TypeError is the conventional choice.
    def __eq__(self,other):
        if isinstance(other, tuple):
            return (self.major, self.minor, self.micro) == other
        raise TypeError("Error! I don't know how to compare!")
    def __ge__(self,other):
        if isinstance(other, tuple):
            return (self.major, self.minor, self.micro) >= other
        raise TypeError("Error! I don't know how to compare!")
    def __gt__(self,other):
        if isinstance(other, tuple):
            return (self.major, self.minor, self.micro) > other
        raise TypeError("Error! I don't know how to compare!")
    def __le__(self,other):
        if isinstance(other, tuple):
            return (self.major, self.minor, self.micro) <= other
        raise TypeError("Error! I don't know how to compare!")
    def __lt__(self,other):
        if isinstance(other, tuple):
            return (self.major, self.minor, self.micro) < other
        raise TypeError("Error! I don't know how to compare!")
    def __ne__(self,other):
        if isinstance(other, tuple):
            return (self.major, self.minor, self.micro) != other
        raise TypeError("Error! I don't know how to compare!")
#eventually this needs to be the real python version such as 3.0, 3.1, etc
# Module-level singleton mirroring CPython's sys.version_info.
version_info=__version_info(__BRYTHON__.version_info)
class _implementation:
    """Namespace-like object mirroring CPython's sys.implementation."""
    def __init__(self):
        self.name='brython'
        # Wrapped so it supports indexing/comparison like a version tuple.
        self.version = __version_info(__BRYTHON__.implementation)
        self.hexversion = self.version.hexversion()
        self.cache_tag=None
    def __repr__(self):
        return "namespace(name='%s' version=%s hexversion='%s')" % (self.name, self.version, self.hexversion)
    def __str__(self):
        return "namespace(name='%s' version=%s hexversion='%s')" % (self.name, self.version, self.hexversion)
implementation=_implementation()
class _hash_info:
    """Static description of the hash implementation (cf. sys.hash_info)."""
    def __init__(self):
        # BUG FIX: the original read 'self.width=32,' -- the trailing comma
        # made width the tuple (32,) instead of the int 32.
        self.width=32
        self.modulus=2147483647
        self.inf=314159
        self.nan=0
        self.imag=1000003
        self.algorithm='siphash24'
        self.hash_bits=64
        self.seed_bits=128
        # BUG FIX: 'cutoff=0' was a local variable, so the attribute that
        # the repr advertises was never actually set on the instance.
        self.cutoff=0
    # BUG FIX: originally named '__repr' (missing trailing underscores),
    # so it was never used by repr().
    def __repr__(self):
        #fix me
        return "sys.hash_info(width=32, modulus=2147483647, inf=314159, nan=0, imag=1000003, algorithm='siphash24', hash_bits=64, seed_bits=128, cutoff=0)"
hash_info=_hash_info()
# Command-line warning options (-W); always empty under Brython.
warnoptions=[]
def getfilesystemencoding():
    """Return the filename encoding; always UTF-8 under Brython.

    NOTE(review): this redefines (and shadows) the earlier *args variant
    defined above in this module.
    """
    return 'utf-8'
#delete objects not in python sys module namespace
# (keeps this module's namespace aligned with CPython's sys module)
del JSObject
del _implementation
|
gpl-3.0
|
krikru/tensorflow-opencl
|
tensorflow/python/framework/file_system_test.py
|
18
|
2120
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class FileSystemTest(test.TestCase):
  """Exercises a custom file-system plugin registered via load_library."""

  def setUp(self):
    # The compiled test plugin lives next to this test's data files.
    plugin_path = os.path.join(resource_loader.get_data_files_path(),
                               "test_file_system.so")
    load_library.load_file_system_library(plugin_path)

  def testBasic(self):
    # Reading "test://foo" should be served by the plugin file system.
    with self.test_session() as sess:
      reader = io_ops.WholeFileReader("test_reader")
      filename_queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
      filename_queue.enqueue_many([["test://foo"]]).run()
      filename_queue.close().run()
      key, value = sess.run(reader.read(filename_queue))
      self.assertEqual(key, compat.as_bytes("test://foo"))
      self.assertEqual(value, compat.as_bytes("AAAAAAAAAA"))
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner when executed directly.
  test.main()
|
apache-2.0
|
NotLeet/n0tb0t
|
tests/unit_tests/test_commands.py
|
2
|
6471
|
from enum import Enum, auto
from inspect import getsourcefile
import os
import sys
from unittest.mock import Mock
import pytest
from collections import deque
# Make the repository root importable so 'src' resolves regardless of the
# directory the tests are launched from.
current_path = os.path.abspath(getsourcefile(lambda: 0))
current_dir = os.path.dirname(current_path)
root_dir = os.path.join(current_dir, os.pardir, os.pardir)
sys.path.append(root_dir)
import src.core_modules.commands as commands
from src.message import Message
from src.models import Command
class Service:
    """Minimal stand-in for a chat service, as used by the command mixin."""

    @staticmethod
    def get_message_content(message):
        # The mixin only needs the raw text of the message.
        return message.content

    @staticmethod
    def get_mod_status(message):
        # Moderator status is carried directly on the message object.
        return message.is_mod
class MessageTypes(Enum):
    """Channels a message can arrive on, mirroring the bot's message types."""
    PUBLIC = auto()
    PRIVATE = auto()
@pytest.fixture
def mock_db_session():
    # Stand-in database session; query results are configured per test.
    return Mock()
@pytest.fixture
def command_mixin_obj():
    # CommandsMixin wired with in-memory queues and the fake Service above,
    # so tests can inspect emitted messages without a real chat connection.
    command_mixin_obj = commands.CommandsMixin()
    command_mixin_obj.public_message_queue = deque()
    command_mixin_obj.command_queue = deque()
    command_mixin_obj.service = Service()
    return command_mixin_obj
def test_add_command(command_mixin_obj, mock_db_session):
    """Adding a new !command stores it and confirms in the public queue."""
    # one_or_none() -> None means no existing command with that call name.
    query_val = mock_db_session.query.return_value
    filter_val = query_val.filter.return_value
    filter_val.one_or_none.return_value = None
    command_mixin_obj.add_command(Message(content="!add_command !test this is a test", message_type=MessageTypes.PUBLIC), mock_db_session)
    mock_db_session.add.assert_called()
    assert command_mixin_obj.public_message_queue[0] == 'Command added.'
    assert len(command_mixin_obj.command_queue) == 1
def test_add_command_no_bang(command_mixin_obj, mock_db_session):
    """A command without a leading ! is rejected and nothing is stored."""
    command_mixin_obj.add_command(Message(content="!add_command test this is a test", message_type=MessageTypes.PUBLIC), mock_db_session)
    mock_db_session.add.assert_not_called()
    assert command_mixin_obj.public_message_queue[0] == 'Sorry, the command needs to have an ! in it.'
    assert len(command_mixin_obj.command_queue) == 0
def test_add_command_collision(command_mixin_obj, mock_db_session):
    """Adding a command that already exists is refused."""
    # one_or_none() returning a Command simulates a name collision.
    query_val = mock_db_session.query.return_value
    filter_val = query_val.filter.return_value
    filter_val.one_or_none.return_value = Command(call='test', response="This is a test")
    command_mixin_obj.add_command(Message(content="!add_command !test this is a test", message_type=MessageTypes.PUBLIC), mock_db_session)
    assert command_mixin_obj.public_message_queue[0] == 'Sorry, that command already exists. Please delete it first.'
    assert len(command_mixin_obj.command_queue) == 0
def test_edit_command(command_mixin_obj, mock_db_session):
    """Editing an existing command succeeds and is confirmed."""
    query_val = mock_db_session.query.return_value
    filter_val = query_val.filter.return_value
    filter_val.one_or_none.return_value = Command(call='test', response="This is a test")
    command_mixin_obj.edit_command(Message(content="!edit_command !test this is now different", message_type=MessageTypes.PUBLIC), mock_db_session)
    assert command_mixin_obj.public_message_queue[0] == 'Command edited.'
    assert len(command_mixin_obj.command_queue) == 1
def test_edit_nonexistent_command(command_mixin_obj, mock_db_session):
    """Editing a command that does not exist is refused."""
    query_val = mock_db_session.query.return_value
    filter_val = query_val.filter.return_value
    filter_val.one_or_none.return_value = None
    command_mixin_obj.edit_command(Message(content="!edit_command !test this is now different", message_type=MessageTypes.PUBLIC), mock_db_session)
    assert command_mixin_obj.public_message_queue[0] == 'Sorry, that command does not exist.'
    assert len(command_mixin_obj.command_queue) == 0
def test_delete_command(command_mixin_obj, mock_db_session):
    """Deleting an existing command removes it and confirms."""
    query_val = mock_db_session.query.return_value
    filter_val = query_val.filter.return_value
    filter_val.one_or_none.return_value = Command(call='test', response="This is a test")
    command_mixin_obj.delete_command(Message(content="!delete_command !test", message_type=MessageTypes.PUBLIC), mock_db_session)
    mock_db_session.delete.assert_called()
    assert command_mixin_obj.public_message_queue[0] == 'Command deleted.'
    assert len(command_mixin_obj.command_queue) == 1
def test_delete_nonexistent_command(command_mixin_obj, mock_db_session):
    """Deleting a command that does not exist is refused."""
    query_val = mock_db_session.query.return_value
    filter_val = query_val.filter.return_value
    filter_val.one_or_none.return_value = None
    command_mixin_obj.delete_command(Message(content="!delete_command !test", message_type=MessageTypes.PUBLIC), mock_db_session)
    assert command_mixin_obj.public_message_queue[0] == "Sorry, that command doesn't exist."
    assert len(command_mixin_obj.command_queue) == 0
# These next three are functionally similar to preexisting test functions but do subtly different things
# There is a function called "command" with the ability to do nearly everything.
# These test that functionality.
# Perhaps in the future we should add more test functions for delete nobang, etc.
def test_command_add(command_mixin_obj, mock_db_session):
    """The umbrella !command entry point supports the 'add' action."""
    query_val = mock_db_session.query.return_value
    filter_val = query_val.filter.return_value
    filter_val.one_or_none.return_value = None
    command_mixin_obj.command(Message(content="!command add !test this is a test", message_type=MessageTypes.PUBLIC), mock_db_session)
    mock_db_session.add.assert_called()
    assert command_mixin_obj.public_message_queue[0] == 'Command added.'
    assert len(command_mixin_obj.command_queue) == 1
def test_command_edit(command_mixin_obj, mock_db_session):
    """The umbrella !command entry point supports the 'edit' action."""
    query_val = mock_db_session.query.return_value
    filter_val = query_val.filter.return_value
    filter_val.one_or_none.return_value = Command(call='test', response="This is a test")
    command_mixin_obj.command(Message(content="!command edit !test this is a new test", message_type=MessageTypes.PUBLIC), mock_db_session)
    assert command_mixin_obj.public_message_queue[0] == 'Command edited.'
    assert len(command_mixin_obj.command_queue) == 1
def test_command_delete(command_mixin_obj, mock_db_session):
    """The generic !command handler deletes an existing command row."""
    existing = Command(call='test', response="This is a test")
    lookup = mock_db_session.query.return_value.filter.return_value
    lookup.one_or_none.return_value = existing
    msg = Message(content="!command delete !test", message_type=MessageTypes.PUBLIC)
    command_mixin_obj.command(msg, mock_db_session)
    mock_db_session.delete.assert_called()
    assert command_mixin_obj.public_message_queue[0] == 'Command deleted.'
    assert len(command_mixin_obj.command_queue) == 1
|
gpl-3.0
|
seppius-xbmc-repo/ru
|
plugin.video.shura.tv/resources/lib/demjson.py
|
1
|
90082
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
r""" A JSON data encoder and decoder.
This Python module implements the JSON (http://json.org/) data
encoding format; a subset of ECMAScript (aka JavaScript) for encoding
primitive data types (numbers, strings, booleans, lists, and
associative arrays) in a language-neutral simple text-based syntax.
It can encode or decode between JSON formatted strings and native
Python data types. Normally you would use the encode() and decode()
functions defined by this module, but if you want more control over
the processing you can use the JSON class.
This implementation tries to be as completely conforming to all
intricacies of the standards as possible. It can operate in strict
mode (which only allows JSON-compliant syntax) or a non-strict mode
(which allows much more of the whole ECMAScript permitted syntax).
This includes complete support for Unicode strings (including
surrogate-pairs for non-BMP characters), and all number formats
including negative zero and IEEE 754 non-numbers such as NaN or
Infinity.
The JSON/ECMAScript to Python type mappings are:
---JSON--- ---Python---
null None
undefined undefined (note 1)
Boolean (true,false) bool (True or False)
Integer int or long (note 2)
Float float
String str or unicode ( "..." or u"..." )
Array [a, ...] list ( [...] )
Object {a:b, ...} dict ( {...} )
-- Note 1. an 'undefined' object is declared in this module which
represents the native Python value for this type when in
non-strict mode.
-- Note 2. some ECMAScript integers may be up-converted to Python
floats, such as 1e+40. Also integer -0 is converted to
float -0, so as to preserve the sign (which ECMAScript requires).
In addition, when operating in non-strict mode, several IEEE 754
non-numbers are also handled, and are mapped to specific Python
objects declared in this module:
NaN (not a number) nan (float('nan'))
Infinity, +Infinity inf (float('inf'))
-Infinity neginf (float('-inf'))
When encoding Python objects into JSON, you may use types other than
native lists or dictionaries, as long as they support the minimal
interfaces required of all sequences or mappings. This means you can
use generators and iterators, tuples, UserDict subclasses, etc.
To make it easier to produce JSON encoded representations of user
defined classes, if the object has a method named json_equivalent(),
then it will call that method and attempt to encode the object
returned from it instead. It will do this recursively as needed and
before any attempt to encode the object using it's default
strategies. Note that any json_equivalent() method should return
"equivalent" Python objects to be encoded, not an already-encoded
JSON-formatted string. There is no such aid provided to decode
JSON back into user-defined classes as that would dramatically
complicate the interface.
When decoding strings with this module it may operate in either
strict or non-strict mode. The strict mode only allows syntax which
is conforming to RFC 4627 (JSON), while the non-strict allows much
more of the permissible ECMAScript syntax.
The following are permitted when processing in NON-STRICT mode:
* Unicode format control characters are allowed anywhere in the input.
* All Unicode line terminator characters are recognized.
* All Unicode white space characters are recognized.
* The 'undefined' keyword is recognized.
* Hexadecimal number literals are recognized (e.g., 0xA6, 0177).
* String literals may use either single or double quote marks.
* Strings may contain \x (hexadecimal) escape sequences, as well as the
\v and \0 escape sequences.
* Lists may have omitted (elided) elements, e.g., [,,,,,], with
missing elements interpreted as 'undefined' values.
* Object properties (dictionary keys) can be of any of the
types: string literals, numbers, or identifiers (the latter of
which are treated as if they are string literals)---as permitted
by ECMAScript.  JSON only permits string literals as keys.
Concerning non-strict and non-ECMAScript allowances:
* Octal numbers: If you allow the 'octal_numbers' behavior (which
is never enabled by default), then you can use octal integers
and octal character escape sequences (per the ECMAScript
standard Annex B.1.2). This behavior is allowed, if enabled,
because it was valid JavaScript at one time.
* Multi-line string literals: Strings which are more than one
line long (contain embedded raw newline characters) are never
permitted. This is neither valid JSON nor ECMAScript. Some other
JSON implementations may allow this, but this module considers
that behavior to be a mistake.
References:
* JSON (JavaScript Object Notation)
<http://json.org/>
* RFC 4627. The application/json Media Type for JavaScript Object Notation (JSON)
<http://www.ietf.org/rfc/rfc4627.txt>
* ECMA-262 3rd edition (1999)
<http://www.ecma-international.org/publications/files/ecma-st/ECMA-262.pdf>
* IEEE 754-1985: Standard for Binary Floating-Point Arithmetic.
<http://www.cs.berkeley.edu/~ejr/Projects/ieee754/>
"""
__author__ = "Deron Meranda <http://deron.meranda.us/>"
__date__ = "2008-12-17"
__version__ = "1.4"
__credits__ = """Copyright (c) 2006-2008 Deron E. Meranda <http://deron.meranda.us/>
Licensed under GNU LGPL 3.0 (GNU Lesser General Public License) or
later. See LICENSE.txt included with this software.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
or <http://www.fsf.org/licensing/>.
"""
# ------------------------------
# useful global constants

# MIME content type and conventional file extension for JSON documents.
content_type = 'application/json'
file_ext = 'json'
# Character classes consulted when scanning numeric literals.
hexdigits = '0123456789ABCDEFabcdef'
octaldigits = '01234567'
# ----------------------------------------------------------------------
# Decimal and float types.
#
# If a JSON number can not be stored in a Python float without losing
# precision and Python has the decimal type, then we will try to
# use decimal instead of float. To make this determination we need to
# know the limits of the float type, but Python doesn't have an easy
# way to tell what the largest floating-point number it supports. So,
# we determine the precision and scale of the float type by testing it.
try:
    # decimal module was introduced in Python 2.4
    import decimal
except ImportError:
    # Older interpreter: leave a sentinel so the rest of the module can
    # feature-test with "if decimal:".
    decimal = None
def determine_float_precision():
    """Returns a tuple (significant_digits, max_exponent) for the float type.
    """
    import math
    # repr() of pi shows every significant digit the float type carries.
    # The last two decimal digits may only be partial, so discount them.
    int_part, frac_part = repr(math.pi).split('.')
    sigdigits = len(int_part) + len(frac_part) - 2
    # Binary-search for the largest power of ten that float() accepts
    # without overflowing to infinity or raising an error.
    maxexp = None
    lo, hi = 0, 1000
    while True:
        if lo + 1 == hi:
            maxexp = lo - 1
            break
        if hi < lo:
            maxexp = None
            break
        mid = (lo + hi) // 2
        try:
            probe = repr(float('1e+%d' % mid))
        except ValueError:
            probe = None
        else:
            # Overflow shows up as a non-numeric repr such as 'inf'.
            if not probe or probe[0] < '0' or probe[0] > '9':
                probe = None
        if probe:
            lo = mid   # still finite: search higher
        else:
            hi = mid   # overflowed: search lower
    return sigdigits, maxexp
# Probe the runtime's float type once at import time.  The decoder uses
# these limits to decide when a JSON number needs decimal.Decimal rather
# than a plain float to avoid losing precision.
float_sigdigits, float_maxexp = determine_float_precision()
# ----------------------------------------------------------------------
# The undefined value.
#
# ECMAScript has an undefined value (similar to yet distinct from null).
# Neither Python nor strict JSON supports undefined, but to allow
# JavaScript behavior we must simulate it.
class _undefined_class(object):
    """Represents the ECMAScript 'undefined' value."""
    __slots__ = []  # the singleton carries no per-instance state

    def __repr__(self):
        return self.__module__ + '.undefined'

    def __str__(self):
        return 'undefined'

    def __nonzero__(self):
        # Python 2 truth protocol: 'undefined' is always falsy.
        return False

# Publish a single shared instance and hide the class itself.
undefined = _undefined_class()
del _undefined_class
# ----------------------------------------------------------------------
# Non-Numbers: NaN, Infinity, -Infinity
#
# ECMAScript has official support for non-number floats, although
# strict JSON does not. Python doesn't either. So to support the
# full JavaScript behavior we must try to add them into Python, which
# is unfortunately a bit of black magic. If our python implementation
# happens to be built on top of IEEE 754 we can probably trick python
# into using real floats. Otherwise we must simulate it with classes.
def _nonnumber_float_constants():
    """Try to return the Nan, Infinity, and -Infinity float values.

    This is unnecessarily complex because there is no standard
    platform-independent way to do this in Python as the language
    (as opposed to some implementation of it) doesn't discuss
    non-numbers.  We try various strategies from the best to the
    worst.

    If this Python interpreter uses the IEEE 754 floating point
    standard then the returned values will probably be real instances
    of the 'float' type.  Otherwise a custom class object is returned
    which will attempt to simulate the correct behavior as much as
    possible.
    """
    try:
        # First, try (mostly portable) float constructor.  Works under
        # Linux x86 (gcc) and some Unices.
        nan = float('nan')
        inf = float('inf')
        neginf = float('-inf')
    except ValueError:
        try:
            # Try the AIX (PowerPC) float constructors
            nan = float('NaNQ')
            inf = float('INF')
            neginf = float('-INF')
        except ValueError:
            try:
                # Next, try binary unpacking.  Should work under
                # platforms using IEEE 754 floating point.
                import struct, sys
                xnan = '7ff8000000000000'.decode('hex')  # Quiet NaN
                xinf = '7ff0000000000000'.decode('hex')
                xcheck = 'bdc145651592979d'.decode('hex')  # -3.14159e-11
                # Could use float.__getformat__, but it is a new python feature,
                # so we use sys.byteorder.
                if sys.byteorder == 'big':
                    nan = struct.unpack('d', xnan)[0]
                    inf = struct.unpack('d', xinf)[0]
                    check = struct.unpack('d', xcheck)[0]
                else:
                    nan = struct.unpack('d', xnan[::-1])[0]
                    inf = struct.unpack('d', xinf[::-1])[0]
                    check = struct.unpack('d', xcheck[::-1])[0]
                neginf = - inf
                if check != -3.14159e-11:
                    raise ValueError('Unpacking raw IEEE 754 floats does not work')
            except (ValueError, TypeError):
                # Punt, make some fake classes to simulate.  These are
                # not perfect though.  For instance nan * 1.0 == nan,
                # as expected, but 1.0 * nan == 0.0, which is wrong.
                class nan(float):
                    """An approximation of the NaN (not a number) floating point number."""
                    def __repr__(self): return 'nan'
                    def __str__(self): return 'nan'
                    def __add__(self,x): return self
                    def __radd__(self,x): return self
                    def __sub__(self,x): return self
                    def __rsub__(self,x): return self
                    def __mul__(self,x): return self
                    def __rmul__(self,x): return self
                    def __div__(self,x): return self
                    def __rdiv__(self,x): return self
                    def __divmod__(self,x): return (self,self)
                    def __rdivmod__(self,x): return (self,self)
                    def __mod__(self,x): return self
                    def __rmod__(self,x): return self
                    def __pow__(self,exp): return self
                    def __rpow__(self,exp): return self
                    def __neg__(self): return self
                    def __pos__(self): return self
                    def __abs__(self): return self
                    # NaN compares unequal (and unordered) to everything,
                    # including itself, per IEEE 754.
                    def __lt__(self,x): return False
                    def __le__(self,x): return False
                    def __eq__(self,x): return False
                    def __neq__(self,x): return True
                    def __ge__(self,x): return False
                    def __gt__(self,x): return False
                    def __complex__(self,*a): raise NotImplementedError('NaN can not be converted to a complex')
                if decimal:
                    nan = decimal.Decimal('NaN')
                else:
                    nan = nan()
                class inf(float):
                    """An approximation of the +Infinity floating point number."""
                    def __repr__(self): return 'inf'
                    def __str__(self): return 'inf'
                    def __add__(self,x): return self
                    def __radd__(self,x): return self
                    def __sub__(self,x): return self
                    def __rsub__(self,x): return self
                    def __mul__(self,x):
                        if x is neginf or x < 0:
                            return neginf
                        elif x == 0:
                            return nan
                        else:
                            return self
                    def __rmul__(self,x): return self.__mul__(x)
                    def __div__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float division')
                        elif x < 0:
                            return neginf
                        else:
                            return self
                    def __rdiv__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return nan
                        return 0.0
                    def __divmod__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float divmod()')
                        elif x < 0:
                            return (nan,nan)
                        else:
                            return (self,self)
                    def __rdivmod__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return (nan, nan)
                        return (0.0, x)
                    def __mod__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float modulo')
                        else:
                            return nan
                    def __rmod__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return nan
                        return x
                    def __pow__(self, exp):
                        if exp == 0:
                            return 1.0
                        else:
                            return self
                    def __rpow__(self, x):
                        if -1 < x < 1: return 0.0
                        elif x == 1.0: return 1.0
                        elif x is nan or x is neginf or x < 0:
                            return nan
                        else:
                            return self
                    def __neg__(self): return neginf
                    def __pos__(self): return self
                    def __abs__(self): return self
                    def __lt__(self,x): return False
                    def __le__(self,x):
                        if x is self:
                            return True
                        else:
                            return False
                    def __eq__(self,x):
                        if x is self:
                            return True
                        else:
                            return False
                    def __neq__(self,x):
                        if x is self:
                            return False
                        else:
                            return True
                    def __ge__(self,x): return True
                    def __gt__(self,x): return True
                    def __complex__(self,*a): raise NotImplementedError('Infinity can not be converted to a complex')
                if decimal:
                    inf = decimal.Decimal('Infinity')
                else:
                    inf = inf()
                class neginf(float):
                    """An approximation of the -Infinity floating point number."""
                    def __repr__(self): return '-inf'
                    def __str__(self): return '-inf'
                    def __add__(self,x): return self
                    def __radd__(self,x): return self
                    def __sub__(self,x): return self
                    def __rsub__(self,x): return self
                    def __mul__(self,x):
                        if x is self or x < 0:
                            return inf
                        elif x == 0:
                            return nan
                        else:
                            return self
                    # BUG FIX: previously called self.__mul__(self), which made
                    # 'x * neginf' always return +inf regardless of x's sign.
                    def __rmul__(self,x): return self.__mul__(x)
                    def __div__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float division')
                        elif x < 0:
                            return inf
                        else:
                            return self
                    def __rdiv__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return nan
                        return -0.0
                    def __divmod__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float divmod()')
                        elif x < 0:
                            return (nan,nan)
                        else:
                            return (self,self)
                    def __rdivmod__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return (nan, nan)
                        return (-0.0, x)
                    def __mod__(self,x):
                        if x == 0:
                            raise ZeroDivisionError('float modulo')
                        else:
                            return nan
                    def __rmod__(self,x):
                        if x is inf or x is neginf or x is nan:
                            return nan
                        return x
                    def __pow__(self,exp):
                        if exp == 0:
                            return 1.0
                        else:
                            return self
                    def __rpow__(self, x):
                        # BUG FIX: the original tested 'x is inf' twice and
                        # never tested 'x is neginf'.
                        if x is nan or x is inf or x is neginf:
                            return nan
                        return 0.0
                    def __neg__(self): return inf
                    def __pos__(self): return self
                    def __abs__(self): return inf
                    def __lt__(self,x): return True
                    def __le__(self,x): return True
                    def __eq__(self,x):
                        if x is self:
                            return True
                        else:
                            return False
                    def __neq__(self,x):
                        if x is self:
                            return False
                        else:
                            return True
                    def __ge__(self,x):
                        if x is self:
                            return True
                        else:
                            return False
                    def __gt__(self,x): return False
                    def __complex__(self,*a): raise NotImplementedError('-Infinity can not be converted to a complex')
                if decimal:
                    neginf = decimal.Decimal('-Infinity')
                else:
                    neginf = neginf(0)
    return nan, inf, neginf

# Module-level non-number singletons used throughout the codec.
nan, inf, neginf = _nonnumber_float_constants()
del _nonnumber_float_constants
# ----------------------------------------------------------------------
# String processing helpers

# Characters that may not appear raw inside a JSON string: the closing
# quote, the escape introducer, and all C0 control characters.
unsafe_string_chars = '"\\' + ''.join([chr(i) for i in range(0x20)])

def skipstringsafe( s, start=0, end=None ):
    """Scan s from 'start' and return the index of the first character
    that is unsafe inside a JSON string (quote, backslash, or control
    character); returns 'end' (or len(s)) if every character is safe.
    """
    # BUG FIX: 'end' was never defaulted (the fix was commented out), so
    # calling without an explicit 'end' returned 'start' immediately under
    # Python 2 (int < None is False) and raised TypeError under Python 3.
    if end is None:
        end = len(s)
    i = start
    while i < end and s[i] not in unsafe_string_chars:
        i += 1
    return i
def skipstringsafe_slow( s, start=0, end=None ):
    """Like skipstringsafe(), but tests each character explicitly instead
    of using a precomputed unsafe-character set."""
    if end is None:
        end = len(s)
    pos = start
    while pos < end:
        ch = s[pos]
        # Stop at the string terminator, an escape, or any control char.
        if ch == '"' or ch == '\\' or ord(ch) <= 0x1f:
            break
        pos += 1
    return pos
def extend_list_with_sep( orig_seq, extension_seq, sepchar='' ):
if not sepchar:
orig_seq.extend( extension_seq )
else:
for i, x in enumerate(extension_seq):
if i > 0:
orig_seq.append( sepchar )
orig_seq.append( x )
def extend_and_flatten_list_with_sep( orig_seq, extension_seq, separator='' ):
    """Append the *contents* of each item of extension_seq onto orig_seq,
    placing separator between the flattened groups when it is non-empty."""
    first = True
    for part in extension_seq:
        if not first and separator:
            orig_seq.append( separator )
        orig_seq.extend( part )
        first = False
# ----------------------------------------------------------------------
# Unicode helpers
#
# JSON requires that all JSON implementations must support the UTF-32
# encoding (as well as UTF-8 and UTF-16). But earlier versions of
# Python did not provide a UTF-32 codec. So we must implement UTF-32
# ourselves in case we need it.
def utf32le_encode( obj, errors='strict' ):
    """Encodes a Unicode string into a UTF-32LE encoded byte string.

    'errors' follows the codec convention: 'strict' raises UnicodeError
    on surrogate code points, 'ignore' drops them, and 'replace'
    substitutes a '?'.
    """
    import struct
    try:
        # Prefer the C-accelerated StringIO (Python 2 only).
        import cStringIO as sio
    except ImportError:
        import StringIO as sio
    f = sio.StringIO()
    # Hoist the bound methods out of the per-character loop.
    write = f.write
    pack = struct.pack
    for c in obj:
        n = ord(c)
        if 0xD800 <= n <= 0xDFFF: # surrogate codepoints are prohibited by UTF-32
            if errors == 'ignore':
                continue
            elif errors == 'replace':
                n = ord('?')
            else:
                cname = 'U+%04X'%n
                raise UnicodeError('UTF-32 can not encode surrogate characters',cname)
        # Emit each code point as one 4-byte little-endian word.
        write( pack('<L', n) )
    return f.getvalue()
def utf32be_encode( obj, errors='strict' ):
    """Encodes a Unicode string into a UTF-32BE encoded byte string.

    'errors' follows the codec convention: 'strict' raises UnicodeError
    on surrogate code points, 'ignore' drops them, and 'replace'
    substitutes a '?'.
    """
    import struct
    try:
        # Prefer the C-accelerated StringIO (Python 2 only).
        import cStringIO as sio
    except ImportError:
        import StringIO as sio
    f = sio.StringIO()
    # Hoist the bound methods out of the per-character loop.
    write = f.write
    pack = struct.pack
    for c in obj:
        n = ord(c)
        if 0xD800 <= n <= 0xDFFF: # surrogate codepoints are prohibited by UTF-32
            if errors == 'ignore':
                continue
            elif errors == 'replace':
                n = ord('?')
            else:
                cname = 'U+%04X'%n
                raise UnicodeError('UTF-32 can not encode surrogate characters',cname)
        # Emit each code point as one 4-byte big-endian word.
        write( pack('>L', n) )
    return f.getvalue()
def utf32le_decode( obj, errors='strict' ):
    """Decodes a UTF-32LE byte string into a Unicode string."""
    if len(obj) % 4 != 0:
        raise UnicodeError('UTF-32 decode error, data length not a multiple of 4 bytes')
    import struct
    unpack = struct.unpack
    # Each 4-byte little-endian word is one code point.
    return u''.join([unichr(unpack('<L', obj[k:k+4])[0])
                     for k in range(0, len(obj), 4)])
def utf32be_decode( obj, errors='strict' ):
    """Decodes a UTF-32BE byte string into a Unicode string."""
    if len(obj) % 4 != 0:
        raise UnicodeError('UTF-32 decode error, data length not a multiple of 4 bytes')
    import struct
    unpack = struct.unpack
    # Each 4-byte big-endian word is one code point.
    return u''.join([unichr(unpack('>L', obj[k:k+4])[0])
                     for k in range(0, len(obj), 4)])
def auto_unicode_decode( s ):
    """Takes a string and tries to convert it to a Unicode string.
    This will return a Python unicode string type corresponding to the
    input string (either str or unicode).  The character encoding is
    guessed by looking for either a Unicode BOM prefix, or by the
    rules specified by RFC 4627.  When in doubt it is assumed the
    input is encoded in UTF-8 (the default for JSON).
    """
    if isinstance(s, unicode):
        return s   # already decoded; nothing to do
    if len(s) < 4:
        return s.decode('utf8') # not enough bytes, assume default of utf-8
    # Look for BOM marker
    import codecs
    bom2 = s[:2]
    bom4 = s[:4]
    a, b, c, d = map(ord, s[:4])  # values of first four bytes
    if bom4 == codecs.BOM_UTF32_LE:
        encoding = 'utf-32le'
        s = s[4:]   # strip the BOM itself from the payload
    elif bom4 == codecs.BOM_UTF32_BE:
        encoding = 'utf-32be'
        s = s[4:]
    elif bom2 == codecs.BOM_UTF16_LE:
        encoding = 'utf-16le'
        s = s[2:]
    elif bom2 == codecs.BOM_UTF16_BE:
        encoding = 'utf-16be'
        s = s[2:]
    # No BOM, so autodetect encoding used by looking at first four bytes
    # according to RFC 4627 section 3.  The NUL-byte pattern reveals the
    # width and endianness because a JSON text starts with ASCII.
    elif a==0 and b==0 and c==0 and d!=0: # UTF-32BE
        encoding = 'utf-32be'
    elif a==0 and b!=0 and c==0 and d!=0: # UTF-16BE
        encoding = 'utf-16be'
    elif a!=0 and b==0 and c==0 and d==0: # UTF-32LE
        encoding = 'utf-32le'
    elif a!=0 and b==0 and c!=0 and d==0: # UTF-16LE
        encoding = 'utf-16le'
    else: #if a!=0 and b!=0 and c!=0 and d!=0: # UTF-8
        # JSON spec says default is UTF-8, so always guess it
        # if we can't guess otherwise
        encoding = 'utf8'
    # Make sure the encoding is supported by Python
    try:
        cdk = codecs.lookup(encoding)
    except LookupError:
        if encoding.startswith('utf-32') \
               or encoding.startswith('ucs4') \
               or encoding.startswith('ucs-4'):
            # Python doesn't natively have a UTF-32 codec, but JSON
            # requires that it be supported.  So we must decode these
            # manually using the helpers defined above.
            if encoding.endswith('le'):
                unis = utf32le_decode(s)
            else:
                unis = utf32be_decode(s)
        else:
            raise JSONDecodeError('this python has no codec for this character encoding',encoding)
    else:
        # Convert to unicode using a standard codec
        unis = s.decode(encoding)
    return unis
def surrogate_pair_as_unicode( c1, c2 ):
    """Takes a pair of unicode surrogates and returns the equivalent unicode character.
    The input pair must be a surrogate pair, with c1 in the range
    U+D800 to U+DBFF and c2 in the range U+DC00 to U+DFFF.
    """
    hi, lo = ord(c1), ord(c2)
    if not (0xD800 <= hi <= 0xDBFF) or not (0xDC00 <= lo <= 0xDFFF):
        raise JSONDecodeError('illegal Unicode surrogate pair',(c1,c2))
    # UTF-16 decoding: combine the two 10-bit halves, then rebase the
    # result past the Basic Multilingual Plane.
    code_point = (((hi - 0xD800) << 10) | (lo - 0xDC00)) + 0x10000
    return unichr(code_point)
def unicode_as_surrogate_pair( c ):
    """Takes a single unicode character and returns a sequence of surrogate pairs.
    The output of this function is a tuple consisting of one or two unicode
    characters, such that if the input character is outside the BMP range
    then the output is a two-character surrogate pair representing that character.
    If the input character is inside the BMP then the output tuple will have
    just a single character...the same one.
    """
    cp = ord(c)
    if cp < 0x10000:
        # Inside the BMP: no surrogate pair is required.
        return (unichr(cp),)
    # UTF-16 encoding: rebase to the supplementary planes and split into
    # high and low 10-bit halves.
    cp -= 0x10000
    w1 = 0xD800 | ((cp >> 10) & 0x3ff)
    w2 = 0xDC00 | (cp & 0x3ff)
    return (unichr(w1), unichr(w2))
# ----------------------------------------------------------------------
# Type identification
def isnumbertype( obj ):
    """Is the object of a Python number type (excluding complex)?"""
    # The non-number singletons count as numbers even though some of
    # them may not be real float instances.
    if obj is nan or obj is inf or obj is neginf:
        return True
    return isinstance(obj, (int, long, float)) and not isinstance(obj, bool)
def isstringtype( obj ):
    """Is the object of a Python string type?"""
    if isinstance(obj, basestring):
        return True
    # Must also check for some other pseudo-string types
    # (Python 2's UserString wrappers behave like strings but do not
    # subclass basestring).
    import types, UserString
    return isinstance(obj, types.StringTypes) \
           or isinstance(obj, UserString.UserString) \
           or isinstance(obj, UserString.MutableString)
# ----------------------------------------------------------------------
# Numeric helpers
def decode_hex( hexstring ):
    """Decodes a hexadecimal string into its integer value."""
    # Hand-rolled rather than using the builtin 'hex' codec so that
    # odd-length input works and errors raise JSONDecodeError.
    value = 0
    for ch in hexstring:
        if '0' <= ch <= '9':
            digit = ord(ch) - ord('0')
        elif 'a' <= ch <= 'f':
            digit = ord(ch) - ord('a') + 10
        elif 'A' <= ch <= 'F':
            digit = ord(ch) - ord('A') + 10
        else:
            raise JSONDecodeError('not a hexadecimal number',hexstring)
        # Could use ((value << 4) | digit), but python 2.3 issues a FutureWarning.
        value = (value * 16) + digit
    return value
def decode_octal( octalstring ):
    """Decodes an octal string into its integer value."""
    value = 0
    for ch in octalstring:
        if not ('0' <= ch <= '7'):
            raise JSONDecodeError('not an octal number',octalstring)
        # Could use ((value << 3) | d), but python 2.3 issues a FutureWarning.
        value = (value * 8) + (ord(ch) - ord('0'))
    return value
# ----------------------------------------------------------------------
# Exception classes.
class JSONError(ValueError):
    """Our base class for all JSON-related errors.
    """
    def pretty_description(self):
        """Return a short human-readable description of this error.

        Formats as "message: arg1, arg2, ..." with each extra argument
        repr()'d and truncated to 20 characters.
        """
        err = self.args[0]
        if len(self.args) > 1:
            err += ': '
        for anum, a in enumerate(self.args[1:]):
            # BUG FIX: was 'anum > 1', which glued the first two extra
            # arguments together with no comma between them.
            if anum > 0:
                err += ', '
            astr = repr(a)
            if len(astr) > 20:
                astr = astr[:20] + '...'
            err += astr
        return err
class JSONDecodeError(JSONError):
    """An exception class raised when a JSON decoding error (syntax error) occurs in the input."""
class JSONEncodeError(JSONError):
    """An exception class raised when a python object can not be encoded as a JSON string."""
#----------------------------------------------------------------------
# The main JSON encoder/decoder class.
class JSON(object):
"""An encoder/decoder for JSON data streams.
Usually you will call the encode() or decode() methods. The other
methods are for lower-level processing.
Whether the JSON parser runs in strict mode (which enforces exact
compliance with the JSON spec) or the more forgiving non-strict mode
can be affected by setting the 'strict' argument in the object's
initialization; or by assigning True or False to the 'strict'
property of the object.
You can also adjust a finer-grained control over strictness by
allowing or preventing specific behaviors. You can get a list of
all the available behaviors by accessing the 'behaviors' property.
Likewise the allowed_behaviors and prevented_behaviors list which
behaviors will be allowed and which will not. Call the allow()
or prevent() methods to adjust these.
"""
_escapes_json = { # character escapes in JSON
'"': '"',
'/': '/',
'\\': '\\',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t',
}
_escapes_js = { # character escapes in Javascript
'"': '"',
'\'': '\'',
'\\': '\\',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t',
'v': '\v',
'0': '\x00'
}
# Following is a reverse mapping of escape characters, used when we
# output JSON. Only those escapes which are always safe (e.g., in JSON)
# are here. It won't hurt if we leave questionable ones out.
_rev_escapes = {'\n': '\\n',
'\t': '\\t',
'\b': '\\b',
'\r': '\\r',
'\f': '\\f',
'"': '\\"',
'\\': '\\\\'}
def __init__(self, strict=False, compactly=True, escape_unicode=False):
"""Creates a JSON encoder/decoder object.
If 'strict' is set to True, then only strictly-conforming JSON
output will be produced. Note that this means that some types
of values may not be convertable and will result in a
JSONEncodeError exception.
If 'compactly' is set to True, then the resulting string will
have all extraneous white space removed; if False then the
string will be "pretty printed" with whitespace and indentation
added to make it more readable.
If 'escape_unicode' is set to True, then all non-ASCII characters
will be represented as a unicode escape sequence; if False then
the actual real unicode character will be inserted if possible.
The 'escape_unicode' can also be a function, which when called
with a single argument of a unicode character will return True
if the character should be escaped or False if it should not.
If you wish to extend the encoding to ba able to handle
additional types, you should subclass this class and override
the encode_default() method.
"""
import sys
self._set_strictness(strict)
self._encode_compactly = compactly
try:
# see if we were passed a predicate function
b = escape_unicode(u'A')
self._encode_unicode_as_escapes = escape_unicode
except (ValueError, NameError, TypeError):
# Just set to True or False. We could use lambda x:True
# to make it more consistent (always a function), but it
# will be too slow, so we'll make explicit tests later.
self._encode_unicode_as_escapes = bool(escape_unicode)
self._sort_dictionary_keys = True
# The following is a boolean map of the first 256 characters
# which will quickly tell us which of those characters never
# need to be escaped.
self._asciiencodable = [32 <= c < 128 and not self._rev_escapes.has_key(chr(c))
for c in range(0,255)]
def _set_strictness(self, strict):
"""Changes the strictness behavior.
Pass True to be very strict about JSON syntax, or False to be looser.
"""
self._allow_any_type_at_start = not strict
self._allow_all_numeric_signs = not strict
self._allow_comments = not strict
self._allow_control_char_in_string = not strict
self._allow_hex_numbers = not strict
self._allow_initial_decimal_point = not strict
self._allow_js_string_escapes = not strict
self._allow_non_numbers = not strict
self._allow_nonescape_characters = not strict # "\z" -> "z"
self._allow_nonstring_keys = not strict
self._allow_omitted_array_elements = not strict
self._allow_single_quoted_strings = not strict
self._allow_trailing_comma_in_literal = not strict
self._allow_undefined_values = not strict
self._allow_unicode_format_control_chars = not strict
self._allow_unicode_whitespace = not strict
# Always disable this by default
self._allow_octal_numbers = False
def allow(self, behavior):
"""Allow the specified behavior (turn off a strictness check).
The list of all possible behaviors is available in the behaviors property.
You can see which behaviors are currently allowed by accessing the
allowed_behaviors property.
"""
p = '_allow_' + behavior
if hasattr(self, p):
setattr(self, p, True)
else:
raise AttributeError('Behavior is not known',behavior)
def prevent(self, behavior):
"""Prevent the specified behavior (turn on a strictness check).
The list of all possible behaviors is available in the behaviors property.
You can see which behaviors are currently prevented by accessing the
prevented_behaviors property.
"""
p = '_allow_' + behavior
if hasattr(self, p):
setattr(self, p, False)
else:
raise AttributeError('Behavior is not known',behavior)
def _get_behaviors(self):
return sorted([ n[len('_allow_'):] for n in self.__dict__ \
if n.startswith('_allow_')])
behaviors = property(_get_behaviors,
doc='List of known behaviors that can be passed to allow() or prevent() methods')
def _get_allowed_behaviors(self):
return sorted([ n[len('_allow_'):] for n in self.__dict__ \
if n.startswith('_allow_') and getattr(self,n)])
allowed_behaviors = property(_get_allowed_behaviors,
doc='List of known behaviors that are currently allowed')
def _get_prevented_behaviors(self):
return sorted([ n[len('_allow_'):] for n in self.__dict__ \
if n.startswith('_allow_') and not getattr(self,n)])
prevented_behaviors = property(_get_prevented_behaviors,
doc='List of known behaviors that are currently prevented')
def _is_strict(self):
return not self.allowed_behaviors
strict = property(_is_strict, _set_strictness,
doc='True if adherence to RFC 4627 syntax is strict, or False is more generous ECMAScript syntax is permitted')
def isws(self, c):
"""Determines if the given character is considered as white space.
Note that Javscript is much more permissive on what it considers
to be whitespace than does JSON.
Ref. ECMAScript section 7.2
"""
if not self._allow_unicode_whitespace:
return c in ' \t\n\r'
else:
if not isinstance(c,unicode):
c = unicode(c)
if c in u' \t\n\r\f\v':
return True
import unicodedata
return unicodedata.category(c) == 'Zs'
def islineterm(self, c):
"""Determines if the given character is considered a line terminator.
Ref. ECMAScript section 7.3
"""
if c == '\r' or c == '\n':
return True
if c == u'\u2028' or c == u'\u2029': # unicodedata.category(c) in ['Zl', 'Zp']
return True
return False
    def strip_format_control_chars(self, txt):
        """Filters out all Unicode format control characters from the string.
        ECMAScript permits any Unicode "format control characters" to
        appear at any place in the source code.  They are to be
        ignored as if they are not there before any other lexical
        tokenization occurs.  Note that JSON does not allow them.
        Ref. ECMAScript section 7.1.
        """
        import unicodedata
        # Keep only characters whose Unicode category is not 'Cf' (format).
        # NOTE: filter() preserves the input's string type under Python 2
        # (str in, str out; unicode in, unicode out).
        txt2 = filter( lambda c: unicodedata.category(unicode(c)) != 'Cf',
                       txt )
        return txt2
def decode_null(self, s, i=0):
"""Intermediate-level decoder for ECMAScript 'null' keyword.
Takes a string and a starting index, and returns a Python
None object and the index of the next unparsed character.
"""
if i < len(s) and s[i:i+4] == 'null':
return None, i+4
raise JSONDecodeError('literal is not the JSON "null" keyword', s)
def encode_undefined(self):
"""Produces the ECMAScript 'undefined' keyword."""
return 'undefined'
def encode_null(self):
"""Produces the JSON 'null' keyword."""
return 'null'
def decode_boolean(self, s, i=0):
"""Intermediate-level decode for JSON boolean literals.
Takes a string and a starting index, and returns a Python bool
(True or False) and the index of the next unparsed character.
"""
if s[i:i+4] == 'true':
return True, i+4
elif s[i:i+5] == 'false':
return False, i+5
raise JSONDecodeError('literal value is not a JSON boolean keyword',s)
def encode_boolean(self, b):
"""Encodes the Python boolean into a JSON Boolean literal."""
if bool(b):
return 'true'
return 'false'
    def decode_number(self, s, i=0, imax=None):
        """Intermediate-level decoder for JSON numeric literals.

        Takes a string and a starting index, and returns a Python
        suitable numeric type and the index of the next unparsed character.

        The returned numeric type can be either of a Python int,
        long, or float.  In addition some special non-numbers may
        also be returned such as nan, inf, and neginf (technically
        which are Python floats, but have no numeric value.)

        Depending on the permissiveness flags this also understands
        hexadecimal and octal integers, repeated or explicit '+' signs,
        and the symbolic NaN/Infinity values beyond strict JSON syntax.

        Ref. ECMAScript section 8.5.
        """
        if imax is None:
            imax = len(s)
        # Detect initial sign character(s).  Strict JSON permits at most a
        # single leading '-'; ECMAScript also allows '+' and repeated signs.
        if not self._allow_all_numeric_signs:
            if s[i] == '+' or (s[i] == '-' and i+1 < imax and \
                               s[i+1] in '+-'):
                raise JSONDecodeError('numbers in strict JSON may only have a single "-" as a sign prefix',s[i:])
        sign = +1
        j = i  # j will point after the sign prefix
        while j < imax and s[j] in '+-':
            if s[j] == '-': sign = sign * -1
            j += 1
        # Check for ECMAScript symbolic non-numbers
        if s[j:j+3] == 'NaN':
            if self._allow_non_numbers:
                return nan, j+3
            else:
                # NOTE(review): unlike the other raises in this method, this
                # one does not pass the offending text as a second argument.
                raise JSONDecodeError('NaN literals are not allowed in strict JSON')
        elif s[j:j+8] == 'Infinity':
            if self._allow_non_numbers:
                if sign < 0:
                    return neginf, j+8
                else:
                    return inf, j+8
            else:
                # NOTE(review): position argument is also missing here.
                raise JSONDecodeError('Infinity literals are not allowed in strict JSON')
        elif s[j:j+2] in ('0x','0X'):
            # Hexadecimal integer -- an ECMAScript-only extension.
            if self._allow_hex_numbers:
                k = j+2
                while k < imax and s[k] in hexdigits:
                    k += 1
                n = sign * decode_hex( s[j+2:k] )
                return n, k
            else:
                raise JSONDecodeError('hexadecimal literals are not allowed in strict JSON',s[i:])
        else:
            # Decimal (or octal) number, find end of number.
            # General syntax is:  \d+[\.\d+][e[+-]?\d+]
            k = j   # will point to end of digit sequence
            could_be_octal = ( k+1 < imax and s[k] == '0' )  # first digit is 0
            decpt = None  # index into number of the decimal point, if any
            ept = None # index into number of the e|E exponent start, if any
            esign = '+' # sign of exponent
            sigdigits = 0 # number of significant digits (approx, counts end zeros)
            # Scan to the end of the numeric token, recording where the
            # decimal point and exponent marker (if any) occur.
            while k < imax and (s[k].isdigit() or s[k] in '.+-eE'):
                c = s[k]
                if c not in octaldigits:
                    could_be_octal = False
                if c == '.':
                    if decpt is not None or ept is not None:
                        break
                    else:
                        decpt = k-j
                elif c in 'eE':
                    if ept is not None:
                        break
                    else:
                        ept = k-j
                elif c in '+-':
                    if not ept:
                        break
                    esign = c
                else: #digit
                    if not ept:
                        sigdigits += 1
                k += 1
            number = s[j:k]  # The entire number as a string
            #print 'NUMBER IS: ', repr(number), ', sign', sign, ', esign', esign, \
            #      ', sigdigits', sigdigits, \
            #      ', decpt', decpt, ', ept', ept

            # Handle octal integers first as an exception.  If octal
            # is not enabled (the ECMAScipt standard) then just do
            # nothing and treat the string as a decimal number.
            if could_be_octal and self._allow_octal_numbers:
                n = sign * decode_octal( number )
                return n, k

            # A decimal number.  Do a quick check on JSON syntax restrictions.
            if number[0] == '.' and not self._allow_initial_decimal_point:
                raise JSONDecodeError('numbers in strict JSON must have at least one digit before the decimal point',s[i:])
            elif number[0] == '0' and \
                     len(number) > 1 and number[1].isdigit():
                if self._allow_octal_numbers:
                    raise JSONDecodeError('initial zero digit is only allowed for octal integers',s[i:])
                else:
                    raise JSONDecodeError('initial zero digit must not be followed by other digits (octal numbers are not permitted)',s[i:])
            # Make sure decimal point is followed by a digit
            if decpt is not None:
                if decpt+1 >= len(number) or not number[decpt+1].isdigit():
                    raise JSONDecodeError('decimal point must be followed by at least one digit',s[i:])
            # Determine the exponential part
            if ept is not None:
                if ept+1 >= len(number):
                    raise JSONDecodeError('exponent in number is truncated',s[i:])
                try:
                    exponent = int(number[ept+1:])
                except ValueError:
                    raise JSONDecodeError('not a valid exponent in number',s[i:])
                ##print 'EXPONENT', exponent
            else:
                exponent = 0
            # Try to make an int/long first.
            if decpt is None and exponent >= 0:
                # An integer
                if ept:
                    n = int(number[:ept])
                else:
                    n = int(number)
                n *= sign
                if exponent:
                    n *= 10**exponent
                if n == 0 and sign < 0:
                    # minus zero, must preserve negative sign so make a float
                    n = -0.0
            else:
                try:
                    # Very large / very precise values go through the decimal
                    # module (when available) to avoid float overflow or
                    # precision loss.
                    if decimal and (abs(exponent) > float_maxexp or sigdigits > float_sigdigits):
                        try:
                            n = decimal.Decimal(number)
                            n = n.normalize()
                        except decimal.Overflow:
                            if sign<0:
                                n = neginf
                            else:
                                n = inf
                        else:
                            n *= sign
                    else:
                        n = float(number) * sign
                except ValueError:
                    raise JSONDecodeError('not a valid JSON numeric literal', s[i:j])
            return n, k
    def encode_number(self, n):
        """Encodes a Python numeric type into a JSON numeric literal.

        The special non-numeric values of float('nan'), float('inf')
        and float('-inf') are translated into appropriate JSON
        literals.

        Note that Python complex types are not handled, as there is no
        ECMAScript equivalent type.
        """
        if isinstance(n, complex):
            if n.imag:
                raise JSONEncodeError('Can not encode a complex number that has a non-zero imaginary part',n)
            n = n.real
        if isinstance(n, (int,long)):
            return str(n)
        if decimal and isinstance(n, decimal.Decimal):
            return str(n)
        global nan, inf, neginf
        # Fast path: identity comparison against the module's own
        # non-number singletons.
        if n is nan:
            return 'NaN'
        elif n is inf:
            return 'Infinity'
        elif n is neginf:
            return '-Infinity'
        elif isinstance(n, float):
            # Check for non-numbers.
            # In python nan == inf == -inf, so must use repr() to distinguish
            reprn = repr(n).lower()
            if ('inf' in reprn and '-' in reprn) or n == neginf:
                return '-Infinity'
            elif 'inf' in reprn or n is inf:
                return 'Infinity'
            elif 'nan' in reprn or n is nan:
                return 'NaN'
            return repr(n)
        else:
            raise TypeError('encode_number expected an integral, float, or decimal number type',type(n))
    def decode_string(self, s, i=0, imax=None):
        """Intermediate-level decoder for JSON string literals.

        Takes a string and a starting index, and returns a Python
        string (or unicode string) and the index of the next unparsed
        character.

        Handles both single- and double-quoted literals (the former only
        in permissive mode), JSON and JavaScript escape sequences, and
        UTF-16 surrogate pairs written as paired \\uXXXX escapes.
        """
        if imax is None:
            imax = len(s)
        if imax < i+2 or s[i] not in '"\'':
            raise JSONDecodeError('string literal must be properly quoted',s[i:])
        closer = s[i]
        if closer == '\'' and not self._allow_single_quoted_strings:
            raise JSONDecodeError('string literals must use double quotation marks in strict JSON',s[i:])
        i += 1 # skip quote
        # Pick the escape table: JavaScript's is a superset of JSON's.
        if self._allow_js_string_escapes:
            escapes = self._escapes_js
        else:
            escapes = self._escapes_json
        ccallowed = self._allow_control_char_in_string
        chunks = []
        _append = chunks.append
        done = False
        high_surrogate = None
        while i < imax:
            c = s[i]
            # Make sure a high surrogate is immediately followed by a low surrogate
            if high_surrogate and (i+1 >= imax or s[i:i+2] != '\\u'):
                raise JSONDecodeError('High unicode surrogate must be followed by a low surrogate',s[i:])
            if c == closer:
                i += 1 # skip end quote
                done = True
                break
            elif c == '\\':
                # Escaped character
                i += 1
                if i >= imax:
                    raise JSONDecodeError('escape in string literal is incomplete',s[i-1:])
                c = s[i]

                if '0' <= c <= '7' and self._allow_octal_numbers:
                    # Handle octal escape codes first so special \0 doesn't kick in yet.
                    # Follow Annex B.1.2 of ECMAScript standard.
                    if '0' <= c <= '3':
                        maxdigits = 3
                    else:
                        maxdigits = 2
                    for k in range(i, i+maxdigits+1):
                        if k >= imax or s[k] not in octaldigits:
                            break
                    n = decode_octal(s[i:k])
                    if n < 128:
                        _append( chr(n) )
                    else:
                        _append( unichr(n) )
                    i = k
                    continue

                if escapes.has_key(c):
                    _append(escapes[c])
                    i += 1
                elif c == 'u' or c == 'x':
                    # Numeric character escape: \uXXXX (JSON) or \xXX (JS only).
                    i += 1
                    if c == 'u':
                        digits = 4
                    else: # c== 'x'
                        if not self._allow_js_string_escapes:
                            raise JSONDecodeError(r'string literals may not use the \x hex-escape in strict JSON',s[i-1:])
                        digits = 2
                    if i+digits >= imax:
                        raise JSONDecodeError('numeric character escape sequence is truncated',s[i-1:])
                    n = decode_hex( s[i:i+digits] )
                    if high_surrogate:
                        # Decode surrogate pair and clear high surrogate
                        _append( surrogate_pair_as_unicode( high_surrogate, unichr(n) ) )
                        high_surrogate = None
                    elif n < 128:
                        # ASCII chars always go in as a str
                        _append( chr(n) )
                    elif 0xd800 <= n <= 0xdbff: # high surrogate
                        if imax < i + digits + 2 or s[i+digits] != '\\' or s[i+digits+1] != 'u':
                            raise JSONDecodeError('High unicode surrogate must be followed by a low surrogate',s[i-2:])
                        high_surrogate = unichr(n)  # remember until we get to the low surrogate
                    elif 0xdc00 <= n <= 0xdfff: # low surrogate
                        raise JSONDecodeError('Low unicode surrogate must be proceeded by a high surrogate',s[i-2:])
                    else:
                        # Other chars go in as a unicode char
                        _append( unichr(n) )
                    i += digits
                else:
                    # Unknown escape sequence
                    if self._allow_nonescape_characters:
                        _append( c )
                        i += 1
                    else:
                        raise JSONDecodeError('unsupported escape code in JSON string literal',s[i-1:])
            elif ord(c) <= 0x1f: # A control character
                if self.islineterm(c):
                    raise JSONDecodeError('line terminator characters must be escaped inside string literals',s[i:])
                elif ccallowed:
                    _append( c )
                    i += 1
                else:
                    raise JSONDecodeError('control characters must be escaped inside JSON string literals',s[i:])
            else: # A normal character; not an escape sequence or end-quote.
                # Find a whole sequence of "safe" characters so we can append them
                # all at once rather than one a time, for speed.
                j = i
                i += 1
                while i < imax and s[i] not in unsafe_string_chars and s[i] != closer:
                    i += 1
                _append(s[j:i])
        if not done:
            raise JSONDecodeError('string literal is not terminated with a quotation mark',s)
        s = ''.join( chunks )
        return s, i
    def encode_string(self, s):
        """Encodes a Python string into a JSON string literal.

        Runs of plain printable ASCII are copied through unchanged for
        speed; all other characters go through the reverse-escape table
        or the \\uXXXX (and surrogate-pair) escape machinery.
        """
        # Must handle instances of UserString specially in order to be
        # able to use ord() on it's simulated "characters".
        import UserString
        if isinstance(s, (UserString.UserString, UserString.MutableString)):
            def tochar(c):
                return c.data
        else:
            # Could use "lambda c:c", but that is too slow.  So we set to None
            # and use an explicit if test inside the loop.
            tochar = None

        chunks = []
        chunks.append('"')
        revesc = self._rev_escapes
        asciiencodable = self._asciiencodable
        encunicode = self._encode_unicode_as_escapes
        i = 0
        imax = len(s)
        while i < imax:
            if tochar:
                c = tochar(s[i])
            else:
                c = s[i]
            cord = ord(c)
            if cord < 256 and asciiencodable[cord] and isinstance(encunicode, bool):
                # Contiguous runs of plain old printable ASCII can be copied
                # directly to the JSON output without worry (unless the user
                # has supplied a custom is-encodable function).
                j = i
                i += 1
                while i < imax:
                    if tochar:
                        c = tochar(s[i])
                    else:
                        c = s[i]
                    cord = ord(c)
                    if cord < 256 and asciiencodable[cord]:
                        i += 1
                    else:
                        break
                chunks.append( unicode(s[j:i]) )
            elif revesc.has_key(c):
                # Has a shortcut escape sequence, like "\n"
                chunks.append(revesc[c])
                i += 1
            elif cord <= 0x1F:
                # Always unicode escape ASCII-control characters
                chunks.append(r'\u%04x' % cord)
                i += 1
            elif 0xD800 <= cord <= 0xDFFF:
                # A raw surrogate character!  This should never happen
                # and there's no way to include it in the JSON output.
                # So all we can do is complain.
                cname = 'U+%04X' % cord
                raise JSONEncodeError('can not include or escape a Unicode surrogate character',cname)
            elif cord <= 0xFFFF:
                # Other BMP Unicode character
                if isinstance(encunicode, bool):
                    doesc = encunicode
                else:
                    # encunicode may be a per-character predicate function.
                    doesc = encunicode( c )
                if doesc:
                    chunks.append(r'\u%04x' % cord)
                else:
                    chunks.append( c )
                i += 1
            else: # ord(c) >= 0x10000
                # Non-BMP Unicode: escape (as a surrogate pair) or emit raw.
                if isinstance(encunicode, bool):
                    doesc = encunicode
                else:
                    doesc = encunicode( c )
                if doesc:
                    for surrogate in unicode_as_surrogate_pair(c):
                        chunks.append(r'\u%04x' % ord(surrogate))
                else:
                    chunks.append( c )
                i += 1
        chunks.append('"')
        return ''.join( chunks )
def skip_comment(self, txt, i=0):
"""Skips an ECMAScript comment, either // or /* style.
The contents of the comment are returned as a string, as well
as the index of the character immediately after the comment.
"""
if i+1 >= len(txt) or txt[i] != '/' or txt[i+1] not in '/*':
return None, i
if not self._allow_comments:
raise JSONDecodeError('comments are not allowed in strict JSON',txt[i:])
multiline = (txt[i+1] == '*')
istart = i
i += 2
while i < len(txt):
if multiline:
if txt[i] == '*' and i+1 < len(txt) and txt[i+1] == '/':
j = i+2
break
elif txt[i] == '/' and i+1 < len(txt) and txt[i+1] == '*':
raise JSONDecodeError('multiline /* */ comments may not nest',txt[istart:i+1])
else:
if self.islineterm(txt[i]):
j = i # line terminator is not part of comment
break
i += 1
if i >= len(txt):
if not multiline:
j = len(txt) # // comment terminated by end of file is okay
else:
raise JSONDecodeError('comment was never terminated',txt[istart:])
return txt[istart:j], j
def skipws(self, txt, i=0, imax=None, skip_comments=True):
"""Skips whitespace.
"""
if not self._allow_comments and not self._allow_unicode_whitespace:
if imax is None:
imax = len(txt)
while i < imax and txt[i] in ' \r\n\t':
i += 1
return i
else:
return self.skipws_any(txt, i, imax, skip_comments)
def skipws_any(self, txt, i=0, imax=None, skip_comments=True):
"""Skips all whitespace, including comments and unicode whitespace
Takes a string and a starting index, and returns the index of the
next non-whitespace character.
If skip_comments is True and not running in strict JSON mode, then
comments will be skipped over just like whitespace.
"""
if imax is None:
imax = len(txt)
while i < imax:
if txt[i] == '/':
cmt, i = self.skip_comment(txt, i)
if i < imax and self.isws(txt[i]):
i += 1
else:
break
return i
    def decode_composite(self, txt, i=0, imax=None):
        """Intermediate-level JSON decoder for composite literal types (array and object).

        Takes text and a starting index, and returns either a Python list or
        dictionary and the index of the next unparsed character.
        """
        if imax is None:
            imax = len(txt)
        i = self.skipws(txt, i, imax)
        starti = i
        if i >= imax or txt[i] not in '{[':
            raise JSONDecodeError('composite object must start with "[" or "{"',txt[i:])
        if txt[i] == '[':
            isdict = False
            closer = ']'
            obj = []
        else:
            isdict = True
            closer = '}'
            obj = {}
        i += 1 # skip opener
        i = self.skipws(txt, i, imax)

        if i < imax and txt[i] == closer:
            # empty composite
            i += 1
            done = True
        else:
            saw_value = False # set to false at beginning and after commas
            done = False
            while i < imax:
                i = self.skipws(txt, i, imax)

                # First handle any separator comma or the closing bracket.
                if i < imax and (txt[i] == ',' or txt[i] == closer):
                    c = txt[i]
                    i += 1
                    if c == ',':
                        if not saw_value:
                            # no preceeding value, an elided (omitted) element
                            if isdict:
                                raise JSONDecodeError('can not omit elements of an object (dictionary)')
                            if self._allow_omitted_array_elements:
                                if self._allow_undefined_values:
                                    obj.append( undefined )
                                else:
                                    obj.append( None )
                            else:
                                raise JSONDecodeError('strict JSON does not permit omitted array (list) elements',txt[i:])
                        saw_value = False
                        continue
                    else: # c == closer
                        if not saw_value and not self._allow_trailing_comma_in_literal:
                            if isdict:
                                raise JSONDecodeError('strict JSON does not allow a final comma in an object (dictionary) literal',txt[i-2:])
                            else:
                                raise JSONDecodeError('strict JSON does not allow a final comma in an array (list) literal',txt[i-2:])
                        done = True
                        break

                # Decode the item
                if isdict and self._allow_nonstring_keys:
                    r = self.decodeobj(txt, i, identifier_as_string=True)
                else:
                    r = self.decodeobj(txt, i, identifier_as_string=False)
                if r:
                    if saw_value:
                        # two values without a separating comma
                        raise JSONDecodeError('values must be separated by a comma', txt[i:r[1]])
                    saw_value = True
                    i = self.skipws(txt, r[1], imax)
                    if isdict:
                        key = r[0]  # Ref 11.1.5
                        if not isstringtype(key):
                            if isnumbertype(key):
                                if not self._allow_nonstring_keys:
                                    raise JSONDecodeError('strict JSON only permits string literals as object properties (dictionary keys)',txt[starti:])
                            else:
                                raise JSONDecodeError('object properties (dictionary keys) must be either string literals or numbers',txt[starti:])
                        if i >= imax or txt[i] != ':':
                            raise JSONDecodeError('object property (dictionary key) has no value, expected ":"',txt[starti:])
                        i += 1
                        i = self.skipws(txt, i, imax)
                        # Decode the value associated with this key.
                        rval = self.decodeobj(txt, i)
                        if rval:
                            i = self.skipws(txt, rval[1], imax)
                            obj[key] = rval[0]
                        else:
                            raise JSONDecodeError('object property (dictionary key) has no value',txt[starti:])
                    else: # list
                        obj.append( r[0] )
                else: # not r
                    if isdict:
                        raise JSONDecodeError('expected a value, or "}"',txt[i:])
                    elif not self._allow_omitted_array_elements:
                        raise JSONDecodeError('expected a value or "]"',txt[i:])
                    else:
                        raise JSONDecodeError('expected a value, "," or "]"',txt[i:])
            # end while
        if not done:
            if isdict:
                raise JSONDecodeError('object literal (dictionary) is not terminated',txt[starti:])
            else:
                raise JSONDecodeError('array literal (list) is not terminated',txt[starti:])
        return obj, i
def decode_javascript_identifier(self, name):
"""Convert a JavaScript identifier into a Python string object.
This method can be overriden by a subclass to redefine how JavaScript
identifiers are turned into Python objects. By default this just
converts them into strings.
"""
return name
    def decodeobj(self, txt, i=0, imax=None, identifier_as_string=False, only_object_or_array=False):
        """Intermediate-level JSON decoder.

        Takes a string and a starting index, and returns a two-tuple consting
        of a Python object and the index of the next unparsed character.

        If there is no value at all (empty string, etc), the None is
        returned instead of a tuple.

        Dispatches on the first non-whitespace character: brackets go to
        decode_composite(), quotes to decode_string(), digits/signs to
        decode_number(), and bare words are handled as keywords or (in
        permissive mode) JavaScript identifiers.
        """
        if imax is None:
            imax = len(txt)
        obj = None
        i = self.skipws(txt, i, imax)
        if i >= imax:
            raise JSONDecodeError('Unexpected end of input')
        c = txt[i]

        if c == '[' or c == '{':
            obj, i = self.decode_composite(txt, i, imax)
        elif only_object_or_array:
            raise JSONDecodeError('JSON document must start with an object or array type only', txt[i:i+20])
        elif c == '"' or c == '\'':
            obj, i = self.decode_string(txt, i, imax)
        elif c.isdigit() or c in '.+-':
            obj, i = self.decode_number(txt, i, imax)
        elif c.isalpha() or c in '_$':
            # Bare word: scan the full identifier, then check the known
            # keyword literals before falling back to identifier handling.
            j = i
            while j < imax and (txt[j].isalnum() or txt[j] in '_$'):
                j += 1
            kw = txt[i:j]
            if kw == 'null':
                obj, i = None, j
            elif kw == 'true':
                obj, i = True, j
            elif kw == 'false':
                obj, i = False, j
            elif kw == 'undefined':
                if self._allow_undefined_values:
                    obj, i = undefined, j
                else:
                    raise JSONDecodeError('strict JSON does not allow undefined elements',txt[i:])
            elif kw == 'NaN' or kw == 'Infinity':
                obj, i = self.decode_number(txt, i)
            else:
                if identifier_as_string:
                    obj, i = self.decode_javascript_identifier(kw), j
                else:
                    raise JSONDecodeError('unknown keyword or identifier',kw)
        else:
            raise JSONDecodeError('can not decode value',txt[i:])
        return obj, i
def decode(self, txt):
"""Decodes a JSON-endoded string into a Python object."""
if self._allow_unicode_format_control_chars:
txt = self.strip_format_control_chars(txt)
r = self.decodeobj(txt, 0, only_object_or_array=not self._allow_any_type_at_start)
if not r:
raise JSONDecodeError('can not decode value',txt)
else:
obj, i = r
i = self.skipws(txt, i)
if i < len(txt):
raise JSONDecodeError('unexpected or extra text',txt[i:])
return obj
def encode(self, obj, nest_level=0):
"""Encodes the Python object into a JSON string representation.
This method will first attempt to encode an object by seeing
if it has a json_equivalent() method. If so than it will
call that method and then recursively attempt to encode
the object resulting from that call.
Next it will attempt to determine if the object is a native
type or acts like a squence or dictionary. If so it will
encode that object directly.
Finally, if no other strategy for encoding the object of that
type exists, it will call the encode_default() method. That
method currently raises an error, but it could be overridden
by subclasses to provide a hook for extending the types which
can be encoded.
"""
chunks = []
self.encode_helper(chunks, obj, nest_level)
return ''.join( chunks )
def encode_helper(self, chunklist, obj, nest_level):
#print 'encode_helper(chunklist=%r, obj=%r, nest_level=%r)'%(chunklist,obj,nest_level)
if hasattr(obj, 'json_equivalent'):
json = self.encode_equivalent( obj, nest_level=nest_level )
if json is not None:
chunklist.append( json )
return
if obj is None:
chunklist.append( self.encode_null() )
elif obj is undefined:
if self._allow_undefined_values:
chunklist.append( self.encode_undefined() )
else:
raise JSONEncodeError('strict JSON does not permit "undefined" values')
elif isinstance(obj, bool):
chunklist.append( self.encode_boolean(obj) )
elif isinstance(obj, (int,long,float,complex)) or \
(decimal and isinstance(obj, decimal.Decimal)):
chunklist.append( self.encode_number(obj) )
elif isinstance(obj, basestring) or isstringtype(obj):
chunklist.append( self.encode_string(obj) )
else:
self.encode_composite(chunklist, obj, nest_level)
    def encode_composite(self, chunklist, obj, nest_level):
        """Encodes just dictionaries, lists, or sequences.

        Basically handles any python type for which iter() can create
        an iterator object.

        This method is not intended to be called directly.  Use the
        encode() method instead.
        """
        #print 'encode_complex_helper(chunklist=%r, obj=%r, nest_level=%r)'%(chunklist,obj,nest_level)
        try:
            # Is it a dictionary or UserDict?  Try iterkeys method first.
            it = obj.iterkeys()
        except AttributeError:
            try:
                # Is it a sequence?  Try to make an iterator for it.
                it = iter(obj)
            except TypeError:
                it = None
        if it is not None:
            # Does it look like a dictionary?  Check for a minimal dict or
            # UserDict interface.
            isdict = hasattr(obj, '__getitem__') and hasattr(obj, 'keys')
            compactly = self._encode_compactly
            if isdict:
                chunklist.append('{')
                if compactly:
                    dictcolon = ':'
                else:
                    dictcolon = ' : '
            else:
                chunklist.append('[')
            #print nest_level, 'opening sequence:', repr(chunklist)
            if not compactly:
                # Pretty-printing: indentation strings for this nesting depth.
                indent0 = '  ' * nest_level
                indent = '  ' * (nest_level+1)
                chunklist.append(' ')
            sequence_chunks = []  # use this to allow sorting afterwards if dict
            try: # while not StopIteration
                numitems = 0
                while True:
                    obj2 = it.next()
                    if obj2 is obj:
                        raise JSONEncodeError('trying to encode an infinite sequence',obj)
                    if isdict and not isstringtype(obj2):
                        # Check JSON restrictions on key types
                        if isnumbertype(obj2):
                            if not self._allow_nonstring_keys:
                                raise JSONEncodeError('object properties (dictionary keys) must be strings in strict JSON',obj2)
                        else:
                            raise JSONEncodeError('object properties (dictionary keys) can only be strings or numbers in ECMAScript',obj2)

                    # Encode this item in the sequence and put into item_chunks
                    item_chunks = []
                    self.encode_helper( item_chunks, obj2, nest_level=nest_level+1 )
                    if isdict:
                        # For dictionaries, also encode the colon and the value.
                        item_chunks.append(dictcolon)
                        obj3 = obj[obj2]
                        self.encode_helper(item_chunks, obj3, nest_level=nest_level+2)

                    #print nest_level, numitems, 'item:', repr(obj2)
                    #print nest_level, numitems, 'sequence_chunks:', repr(sequence_chunks)
                    #print nest_level, numitems, 'item_chunks:', repr(item_chunks)
                    #extend_list_with_sep(sequence_chunks, item_chunks)
                    sequence_chunks.append(item_chunks)
                    #print nest_level, numitems, 'new sequence_chunks:', repr(sequence_chunks)
                    numitems += 1
            except StopIteration:
                pass

            if isdict and self._sort_dictionary_keys:
                sequence_chunks.sort()  # Note sorts by JSON repr, not original Python object
            if compactly:
                sep = ','
            else:
                sep = ',\n' + indent

            #print nest_level, 'closing sequence'
            #print nest_level, 'chunklist:', repr(chunklist)
            #print nest_level, 'sequence_chunks:', repr(sequence_chunks)
            extend_and_flatten_list_with_sep( chunklist, sequence_chunks, sep )
            #print nest_level, 'new chunklist:', repr(chunklist)

            if not compactly:
                if numitems > 1:
                    chunklist.append('\n' + indent0)
                else:
                    chunklist.append(' ')
            if isdict:
                chunklist.append('}')
            else:
                chunklist.append(']')
        else: # Can't create an iterator for the object
            # Last resort: the overridable extension hook.
            json2 = self.encode_default( obj, nest_level=nest_level )
            chunklist.append( json2 )
def encode_equivalent( self, obj, nest_level=0 ):
"""This method is used to encode user-defined class objects.
The object being encoded should have a json_equivalent()
method defined which returns another equivalent object which
is easily JSON-encoded. If the object in question has no
json_equivalent() method available then None is returned
instead of a string so that the encoding will attempt the next
strategy.
If a caller wishes to disable the calling of json_equivalent()
methods, then subclass this class and override this method
to just return None.
"""
if hasattr(obj, 'json_equivalent') \
and callable(getattr(obj,'json_equivalent')):
obj2 = obj.json_equivalent()
if obj2 is obj:
# Try to prevent careless infinite recursion
raise JSONEncodeError('object has a json_equivalent() method that returns itself',obj)
json2 = self.encode( obj2, nest_level=nest_level )
return json2
else:
return None
def encode_default( self, obj, nest_level=0 ):
"""This method is used to encode objects into JSON which are not straightforward.
This method is intended to be overridden by subclasses which wish
to extend this encoder to handle additional types.
"""
raise JSONEncodeError('can not encode object into a JSON representation',obj)
# ------------------------------
def encode( obj, strict=False, compactly=True, escape_unicode=False, encoding=None ):
    """Encodes a Python object into a JSON-encoded string.

    If 'strict' is set to True, then only strictly-conforming JSON
    output will be produced.  Note that this means that some types
    of values may not be convertable and will result in a
    JSONEncodeError exception.

    If 'compactly' is set to True, then the resulting string will
    have all extraneous white space removed; if False then the
    string will be "pretty printed" with whitespace and indentation
    added to make it more readable.

    If 'escape_unicode' is set to True, then all non-ASCII characters
    will be represented as a unicode escape sequence; if False then
    the actual real unicode character will be inserted.  It may also
    be a callable predicate deciding per character.

    If no encoding is specified (encoding=None) then the output will
    either be a Python string (if entirely ASCII) or a Python unicode
    string type.

    However if an encoding name is given then the returned value will
    be a python string which is the byte sequence encoding the JSON
    value.  As the default/recommended encoding for JSON is UTF-8,
    you should almost always pass in encoding='utf8'.
    """
    import sys
    encoder = None  # Custom codec encoding function
    bom = None  # Byte order mark to prepend to final output
    cdk = None  # Codec to use
    if encoding is not None:
        import codecs
        try:
            cdk = codecs.lookup(encoding)
        except LookupError:
            cdk = None

        # NOTE(review): 'if cdk: pass / elif not cdk:' is redundant; it is
        # equivalent to a single 'if not cdk:' branch.
        if cdk:
            pass
        elif not cdk:
            # No built-in codec was found, see if it is something we
            # can do ourself.
            encoding = encoding.lower()
            if encoding.startswith('utf-32') or encoding.startswith('utf32') \
                   or encoding.startswith('ucs4') \
                   or encoding.startswith('ucs-4'):
                # Python doesn't natively have a UTF-32 codec, but JSON
                # requires that it be supported.  So we must decode these
                # manually.
                if encoding.endswith('le'):
                    encoder = utf32le_encode
                elif encoding.endswith('be'):
                    encoder = utf32be_encode
                else:
                    # No explicit endianness: default to big-endian with a BOM.
                    encoder = utf32be_encode
                    bom = codecs.BOM_UTF32_BE
            elif encoding.startswith('ucs2') or encoding.startswith('ucs-2'):
                # Python has no UCS-2, but we can simulate with
                # UTF-16.  We just need to force us to not try to
                # encode anything past the BMP.
                encoding = 'utf-16'
                if not escape_unicode and not callable(escape_unicode):
                    escape_unicode = lambda c: (0xD800 <= ord(c) <= 0xDFFF) or ord(c) >= 0x10000
            else:
                raise JSONEncodeError('this python has no codec for this character encoding',encoding)

    if not escape_unicode and not callable(escape_unicode):
        if encoding and encoding.startswith('utf'):
            # All UTF-x encodings can do the whole Unicode repertoire, so
            # do nothing special.
            pass
        else:
            # Even though we don't want to escape all unicode chars,
            # the encoding being used may force us to do so anyway.
            # We must pass in a function which says which characters
            # the encoding can handle and which it can't.
            def in_repertoire( c, encoding_func ):
                # True when character c can be encoded by encoding_func.
                try:
                    x = encoding_func( c, errors='strict' )
                except UnicodeError:
                    return False
                return True
            if encoder:
                escape_unicode = lambda c: not in_repertoire(c, encoder)
            elif cdk:
                escape_unicode = lambda c: not in_repertoire(c, cdk[0])
            else:
                pass # Let the JSON object deal with it

    j = JSON( strict=strict, compactly=compactly, escape_unicode=escape_unicode )

    unitxt = j.encode( obj )
    # Finally, apply the requested byte encoding (if any).
    if encoder:
        txt = encoder( unitxt )
    elif encoding is not None:
        txt = unitxt.encode( encoding )
    else:
        txt = unitxt
    if bom:
        txt = bom + txt
    return txt
def decode( txt, strict=False, encoding=None, **kw ):
    """Decodes a JSON-encoded string into a Python object.

    If 'strict' is set to True, then those strings that are not
    entirely strictly conforming to JSON will result in a
    JSONDecodeError exception.

    The input string can be either a python string or a python unicode
    string.  If it is already a unicode string, then it is assumed
    that no character set decoding is required.

    However, if you pass in a non-Unicode text string (i.e., a python
    type 'str') then an attempt will be made to auto-detect and decode
    the character encoding.  This will be successful if the input was
    encoded in any of UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE),
    and of course plain ASCII works too.

    Note though that if you know the character encoding, then you
    should convert to a unicode string yourself, or pass it the name
    of the 'encoding' to avoid the guessing made by the auto
    detection, as with

        python_object = demjson.decode( input_bytes, encoding='utf8' )

    Optional keywords arguments must be of the form
        allow_xxxx=True/False
    or
        prevent_xxxx=True/False
    where each will allow or prevent the specific behavior, after the
    evaluation of the 'strict' argument.  For example, if strict=True
    then by also passing 'allow_comments=True' then comments will be
    allowed.  If strict=False then prevent_comments=True will allow
    everything except comments.
    """
    # Initialize the JSON object
    j = JSON( strict=strict )
    # Apply the allow_*/prevent_* behavior keyword arguments on top of
    # the base strictness.
    for keyword, value in kw.items():
        if keyword.startswith('allow_'):
            behavior = keyword[6:]
            allow = bool(value)
        elif keyword.startswith('prevent_'):
            behavior = keyword[8:]
            allow = not bool(value)
        else:
            raise ValueError('unknown keyword argument', keyword)
        if allow:
            j.allow(behavior)
        else:
            j.prevent(behavior)

    # Convert the input string into unicode if needed.
    if isinstance(txt,unicode):
        unitxt = txt
    else:
        if encoding is None:
            unitxt = auto_unicode_decode( txt )
        else:
            cdk = None # codec
            decoder = None
            import codecs
            try:
                cdk = codecs.lookup(encoding)
            except LookupError:
                # No built-in codec; handle the UTF-32/UCS-4 and UCS-2
                # families ourselves.
                encoding = encoding.lower()
                decoder = None
                if encoding.startswith('utf-32') \
                       or encoding.startswith('ucs4') \
                       or encoding.startswith('ucs-4'):
                    # Python doesn't natively have a UTF-32 codec, but JSON
                    # requires that it be supported.  So we must decode these
                    # manually.
                    if encoding.endswith('le'):
                        decoder = utf32le_decode
                    elif encoding.endswith('be'):
                        decoder = utf32be_decode
                    else:
                        # Endianness unspecified: consult (and strip) the BOM.
                        if txt.startswith( codecs.BOM_UTF32_BE ):
                            decoder = utf32be_decode
                            txt = txt[4:]
                        elif txt.startswith( codecs.BOM_UTF32_LE ):
                            decoder = utf32le_decode
                            txt = txt[4:]
                        else:
                            if encoding.startswith('ucs'):
                                raise JSONDecodeError('UCS-4 encoded string must start with a BOM')
                            decoder = utf32be_decode # Default BE for UTF, per unicode spec
                elif encoding.startswith('ucs2') or encoding.startswith('ucs-2'):
                    # Python has no UCS-2, but we can simulate with
                    # UTF-16.  We just need to force us to not try to
                    # encode anything past the BMP.
                    encoding = 'utf-16'

            if decoder:
                unitxt = decoder(txt)
            elif encoding:
                unitxt = txt.decode(encoding)
            else:
                raise JSONDecodeError('this python has no codec for this character encoding',encoding)

        # Check that the decoding seems sane.  Per RFC 4627 section 3:
        #    "Since the first two characters of a JSON text will
        #    always be ASCII characters [RFC0020], ..."
        #
        # This check is probably not necessary, but it allows us to
        # raise a suitably descriptive error rather than an obscure
        # syntax error later on.
        #
        # Note that the RFC requirements of two ASCII characters seems
        # to be an incorrect statement as a JSON string literal may
        # have as it's first character any unicode character.  Thus
        # the first two characters will always be ASCII, unless the
        # first character is a quotation mark.  And in non-strict
        # mode we can also have a few other characters too.
        if len(unitxt) > 2:
            first, second = unitxt[:2]
            if first in '"\'':
                pass # second can be anything inside string literal
            else:
                if ((ord(first) < 0x20 or ord(first) > 0x7f) or \
                    (ord(second) < 0x20 or ord(second) > 0x7f)) and \
                    (not j.isws(first) and not j.isws(second)):
                    # Found non-printable ascii, must check unicode
                    # categories to see if the character is legal.
                    # Only whitespace, line and paragraph separators,
                    # and format control chars are legal here.
                    import unicodedata
                    catfirst = unicodedata.category(unicode(first))
                    catsecond = unicodedata.category(unicode(second))
                    if catfirst not in ('Zs','Zl','Zp','Cf') or \
                           catsecond not in ('Zs','Zl','Zp','Cf'):
                        raise JSONDecodeError('the decoded string is gibberish, is the encoding correct?',encoding)
    # Now ready to do the actual decoding
    obj = j.decode( unitxt )
    return obj
# end file
|
gpl-2.0
|
bdrung/audacity
|
lib-src/lv2/sratom/waflib/extras/autowaf.py
|
176
|
22430
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import glob
import os
import subprocess
import sys
from waflib import Configure,Context,Logs,Node,Options,Task,Utils
from waflib.TaskGen import feature,before,after
global g_is_child
g_is_child=False
global g_step
g_step=0
@feature('c','cxx')
@after('apply_incpaths')
def include_config_h(self):
	# Task-generator method: append the build directory (where generated
	# headers such as config.h live) to the include search path of every
	# C/C++ task generator, after the normal include paths are applied.
	self.env.append_value('INCPATHS',self.bld.bldnode.abspath())
def set_options(opt,debug_by_default=False):
	"""Register autowaf's standard command-line options on `opt`.

	Moves waf's own --prefix/--destdir into an 'Installation directories'
	group, then adds the usual *dir options plus debug/strict/doc/LV2
	toggles.  Guarded by the module-level g_step so nested child projects
	that share this module register the options only once.
	"""
	global g_step
	if g_step>0:
		return
	dirs_options=opt.add_option_group('Installation directories','')
	# Re-home waf's built-in directory options into our option group.
	for k in('--prefix','--destdir'):
		option=opt.parser.get_option(k)
		if option:
			opt.parser.remove_option(k)
			dirs_options.add_option(option)
	dirs_options.add_option('--bindir',type='string',help="Executable programs [Default: PREFIX/bin]")
	dirs_options.add_option('--configdir',type='string',help="Configuration data [Default: PREFIX/etc]")
	dirs_options.add_option('--datadir',type='string',help="Shared data [Default: PREFIX/share]")
	dirs_options.add_option('--includedir',type='string',help="Header files [Default: PREFIX/include]")
	dirs_options.add_option('--libdir',type='string',help="Libraries [Default: PREFIX/lib]")
	dirs_options.add_option('--mandir',type='string',help="Manual pages [Default: DATADIR/man]")
	dirs_options.add_option('--docdir',type='string',help="HTML documentation [Default: DATADIR/doc]")
	# Either --optimize (debug builds by default) or --debug, both -> opts.debug.
	if debug_by_default:
		opt.add_option('--optimize',action='store_false',default=True,dest='debug',help="Build optimized binaries")
	else:
		opt.add_option('--debug',action='store_true',default=False,dest='debug',help="Build debuggable binaries")
	opt.add_option('--pardebug',action='store_true',default=False,dest='pardebug',help="Build parallel-installable debuggable libraries with D suffix")
	opt.add_option('--grind',action='store_true',default=False,dest='grind',help="Run tests in valgrind")
	opt.add_option('--strict',action='store_true',default=False,dest='strict',help="Use strict compiler flags and show all warnings")
	opt.add_option('--ultra-strict',action='store_true',default=False,dest='ultra_strict',help="Use even stricter compiler flags (likely to trigger many warnings in library headers)")
	opt.add_option('--docs',action='store_true',default=False,dest='docs',help="Build documentation - requires doxygen")
	opt.add_option('--lv2-user',action='store_true',default=False,dest='lv2_user',help="Install LV2 bundles to user location")
	opt.add_option('--lv2-system',action='store_true',default=False,dest='lv2_system',help="Install LV2 bundles to system location")
	dirs_options.add_option('--lv2dir',type='string',help="LV2 bundles [Default: LIBDIR/lv2]")
	g_step=1
def check_header(conf,lang,name,define='',mandatory=True):
	"""Check for C or C++ header `name`, optionally setting `define`.

	Logs an error and returns without checking if `lang` is not 'c'/'cxx'.
	"""
	# Extra search path on OS X (presumably for MacPorts installs).
	includes='/opt/local/include' if sys.platform=="darwin" else ''
	checkers={'c':conf.check_cc,'cxx':conf.check_cxx}
	check_func=checkers.get(lang)
	if check_func is None:
		Logs.error("Unknown header language `%s'"%lang)
		return
	kwargs={'header_name':name,'includes':includes,'mandatory':mandatory}
	if define!='':
		kwargs['define_name']=define
	check_func(**kwargs)
def nameify(name):
	"""Mangle `name` into an identifier-friendly token (/, -, . -> _; ++ -> PP)."""
	result=name
	for old,new in (('/','_'),('++','PP'),('-','_'),('.','_')):
		result=result.replace(old,new)
	return result
def define(conf,var_name,value):
	# Record `var_name` both as a preprocessor define (for config headers)
	# and as a plain conf.env entry so later build code can read it back.
	conf.define(var_name,value)
	conf.env[var_name]=value
def check_pkg(conf,name,**args):
	"""pkg-config check for package `name`, with results cached in conf.env.

	Skips the check entirely for libraries registered as in-tree local
	libs.  A cached result is re-checked only when a newer version is now
	required, or when a previously optional dependency is now mandatory.
	With --pardebug, the 'D'-suffixed debug variant is tried first.
	"""
	if args['uselib_store'].lower()in conf.env['AUTOWAF_LOCAL_LIBS']:
		return
	class CheckType:
		OPTIONAL=1
		MANDATORY=2
	var_name='CHECKED_'+nameify(args['uselib_store'])
	check=not var_name in conf.env
	mandatory=not'mandatory'in args or args['mandatory']
	if not check and'atleast_version'in args:
		# Re-check if a newer version is requested than was checked before.
		# NOTE(review): versions compare as plain strings here, so e.g.
		# '10.0' < '9.0' — confirm this is acceptable for the versions used.
		checked_version=conf.env['VERSION_'+name]
		if checked_version and checked_version<args['atleast_version']:
			check=True;
	if not check and mandatory and conf.env[var_name]==CheckType.OPTIONAL:
		# Previously checked as optional, but required this time: re-check.
		check=True;
	if check:
		found=None
		pkg_var_name='PKG_'+name.replace('-','_')
		pkg_name=name
		if conf.env.PARDEBUG:
			# Prefer the parallel-installable debug package "<name>D".
			args['mandatory']=False
			found=conf.check_cfg(package=pkg_name+'D',args="--cflags --libs",**args)
			if found:
				pkg_name+='D'
			if mandatory:
				args['mandatory']=True
		if not found:
			found=conf.check_cfg(package=pkg_name,args="--cflags --libs",**args)
		if found:
			conf.env[pkg_var_name]=pkg_name
		if'atleast_version'in args:
			conf.env['VERSION_'+name]=args['atleast_version']
	if mandatory:
		conf.env[var_name]=CheckType.MANDATORY
	else:
		conf.env[var_name]=CheckType.OPTIONAL
def normpath(path):
	"""Normalize `path`, forcing forward slashes on Windows."""
	normalized=os.path.normpath(path)
	return normalized.replace('\\','/') if sys.platform=='win32' else normalized
def configure(conf):
	"""Global autowaf configuration: install dirs, LV2 path, compiler flags.

	Guarded by the module-level g_step so nested child projects run this
	only once.  Reads everything from Options.options (set_options must
	have run first).
	"""
	global g_step
	if g_step>1:
		return
	def append_cxx_flags(flags):
		# Helper: add `flags` to both the C and C++ flag lists.
		conf.env.append_value('CFLAGS',flags)
		conf.env.append_value('CXXFLAGS',flags)
	print('')
	display_header('Global Configuration')
	if Options.options.docs:
		conf.load('doxygen')
	conf.env['DOCS']=Options.options.docs
	conf.env['DEBUG']=Options.options.debug or Options.options.pardebug
	conf.env['PARDEBUG']=Options.options.pardebug
	conf.env['PREFIX']=normpath(os.path.abspath(os.path.expanduser(conf.env['PREFIX'])))
	def config_dir(var,opt,default):
		# Helper: set conf.env[var] from the user option if given, else default.
		if opt:
			conf.env[var]=normpath(opt)
		else:
			conf.env[var]=normpath(default)
	opts=Options.options
	prefix=conf.env['PREFIX']
	config_dir('BINDIR',opts.bindir,os.path.join(prefix,'bin'))
	config_dir('SYSCONFDIR',opts.configdir,os.path.join(prefix,'etc'))
	config_dir('DATADIR',opts.datadir,os.path.join(prefix,'share'))
	config_dir('INCLUDEDIR',opts.includedir,os.path.join(prefix,'include'))
	config_dir('LIBDIR',opts.libdir,os.path.join(prefix,'lib'))
	config_dir('MANDIR',opts.mandir,os.path.join(conf.env['DATADIR'],'man'))
	config_dir('DOCDIR',opts.docdir,os.path.join(conf.env['DATADIR'],'doc'))
	# LV2 bundle directory: explicit --lv2dir wins, then per-user or
	# system-wide platform defaults, otherwise LIBDIR/lv2.
	if Options.options.lv2dir:
		conf.env['LV2DIR']=Options.options.lv2dir
	elif Options.options.lv2_user:
		if sys.platform=="darwin":
			conf.env['LV2DIR']=os.path.join(os.getenv('HOME'),'Library/Audio/Plug-Ins/LV2')
		elif sys.platform=="win32":
			conf.env['LV2DIR']=os.path.join(os.getenv('APPDATA'),'LV2')
		else:
			conf.env['LV2DIR']=os.path.join(os.getenv('HOME'),'.lv2')
	elif Options.options.lv2_system:
		if sys.platform=="darwin":
			conf.env['LV2DIR']='/Library/Audio/Plug-Ins/LV2'
		elif sys.platform=="win32":
			conf.env['LV2DIR']=os.path.join(os.getenv('COMMONPROGRAMFILES'),'LV2')
		else:
			conf.env['LV2DIR']=os.path.join(conf.env['LIBDIR'],'lv2')
	else:
		conf.env['LV2DIR']=os.path.join(conf.env['LIBDIR'],'lv2')
	conf.env['LV2DIR']=normpath(conf.env['LV2DIR'])
	if Options.options.docs:
		doxygen=conf.find_program('doxygen')
		if not doxygen:
			conf.fatal("Doxygen is required to build with --docs")
		dot=conf.find_program('dot')
		if not dot:
			conf.fatal("Graphviz (dot) is required to build with --docs")
	# Base compiler flags.  Note these assignments *replace* any existing
	# CFLAGS/CXXFLAGS rather than appending to them.
	if Options.options.debug:
		if conf.env['MSVC_COMPILER']:
			conf.env['CFLAGS']=['/Od','/Zi','/MTd']
			conf.env['CXXFLAGS']=['/Od','/Zi','/MTd']
			conf.env['LINKFLAGS']=['/DEBUG']
		else:
			conf.env['CFLAGS']=['-O0','-g']
			conf.env['CXXFLAGS']=['-O0','-g']
	else:
		if conf.env['MSVC_COMPILER']:
			conf.env['CFLAGS']=['/MD']
			conf.env['CXXFLAGS']=['/MD']
		append_cxx_flags(['-DNDEBUG'])
	if Options.options.ultra_strict:
		# --ultra-strict implies --strict.
		Options.options.strict=True
		conf.env.append_value('CFLAGS',['-Wredundant-decls','-Wstrict-prototypes','-Wmissing-prototypes','-Wcast-qual'])
		conf.env.append_value('CXXFLAGS',['-Wcast-qual'])
	if Options.options.strict:
		conf.env.append_value('CFLAGS',['-pedantic','-Wshadow'])
		conf.env.append_value('CXXFLAGS',['-ansi','-Wnon-virtual-dtor','-Woverloaded-virtual'])
		append_cxx_flags(['-Wall','-Wcast-align','-Wextra','-Wmissing-declarations','-Wno-unused-parameter','-Wstrict-overflow','-Wundef','-Wwrite-strings','-fstrict-overflow'])
		# These warnings are only added when the compiler is NOT clang.
		if not conf.check_cc(fragment='''
#ifndef __clang__
#error
#endif
int main() { return 0; }''',features='c',mandatory=False,execute=False,msg='Checking for clang'):
			append_cxx_flags(['-Wlogical-op','-Wsuggest-attribute=noreturn','-Wunsafe-loop-optimizations'])
	if not conf.env['MSVC_COMPILER']:
		append_cxx_flags(['-fshow-column'])
	conf.env.prepend_value('CFLAGS','-I'+os.path.abspath('.'))
	conf.env.prepend_value('CXXFLAGS','-I'+os.path.abspath('.'))
	display_msg(conf,"Install prefix",conf.env['PREFIX'])
	display_msg(conf,"Debuggable build",str(conf.env['DEBUG']))
	display_msg(conf,"Build documentation",str(conf.env['DOCS']))
	print('')
	g_step=2
def set_c99_mode(conf):
	"""Put the C compiler into C99 mode (MSVC gets -TP, others -std=c99)."""
	flag='-TP' if conf.env.MSVC_COMPILER else '-std=c99'
	conf.env.append_unique('CFLAGS',[flag])
def set_local_lib(conf,name,has_objects):
	"""Register in-tree library `name` (with objects, or header-only)."""
	define(conf,'HAVE_'+nameify(name.upper()),1)
	# Libraries with objects and header-only libraries go in separate maps.
	key='AUTOWAF_LOCAL_LIBS' if has_objects else 'AUTOWAF_LOCAL_HEADERS'
	if type(conf.env[key])!=dict:
		conf.env[key]={}
	conf.env[key][name.lower()]=True
def append_property(obj,key,val):
	"""Append `val` to attribute `key` of `obj`, creating it if absent."""
	try:
		current=getattr(obj,key)
	except AttributeError:
		setattr(obj,key,val)
	else:
		setattr(obj,key,current+val)
def use_lib(bld,obj,libs):
	"""Wire dependencies `libs` (space-separated names) into task gen `obj`.

	Libraries registered via set_local_lib are used in-tree (waf 'use'
	plus -iquote include paths); anything else is treated as an external
	'uselib' from a pkg-config check.
	"""
	abssrcdir=os.path.abspath('.')
	libs_list=libs.split()
	for l in libs_list:
		in_headers=l.lower()in bld.env['AUTOWAF_LOCAL_HEADERS']
		in_libs=l.lower()in bld.env['AUTOWAF_LOCAL_LIBS']
		if in_libs:
			# Link against the in-tree target "lib<name>".
			append_property(obj,'use',' lib%s '%l.lower())
			append_property(obj,'framework',bld.env['FRAMEWORK_'+l])
		if in_headers or in_libs:
			# -iquote so in-tree headers win for quoted includes; only
			# prepend the flag once per env.
			inc_flag='-iquote '+os.path.join(abssrcdir,l.lower())
			for f in['CFLAGS','CXXFLAGS']:
				if not inc_flag in bld.env[f]:
					bld.env.prepend_value(f,inc_flag)
		else:
			append_property(obj,'uselib',' '+l)
@feature('c','cxx')
@before('apply_link')
def version_lib(self):
	# Task-generator method: per-platform library naming tweaks, run
	# before the link task is created.
	if sys.platform=='win32':
		self.vnum=None  # no .so-style version suffixes on Windows
	if self.env['PARDEBUG']:
		# Parallel-installable debug build: suffix library targets with 'D'.
		applicable=['cshlib','cxxshlib','cstlib','cxxstlib']
		if[x for x in applicable if x in self.features]:
			self.target=self.target+'D'
def set_lib_env(conf,name,version):
	'Set up environment for local library as if found via pkg-config.'
	# Populates PKG_/INCLUDES_/LIBPATH_/LIB_ variables keyed on the
	# upper-cased name and the major version (e.g. "foo-1").
	NAME=name.upper()
	major_ver=version.split('.')[0]
	pkg_var_name='PKG_'+name.replace('-','_')+'_'+major_ver
	lib_name='%s-%s'%(name,major_ver)
	if conf.env.PARDEBUG:
		lib_name+='D'  # debug-suffixed library for --pardebug builds
	conf.env[pkg_var_name]=lib_name
	conf.env['INCLUDES_'+NAME]=['${INCLUDEDIR}/%s-%s'%(name,major_ver)]
	conf.env['LIBPATH_'+NAME]=[conf.env.LIBDIR]
	conf.env['LIB_'+NAME]=[lib_name]
def display_header(title):
	# Print a bold section heading in configure output.
	Logs.pprint('BOLD',title)
def display_msg(conf,msg,status=None,color=None):
	"""Pretty-print one aligned "name : value" configure line.

	`status` is auto-colored green/yellow when it looks boolean; an
	explicit `color` argument now overrides the automatic choice (it was
	previously clobbered unconditionally by color='CYAN' and ignored).
	"""
	if color is None:
		color='CYAN'
		if type(status)==bool and status or status=="True":
			color='GREEN'
		elif type(status)==bool and not status or status=="False":
			color='YELLOW'
	Logs.pprint('BOLD'," *",sep='')
	Logs.pprint('NORMAL',"%s"%msg.ljust(conf.line_just-3),sep='')
	Logs.pprint('BOLD',":",sep='')
	Logs.pprint(color,status)
def link_flags(env,lib):
	"""Render linker flags for `lib` by applying LIB_ST to each LIB_<lib> entry."""
	pattern=env['LIB_ST']
	return ' '.join(pattern%entry for entry in env['LIB_'+lib])
def compile_flags(env,lib):
	"""Render include flags for `lib` by applying CPPPATH_ST to each INCLUDES_<lib> entry."""
	pattern=env['CPPPATH_ST']
	return ' '.join(pattern%entry for entry in env['INCLUDES_'+lib])
def set_recursive():
	# Mark this build as a child invoked from a parent project's wscript.
	global g_is_child
	g_is_child=True
def is_child():
	# True when building as a child of a parent project (see set_recursive).
	global g_is_child
	return g_is_child
def build_pc(bld,name,version,version_suffix,libs,subst_dict=None):
	'''Build a pkg-config file for a library.
	name           -- uppercase variable name (e.g. 'SOMENAME')
	version        -- version string (e.g. '1.2.3')
	version_suffix -- name version suffix (e.g. '2')
	libs           -- string/list of dependencies (e.g. 'LIBFOO GLIB')
	subst_dict     -- optional extra substitution variables
	'''
	# Fresh dict per call: the old mutable default argument ({}) was
	# mutated below, leaking substitutions from one call into all later
	# calls of this function.
	if subst_dict is None:
		subst_dict={}
	pkg_prefix=bld.env['PREFIX']
	if pkg_prefix[-1]=='/':
		pkg_prefix=pkg_prefix[:-1]
	target=name.lower()
	if version_suffix!='':
		target+='-'+version_suffix
	if bld.env['PARDEBUG']:
		target+='D'  # parallel-installable debug variant
	target+='.pc'
	# Express libdir/includedir relative to the prefix when possible.
	libdir=bld.env['LIBDIR']
	if libdir.startswith(pkg_prefix):
		libdir=libdir.replace(pkg_prefix,'${exec_prefix}')
	includedir=bld.env['INCLUDEDIR']
	if includedir.startswith(pkg_prefix):
		includedir=includedir.replace(pkg_prefix,'${prefix}')
	obj=bld(features='subst',source='%s.pc.in'%name.lower(),target=target,install_path=os.path.join(bld.env['LIBDIR'],'pkgconfig'),exec_prefix='${prefix}',PREFIX=pkg_prefix,EXEC_PREFIX='${prefix}',LIBDIR=libdir,INCLUDEDIR=includedir)
	if type(libs)!=list:
		libs=libs.split()
	subst_dict[name+'_VERSION']=version
	subst_dict[name+'_MAJOR_VERSION']=version[0:version.find('.')]
	# Per-dependency LIBS/CFLAGS substitutions (empty cflags become ' '
	# so the template variable is still substituted).
	for i in libs:
		subst_dict[i+'_LIBS']=link_flags(bld.env,i)
		lib_cflags=compile_flags(bld.env,i)
		if lib_cflags=='':
			lib_cflags=' '
		subst_dict[i+'_CFLAGS']=lib_cflags
	obj.__dict__.update(subst_dict)
def build_dir(name,subdir):
	"""Path of `subdir` in the build tree (nested under `name` for child builds)."""
	parts=('build',name,subdir) if is_child() else ('build',subdir)
	return os.path.join(*parts)
def make_simple_dox(name):
	"""Post-process doxygen output for `name` into a simple single page.

	Rewrites build/<name>/doc/html/group__<name>.html into index.html,
	sed-strips doxygen boilerplate, and cleans the generated man pages.
	Failures are logged, never raised; the working directory is always
	restored (the old code left the process inside the doc tree on the
	early return and on errors).
	"""
	name=name.lower()
	NAME=name.upper()
	top=os.getcwd()
	try:
		os.chdir(build_dir(name,'doc/html'))
		page='group__%s.html'%name
		if not os.path.exists(page):
			return
		# sed out API markers, navigation chrome, and self-links.
		for i in[['%s_API '%NAME,''],['%s_DEPRECATED '%NAME,''],['group__%s.html'%name,''],[' ',''],['<script.*><\/script>',''],['<hr\/><a name="details" id="details"><\/a><h2>.*<\/h2>',''],['<link href=\"tabs.css\" rel=\"stylesheet\" type=\"text\/css\"\/>',''],['<img class=\"footer\" src=\"doxygen.png\" alt=\"doxygen\"\/>','Doxygen']]:
			os.system("sed -i 's/%s/%s/g' %s"%(i[0],i[1],page))
		os.rename('group__%s.html'%name,'index.html')
		for i in(glob.glob('*.png')+glob.glob('*.html')+glob.glob('*.js')+glob.glob('*.css')):
			if i!='index.html'and i!='style.css':
				os.remove(i)
		os.chdir(top)
		os.chdir(build_dir(name,'doc/man/man3'))
		for i in glob.glob('*.3'):
			os.system("sed -i 's/%s_API //' %s"%(NAME,i))
		for i in glob.glob('_*'):
			os.remove(i)
	except Exception as e:
		# Was 'except Exception ,e:' — Python-2-only syntax; 'as' works on 2.6+/3.x.
		Logs.error("Failed to fix up %s documentation: %s"%(name,e))
	finally:
		os.chdir(top)
def build_dox(bld,name,version,srcdir,blddir,outdir='',versioned=True):
	"""Set up the doxygen documentation build and install for `name`.

	Substitutes version/paths into doc/reference.doxygen.in, runs the
	'doxygen' feature, and installs the HTML and man pages.  No-op unless
	configured with --docs.
	"""
	if not bld.env['DOCS']:
		return
	if is_child():
		src_dir=os.path.join(srcdir,name.lower())
		doc_dir=os.path.join(blddir,name.lower(),'doc')
	else:
		src_dir=srcdir
		doc_dir=os.path.join(blddir,'doc')
	subst_tg=bld(features='subst',source='doc/reference.doxygen.in',target='doc/reference.doxygen',install_path='',name='doxyfile')
	subst_dict={name+'_VERSION':version,name+'_SRCDIR':os.path.abspath(src_dir),name+'_DOC_DIR':os.path.abspath(doc_dir)}
	subst_tg.__dict__.update(subst_dict)
	subst_tg.post()
	docs=bld(features='doxygen',doxyfile='doc/reference.doxygen')
	docs.post()
	outname=name.lower()
	if versioned:
		# Install into e.g. DOCDIR/foo-1/... using the major version.
		outname+='-%d'%int(version[0:version.find('.')])
	bld.install_files(os.path.join('${DOCDIR}',outname,outdir,'html'),bld.path.get_bld().ant_glob('doc/html/*'))
	# Man sections 1-7; skip internal pages (leading underscore).
	for i in range(1,8):
		bld.install_files('${MANDIR}/man%d'%i,bld.path.get_bld().ant_glob('doc/man/man%d/*'%i,excl='**/_*'))
def build_version_files(header_path,source_path,domain,major,minor,micro):
	"""Generate C version source and header files for `domain`.

	header_path -- path of the header to write
	source_path -- path of the C source to write
	domain      -- identifier prefix (e.g. 'foo' -> foo_major_version)
	major/minor/micro -- version components
	Exits the process on I/O failure; returns None on success.
	"""
	header_path=os.path.abspath(header_path)
	source_path=os.path.abspath(source_path)
	text="int "+domain+"_major_version = "+str(major)+";\n"
	text+="int "+domain+"_minor_version = "+str(minor)+";\n"
	text+="int "+domain+"_micro_version = "+str(micro)+";\n"
	try:
		# 'with' closes the handle even if write() fails.
		with open(source_path,'w') as o:
			o.write(text)
	except IOError:
		Logs.error('Failed to open %s for writing\n'%source_path)
		sys.exit(-1)
	text="#ifndef __"+domain+"_version_h__\n"
	text+="#define __"+domain+"_version_h__\n"
	text+="extern const char* "+domain+"_revision;\n"
	text+="extern int "+domain+"_major_version;\n"
	text+="extern int "+domain+"_minor_version;\n"
	text+="extern int "+domain+"_micro_version;\n"
	text+="#endif /* __"+domain+"_version_h__ */\n"
	try:
		with open(header_path,'w') as o:
			o.write(text)
	except IOError:
		Logs.warn('Failed to open %s for writing\n'%header_path)
		sys.exit(-1)
	return None
def build_i18n_pot(bld,srcdir,dir,name,sources,copyright_holder=None):
	"""Extract translatable strings from `sources` into <name>.pot via xgettext."""
	Logs.info('Generating pot file from %s'%name)
	pot_file='%s.pot'%name
	cmd=['xgettext','--keyword=_','--keyword=N_','--keyword=S_','--from-code=UTF-8','-o',pot_file]
	if copyright_holder:
		# No extra quoting: subprocess passes list elements verbatim, so
		# the previous '--copyright-holder="%s"' form embedded literal
		# quote characters into the recorded copyright holder.
		cmd+=['--copyright-holder=%s'%copyright_holder]
	cmd+=sources
	Logs.info('Updating '+pot_file)
	subprocess.call(cmd,cwd=os.path.join(srcdir,dir))
def build_i18n_po(bld,srcdir,dir,name,sources,copyright_holder=None):
	"""Merge the <name>.pot template into every po/*.po translation."""
	pwd=os.getcwd()
	os.chdir(os.path.join(srcdir,dir))
	try:
		pot_file='%s.pot'%name
		for po_file in glob.glob('po/*.po'):
			cmd=['msgmerge','--update',po_file,pot_file]
			Logs.info('Updating '+po_file)
			subprocess.call(cmd)
	finally:
		# Restore the working directory even if msgmerge fails to spawn.
		os.chdir(pwd)
def build_i18n_mo(bld,srcdir,dir,name,sources,copyright_holder=None):
	"""Compile every po/*.po translation into its binary .mo file."""
	pwd=os.getcwd()
	os.chdir(os.path.join(srcdir,dir))
	try:
		# (removed an unused pot_file local carried over from the po step)
		for po_file in glob.glob('po/*.po'):
			mo_file=po_file.replace('.po','.mo')
			cmd=['msgfmt','-c','-f','-o',mo_file,po_file]
			Logs.info('Generating '+po_file)
			subprocess.call(cmd)
	finally:
		# Restore the working directory even if msgfmt fails to spawn.
		os.chdir(pwd)
def build_i18n(bld,srcdir,dir,name,sources,copyright_holder=None):
	"""Full i18n pipeline: extract (pot), merge (po), then compile (mo)."""
	for step in (build_i18n_pot,build_i18n_po,build_i18n_mo):
		step(bld,srcdir,dir,name,sources,copyright_holder)
def cd_to_build_dir(ctx,appname):
	# chdir into the build output directory; nested under `appname` when
	# this is part of a top-level multi-project build (stack depth > 1).
	orig_dir=os.path.abspath(os.curdir)  # NOTE(review): computed but never used
	top_level=(len(ctx.stack_path)>1)
	if top_level:
		os.chdir(os.path.join('build',appname))
	else:
		os.chdir('build')
	Logs.pprint('GREEN',"Waf: Entering directory `%s'"%os.path.abspath(os.getcwd()))
def cd_to_orig_dir(ctx,child):
	"""Leave the build directory: two levels up for a child build, one otherwise."""
	levels=('..','..') if child else ('..',)
	os.chdir(os.path.join(*levels))
def pre_test(ctx,appname,dirs=['src']):
	"""Enter the build dir and zero lcov coverage counters before testing."""
	diropts=''
	for i in dirs:
		diropts+=' -d '+i
	cd_to_build_dir(ctx,appname)
	clear_log=open('lcov-clear.log','w')
	try:
		try:
			# Zero the counters; lcov output goes to lcov-clear.log.
			subprocess.call(('lcov %s -z'%diropts).split(),stdout=clear_log,stderr=clear_log)
		except:
			# NOTE(review): bare except also swallows KeyboardInterrupt.
			Logs.warn('Failed to run lcov, no coverage report will be generated')
	finally:
		clear_log.close()
def post_test(ctx,appname,dirs=['src'],remove=['*boost*','c++*']):
	"""Capture lcov coverage, render an HTML report, and leave the build dir.

	remove -- glob patterns stripped from the capture (system headers etc.)
	"""
	diropts=''
	for i in dirs:
		diropts+=' -d '+i
	coverage_log=open('lcov-coverage.log','w')
	coverage_lcov=open('coverage.lcov','w')
	coverage_stripped_lcov=open('coverage-stripped.lcov','w')
	try:
		try:
			base='.'
			if g_is_child:
				base='..'
			# Capture counters, strip unwanted patterns, then render HTML.
			subprocess.call(('lcov -c %s -b %s'%(diropts,base)).split(),stdout=coverage_lcov,stderr=coverage_log)
			subprocess.call(['lcov','--remove','coverage.lcov']+remove,stdout=coverage_stripped_lcov,stderr=coverage_log)
			if not os.path.isdir('coverage'):
				os.makedirs('coverage')
			subprocess.call('genhtml -o coverage coverage-stripped.lcov'.split(),stdout=coverage_log,stderr=coverage_log)
		except:
			# NOTE(review): bare except also swallows KeyboardInterrupt.
			Logs.warn('Failed to run lcov, no coverage report will be generated')
	finally:
		coverage_stripped_lcov.close()
		coverage_lcov.close()
		coverage_log.close()
	print('')
	Logs.pprint('GREEN',"Waf: Leaving directory `%s'"%os.path.abspath(os.getcwd()))
	top_level=(len(ctx.stack_path)>1)
	if top_level:
		cd_to_orig_dir(ctx,top_level)
	print('')
	Logs.pprint('BOLD','Coverage:',sep='')
	print('<file://%s>\n\n'%os.path.abspath('coverage/index.html'))
def run_test(ctx,appname,test,desired_status=0,dirs=['src'],name='',header=False):
	"""Run one test command and print PASS/FAIL; returns True on pass.

	test           -- command string, or argv list (joined for display)
	desired_status -- exit code that counts as success
	"""
	s=test
	if type(test)==type([]):
		s=' '.join(test)  # was ' '.join(i): NameError for list-valued tests
	if header:
		Logs.pprint('BOLD','** Test',sep='')
		Logs.pprint('NORMAL','%s'%s)
	cmd=test
	if Options.options.grind:
		# NOTE(review): this assumes `test` is a string; a list-valued
		# test combined with --grind would raise — confirm callers.
		cmd='valgrind '+test
	if subprocess.call(cmd,shell=True)==desired_status:
		Logs.pprint('GREEN','** Pass %s'%name)
		return True
	else:
		Logs.pprint('RED','** FAIL %s'%name)
		return False
def run_tests(ctx,appname,tests,desired_status=0,dirs=['src'],name='*',headers=False):
	"""Run each test via run_test() and print a colored PASS/FAIL summary.

	(Removed a dead `diropts` accumulation over `dirs`; `dirs` is still
	forwarded to run_test for interface compatibility.)
	"""
	failures=0
	for i in tests:
		if not run_test(ctx,appname,i,desired_status,dirs,i,headers):
			failures+=1
	print('')
	if failures==0:
		Logs.pprint('GREEN','** Pass: All %s.%s tests passed'%(appname,name))
	else:
		Logs.pprint('RED','** FAIL: %d %s.%s tests failed'%(failures,appname,name))
def run_ldconfig(ctx):
	"""After 'install', refresh the dynamic linker cache for LIBDIR.

	Skipped for staged installs (DESTDIR/--destdir set) and only run once
	per build context (RAN_LDCONFIG flag).
	"""
	if(ctx.cmd=='install'and not ctx.env['RAN_LDCONFIG']and ctx.env['LIBDIR']and not'DESTDIR'in os.environ and not Options.options.destdir):
		try:
			Logs.info("Waf: Running `/sbin/ldconfig %s'"%ctx.env['LIBDIR'])
			subprocess.call(['/sbin/ldconfig',ctx.env['LIBDIR']])
			ctx.env['RAN_LDCONFIG']=True
		except Exception:
			# Best-effort: ldconfig may be missing or need privileges.
			# (Was a bare 'except:', which also swallowed KeyboardInterrupt.)
			pass
def write_news(name,in_files,out_file,top_entries=None,extra_entries=None):
	"""Generate a Debian-changelog-style NEWS file from DOAP RDF data.

	name          -- project name used in entry headers
	in_files      -- Turtle files describing the project and its releases
	out_file      -- path of the NEWS file to write
	top_entries   -- optional dict collecting per-dist summary lines for a
	                 parent project's NEWS
	extra_entries -- optional dict of extra per-dist lines to append
	Requires rdflib; parse failures are logged and abort generation.
	"""
	import rdflib
	import textwrap
	from time import strftime,strptime
	doap=rdflib.Namespace('http://usefulinc.com/ns/doap#')
	dcs=rdflib.Namespace('http://ontologi.es/doap-changeset#')
	rdfs=rdflib.Namespace('http://www.w3.org/2000/01/rdf-schema#')
	foaf=rdflib.Namespace('http://xmlns.com/foaf/0.1/')
	rdf=rdflib.Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
	m=rdflib.ConjunctiveGraph()
	try:
		for i in in_files:
			m.parse(i,format='n3')
	except:
		Logs.warn('Error parsing data, unable to generate NEWS')
		return
	proj=m.value(None,rdf.type,doap.Project)
	# Pull in any referenced .ttl files (per-release descriptions).
	for f in m.triples([proj,rdfs.seeAlso,None]):
		if f[2].endswith('.ttl'):
			m.parse(f[2],format='n3')
	entries={}
	for r in m.triples([proj,doap.release,None]):
		release=r[2]
		revision=m.value(release,doap.revision,None)
		date=m.value(release,doap.created,None)
		blamee=m.value(release,dcs.blame,None)
		changeset=m.value(release,dcs.changeset,None)
		dist=m.value(release,doap['file-release'],None)
		if revision and date and blamee and changeset:
			entry='%s (%s) stable;\n'%(name,revision)
			for i in m.triples([changeset,dcs.item,None]):
				item=textwrap.wrap(m.value(i[2],rdfs.label,None),width=79)
				entry+='\n * '+'\n '.join(item)
				if dist and top_entries is not None:
					if not str(dist)in top_entries:
						top_entries[str(dist)]=[]
					top_entries[str(dist)]+=['%s: %s'%(name,'\n '.join(item))]
			if extra_entries:
				# NOTE(review): indexes extra_entries[str(dist)] directly —
				# presumably every dist has an entry; a missing key raises.
				for i in extra_entries[str(dist)]:
					entry+='\n * '+i
			entry+='\n\n --'
			blamee_name=m.value(blamee,foaf.name,None)
			blamee_mbox=m.value(blamee,foaf.mbox,None)
			if blamee_name and blamee_mbox:
				entry+=' %s <%s>'%(blamee_name,blamee_mbox.replace('mailto:',''))
			entry+=' %s\n\n'%(strftime('%a, %d %b %Y %H:%M:%S +0000',strptime(date,'%Y-%m-%d')))
			# Keyed by (date, revision) so output sorts newest-first below.
			entries[(date,revision)]=entry
		else:
			Logs.warn('Ignored incomplete %s release description'%name)
	if len(entries)>0:
		news=open(out_file,'w')
		for e in sorted(entries.keys(),reverse=True):
			news.write(entries[e])
		news.close()
|
gpl-2.0
|
sneaker-rohit/PI2-ns-3
|
src/lte/doc/source/conf.py
|
93
|
7620
|
# -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'lte'
# General information about the project.
project = u'LENA'
copyright = u'CTTC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v8'
# The full version, including alpha/beta/rc tags.
release = 'v8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
#htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
# ('lte-testing', 'lte-doc-testing.tex', u'LTE Simulator Testing Documentation', u'Centre Tecnologic de Telecomunicacions de Catalunya (CTTC)', 'manual'),
# ('lte-design', 'lte-doc-design.tex', u'LTE Simulator Design Documentation', u'Centre Tecnologic de Telecomunicacions de Catalunya (CTTC)', 'manual'),
# ('lte-user', 'lte-doc-user.tex', u'LTE Simulator User Documentation', u'Centre Tecnologic de Telecomunicacions de Catalunya (CTTC)', 'manual'),
('lte', 'lena-lte-module-doc.tex', u'The LENA ns-3 LTE Module Documentation', u'Centre Tecnològic de Telecomunicacions de Catalunya (CTTC)', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# add page breaks in the pdf. Level 1 is for top-level sections, level 2 for subsections, and so on.
pdf_break_level = 4
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-model-library', u'ns-3 Model Library',
[u'ns-3 project'], 1)
]
|
gpl-2.0
|
hshindo/POS-Tagging-benchmark
|
Theano/layer.py
|
1
|
1138
|
__author__ = 'hiroki'
import theano
import theano.tensor as T
import numpy as np
from nn_utils import sigmoid
class Layer(object):
    """A fully-connected Theano layer: output = activation(dot(input, W) + b)."""
    def __init__(self, rand, input=None, n_input=784, n_output=10, activation=None, W=None, b=None):
        # rand       -- RNG with a .uniform() method used to initialize W
        # input      -- symbolic Theano variable holding the layer input
        # n_input    -- number of input units
        # n_output   -- number of output units
        # activation -- elementwise nonlinearity; None means a linear layer
        # W, b       -- optional pre-built shared parameters
        self.input = input
        if W is None:
            # Uniform init in +/- sqrt(6 / (fan_in + fan_out))
            # (the Glorot/Xavier uniform formula).
            W_values = np.asarray(
                rand.uniform(low=-np.sqrt(6.0 / (n_input + n_output)),
                             high=np.sqrt(6.0 / (n_input + n_output)),
                             size=(n_input, n_output)),
                dtype=theano.config.floatX)
            if activation == sigmoid:
                # 4x wider init range when the nonlinearity is sigmoid.
                W_values *= 4.0
            W = theano.shared(value=W_values, name='W', borrow=True)
        if b is None:
            # Biases start at zero.
            b_values = np.zeros((n_output,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)
        self.W = W
        self.b = b
        linear_output = T.dot(input, self.W) + self.b
        if activation is None:
            self.output = linear_output
        else:
            self.output = activation(linear_output)
        # Parameters to be updated during training.
        self.params = [self.W, self.b]
|
mit
|
yize/grunt-tps
|
tasks/lib/python/Lib/python2.7/distutils/command/build.py
|
250
|
5437
|
"""distutils.command.build
Implements the Distutils 'build' command."""
__revision__ = "$Id$"
import sys, os
from distutils.util import get_platform
from distutils.core import Command
from distutils.errors import DistutilsOptionError
def show_compilers():
    """Print the list of available compiler types (--help-compiler)."""
    # Alias the import so the module-level wrapper is not shadowed confusingly.
    from distutils.ccompiler import show_compilers as _show_compilers
    _show_compilers()
class build(Command):
    """Distutils command that orchestrates the build_* sub-commands.

    Computes the concrete build directories from the user's options and then
    dispatches to whichever of build_py / build_clib / build_ext /
    build_scripts the distribution actually needs.
    """

    description = "build everything needed to install"

    user_options = [
        ('build-base=', 'b',
         "base directory for build library"),
        ('build-purelib=', None,
         "build directory for platform-neutral distributions"),
        ('build-platlib=', None,
         "build directory for platform-specific distributions"),
        ('build-lib=', None,
         # NOTE(review): this help string is missing its closing parenthesis
         # in the original source; preserved as-is.
         "build directory for all distribution (defaults to either " +
         "build-purelib or build-platlib"),
        ('build-scripts=', None,
         "build directory for scripts"),
        ('build-temp=', 't',
         "temporary build directory"),
        ('plat-name=', 'p',
         "platform name to build for, if supported "
         "(default: %s)" % get_platform()),
        ('compiler=', 'c',
         "specify the compiler type"),
        ('debug', 'g',
         "compile extensions and libraries with debugging information"),
        ('force', 'f',
         "forcibly build everything (ignore file timestamps)"),
        ('executable=', 'e',
         "specify final destination interpreter path (build.py)"),
        ]

    boolean_options = ['debug', 'force']

    help_options = [
        ('help-compiler', None,
         "list available compilers", show_compilers),
        ]

    def initialize_options(self):
        """Set every option to its pre-parse default."""
        self.build_base = 'build'
        # these are decided only after 'build_base' has its final value
        # (unless overridden by the user or client)
        self.build_purelib = None
        self.build_platlib = None
        self.build_lib = None
        self.build_temp = None
        self.build_scripts = None
        self.compiler = None
        self.plat_name = None
        self.debug = None
        self.force = 0
        self.executable = None

    def finalize_options(self):
        """Derive the concrete build directories from the given options."""
        if self.plat_name is None:
            self.plat_name = get_platform()
        else:
            # plat-name only supported for windows (other platforms are
            # supported via ./configure flags, if at all).  Avoid misleading
            # other platforms.
            if os.name != 'nt':
                raise DistutilsOptionError(
                    "--plat-name only supported on Windows (try "
                    "using './configure --help' on your platform)")

        # e.g. ".linux-x86_64-2.7" -- distinguishes per-platform build dirs.
        plat_specifier = ".%s-%s" % (self.plat_name, sys.version[0:3])

        # Make it so Python 2.x and Python 2.x with --with-pydebug don't
        # share the same build directories. Doing so confuses the build
        # process for C modules
        if hasattr(sys, 'gettotalrefcount'):
            plat_specifier += '-pydebug'

        # 'build_purelib' and 'build_platlib' just default to 'lib' and
        # 'lib.<plat>' under the base build directory.  We only use one of
        # them for a given distribution, though --
        if self.build_purelib is None:
            self.build_purelib = os.path.join(self.build_base, 'lib')
        if self.build_platlib is None:
            self.build_platlib = os.path.join(self.build_base,
                                              'lib' + plat_specifier)

        # 'build_lib' is the actual directory that we will use for this
        # particular module distribution -- if user didn't supply it, pick
        # one of 'build_purelib' or 'build_platlib'.
        if self.build_lib is None:
            if self.distribution.ext_modules:
                self.build_lib = self.build_platlib
            else:
                self.build_lib = self.build_purelib

        # 'build_temp' -- temporary directory for compiler turds,
        # "build/temp.<plat>"
        if self.build_temp is None:
            self.build_temp = os.path.join(self.build_base,
                                           'temp' + plat_specifier)
        if self.build_scripts is None:
            self.build_scripts = os.path.join(self.build_base,
                                              'scripts-' + sys.version[0:3])

        if self.executable is None:
            self.executable = os.path.normpath(sys.executable)

    def run(self):
        """Run every applicable build_* sub-command, in declared order."""
        # Run all relevant sub-commands.  This will be some subset of:
        #  - build_py      - pure Python modules
        #  - build_clib    - standalone C libraries
        #  - build_ext     - Python extensions
        #  - build_scripts - (Python) scripts
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

    # -- Predicates for the sub-command list ---------------------------
    # Each returns True when the distribution has content of that kind;
    # used below to decide which sub-commands actually run.

    def has_pure_modules (self):
        return self.distribution.has_pure_modules()

    def has_c_libraries (self):
        return self.distribution.has_c_libraries()

    def has_ext_modules (self):
        return self.distribution.has_ext_modules()

    def has_scripts (self):
        return self.distribution.has_scripts()

    # (command name, predicate) pairs consumed by get_sub_commands().
    sub_commands = [('build_py', has_pure_modules),
                    ('build_clib', has_c_libraries),
                    ('build_ext', has_ext_modules),
                    ('build_scripts', has_scripts),
                    ]
|
mit
|
sinhrks/numpy
|
numpy/polynomial/__init__.py
|
123
|
1119
|
"""
A sub-package for efficiently dealing with polynomials.
Within the documentation for this sub-package, a "finite power series,"
i.e., a polynomial (also referred to simply as a "series") is represented
by a 1-D numpy array of the polynomial's coefficients, ordered from lowest
order term to highest. For example, array([1,2,3]) represents
``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial
applicable to the specific module in question, e.g., `polynomial` (which
"wraps" the "standard" basis) or `chebyshev`. For optimal performance,
all operations on polynomials, including evaluation at an argument, are
implemented as operations on the coefficients. Additional (module-specific)
information can be found in the docstring for the module of interest.
"""
from __future__ import division, absolute_import, print_function
from .polynomial import Polynomial
from .chebyshev import Chebyshev
from .legendre import Legendre
from .hermite import Hermite
from .hermite_e import HermiteE
from .laguerre import Laguerre
from numpy.testing import Tester
# Expose numpy's test runner hooks at the package level, so users can run
# numpy.polynomial.test() / .bench() directly.
test = Tester().test
bench = Tester().bench
|
bsd-3-clause
|
kursitet/edx-platform
|
lms/djangoapps/course_api/tests/test_permissions.py
|
61
|
1738
|
"""
Test authorization functions
"""
from django.contrib.auth.models import AnonymousUser
from django.test import TestCase
from .mixins import CourseApiFactoryMixin
from ..permissions import can_view_courses_for_username
class ViewCoursesForUsernameTestCase(CourseApiFactoryMixin, TestCase):
    """
    Verify functionality of view_courses_for_username.
    Any user should be able to view their own courses, and staff users
    should be able to view anyone's courses.
    """
    @classmethod
    def setUpClass(cls):
        # One staff user, one regular ("honor") user, and Django's
        # AnonymousUser, shared by all tests in this class.
        super(ViewCoursesForUsernameTestCase, cls).setUpClass()
        cls.staff_user = cls.create_user('staff', is_staff=True)
        cls.honor_user = cls.create_user('honor', is_staff=False)
        cls.anonymous_user = AnonymousUser()

    def test_for_staff(self):
        # Staff may view their own courses.
        self.assertTrue(can_view_courses_for_username(self.staff_user, self.staff_user.username))

    def test_for_honor(self):
        # A regular user may view their own courses.
        self.assertTrue(can_view_courses_for_username(self.honor_user, self.honor_user.username))

    def test_for_staff_as_honor(self):
        # Staff may view another user's courses.
        self.assertTrue(can_view_courses_for_username(self.staff_user, self.honor_user.username))

    def test_for_honor_as_staff(self):
        # A regular user may NOT view someone else's courses.
        self.assertFalse(can_view_courses_for_username(self.honor_user, self.staff_user.username))

    def test_for_none_as_staff(self):
        # A None username is a programming error, not a permission question.
        with self.assertRaises(TypeError):
            can_view_courses_for_username(self.staff_user, None)

    def test_for_anonymous(self):
        # An anonymous user asking about "their own" (empty) username is allowed.
        self.assertTrue(can_view_courses_for_username(self.anonymous_user, self.anonymous_user.username))

    def test_for_anonymous_as_honor(self):
        # An anonymous user may not view a real user's courses.
        self.assertFalse(can_view_courses_for_username(self.anonymous_user, self.honor_user.username))
|
agpl-3.0
|
mlalic/servo
|
tests/wpt/web-platform-tests/html/semantics/embedded-content/media-elements/track/track-element/cors/support/cors-tester.py
|
238
|
1454
|
from wptserve.handlers import HTTPException
import urllib
def main(request, response):
    """wptserve handler backing the CORS <track> element tests.

    Every request carries an ``id`` query parameter that keys a slot in the
    server stash.  Control modes (``read`` / ``cleanup`` / ``delete-cookie``)
    inspect or clear that slot; otherwise the handler logs the Origin header
    and cookie it saw into the slot and serves a WebVTT payload (or a
    redirect when ``redirect`` is given).
    """
    if request.method != "GET":
        raise HTTPException(400, message="Method was not GET")

    if not "id" in request.GET:
        raise HTTPException(400, message="No id")

    id = request.GET['id']
    if "read" in request.GET:
        # Reader mode: return (and consume) whatever the tracked request stored.
        data = request.server.stash.take(id)
        if data is None:
            response.set_error(404, "Tried to read data not yet set")
            return
        return [("Content-Type", "text/plain")], data
    elif "cleanup" in request.GET:
        # Drop the stash entry without reporting its contents.
        request.server.stash.take(id)
        return "OK"
    elif "delete-cookie" in request.GET:
        response.delete_cookie(id)
        return [("Content-Type", "text/plain")], "OK"

    if "origin" in request.GET:
        # Reflect the requested origin and allow credentials so the tests
        # can exercise CORS-with-cookies behaviour.
        response.headers.set('Access-Control-Allow-Origin', request.GET['origin'])
        response.headers.set('Access-Control-Allow-Credentials', 'true')

    # Record what this request looked like: its Origin header and cookie.
    cors = request.headers.get("origin", "no")
    # NOTE(review): when the cookie is absent, first(id, "no") presumably
    # returns a value that still has a .value attribute -- confirm against
    # wptserve's cookie API; a plain "no" string would raise here.
    cookie = request.cookies.first(id, "no")
    line = 'cors = ' + cors + ' | cookie = ' + cookie.value;

    # Append to any earlier record for the same id (take() removed it, so
    # put() re-stores the combined log).
    data = request.server.stash.take(id)
    if data is not None:
        line = data + "\n" + line
    request.server.stash.put(id, line)

    if "redirect" in request.GET:
        response.status = 302
        response.headers.set('Location', request.GET['redirect'])
    else:
        return """WEBVTT
00:00:00.000 --> 00:00:10.000
Test"""
|
mpl-2.0
|
swjtuacmer/Ranker
|
Ranker/venv/lib/python2.7/site-packages/wheel/test/test_tagopt.py
|
326
|
5927
|
"""
Tests for the bdist_wheel tag options (--python-tag, --universal, and
--plat-name)
"""
import sys
import shutil
import pytest
import py.path
import tempfile
import subprocess
SETUP_PY = """\
from setuptools import setup, Extension
setup(
name="Test",
version="1.0",
author_email="[email protected]",
py_modules=["test"],
{ext_modules}
)
"""
EXT_MODULES = "ext_modules=[Extension('_test', sources=['test.c'])],"
@pytest.fixture
def temp_pkg(request, ext=False):
    """Create a throwaway package directory containing test.py + setup.py.

    With ``ext=True`` a trivial C source is added and setup.py declares an
    extension module.  The directory is removed when the test finishes.
    Returns the package root as a py.path.local.
    """
    pkg_dir = tempfile.mkdtemp()
    request.addfinalizer(lambda: shutil.rmtree(pkg_dir))
    root = py.path.local(pkg_dir)
    root.join('test.py').write('print("Hello, world")')
    if ext:
        root.join('test.c').write('#include <stdio.h>')
        contents = SETUP_PY.format(ext_modules=EXT_MODULES)
    else:
        contents = SETUP_PY.format(ext_modules='')
    root.join('setup.py').write(contents)
    return root
@pytest.fixture
def temp_ext_pkg(request):
    # Variant of temp_pkg that includes a C extension module.
    # NOTE(review): this calls the temp_pkg fixture *function* directly,
    # which newer pytest versions forbid -- fine for the pytest version
    # this suite targets.
    return temp_pkg(request, ext=True)
def test_default_tag(temp_pkg):
    """With no tag options, the wheel is tagged py<major>-none-any."""
    subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel'],
                          cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename == 'Test-1.0-py%s-none-any.whl' % (sys.version[0],)
    assert wheels[0].ext == '.whl'
def test_explicit_tag(temp_pkg):
    """--python-tag on the command line overrides the default python tag."""
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel', '--python-tag=py32'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py32-')
    assert wheels[0].ext == '.whl'
def test_universal_tag(temp_pkg):
    """--universal yields the combined py2.py3 tag."""
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel', '--universal'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
    assert wheels[0].ext == '.whl'
def test_universal_beats_explicit_tag(temp_pkg):
    """--universal wins over an explicit --python-tag given alongside it."""
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel', '--universal', '--python-tag=py32'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
    assert wheels[0].ext == '.whl'
def test_universal_in_setup_cfg(temp_pkg):
    """universal=1 in the [bdist_wheel] section of setup.cfg is honoured."""
    temp_pkg.join('setup.cfg').write('[bdist_wheel]\nuniversal=1')
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
    assert wheels[0].ext == '.whl'
def test_pythontag_in_setup_cfg(temp_pkg):
    """python_tag in the [bdist_wheel] section of setup.cfg is honoured."""
    temp_pkg.join('setup.cfg').write('[bdist_wheel]\npython_tag=py32')
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py32-')
    assert wheels[0].ext == '.whl'
def test_legacy_wheel_section_in_setup_cfg(temp_pkg):
    """The legacy [wheel] section name is still accepted for universal=1."""
    temp_pkg.join('setup.cfg').write('[wheel]\nuniversal=1')
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.startswith('Test-1.0-py2.py3-')
    assert wheels[0].ext == '.whl'
def test_plat_name_purepy(temp_pkg):
    """--plat-name is applied to a pure-Python wheel (dots become underscores)."""
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=testplat.pure'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.endswith('-testplat_pure.whl')
    assert wheels[0].ext == '.whl'
def test_plat_name_ext(temp_ext_pkg):
    """--plat-name is applied to an extension wheel.

    Skipped (not failed) when the host cannot compile C extensions.
    """
    try:
        subprocess.check_call(
            [sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=testplat.arch'],
            cwd=str(temp_ext_pkg))
    except subprocess.CalledProcessError:
        pytest.skip("Cannot compile C Extensions")
    dist_dir = temp_ext_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.endswith('-testplat_arch.whl')
    assert wheels[0].ext == '.whl'
def test_plat_name_purepy_in_setupcfg(temp_pkg):
    """plat_name from setup.cfg is applied to a pure-Python wheel."""
    temp_pkg.join('setup.cfg').write('[bdist_wheel]\nplat_name=testplat.pure')
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel'],
        cwd=str(temp_pkg))
    dist_dir = temp_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.endswith('-testplat_pure.whl')
    assert wheels[0].ext == '.whl'
def test_plat_name_ext_in_setupcfg(temp_ext_pkg):
    """plat_name from setup.cfg is applied to an extension wheel.

    Skipped (not failed) when the host cannot compile C extensions.
    """
    temp_ext_pkg.join('setup.cfg').write('[bdist_wheel]\nplat_name=testplat.arch')
    try:
        subprocess.check_call(
            [sys.executable, 'setup.py', 'bdist_wheel'],
            cwd=str(temp_ext_pkg))
    except subprocess.CalledProcessError:
        pytest.skip("Cannot compile C Extensions")
    dist_dir = temp_ext_pkg.join('dist')
    assert dist_dir.check(dir=1)
    wheels = dist_dir.listdir()
    assert len(wheels) == 1
    assert wheels[0].basename.endswith('-testplat_arch.whl')
    assert wheels[0].ext == '.whl'
|
mit
|
cb1234/pynet-test
|
pynet/learnpy_ecourse/class7/ex2_ospf_parsing.py
|
4
|
1353
|
#!/usr/bin/env python
'''
Open the ./OSPF_DATA/ospf_single_interface.txt and extract the interface, IP
address, area, type, cost, hello timer, and dead timer. Use regular expressions
to accomplish your extraction.
Your output should look similar to the following:
Int: GigabitEthernet0/1
IP: 172.16.13.150/29
Area: 303953
Type: BROADCAST
Cost: 1
Hello: 10
Dead: 40
'''
import re
f = open('./OSPF_DATA/ospf_single_interface.txt')
ospf_dict = {}
for line in f:
intf = re.search(r"^(.+) is up, line protocol is up", line)
if intf:
ospf_dict['Int'] = intf.group(1)
ip_addr = re.search(r"Internet Address (.+), Area (.+), Attached", line)
if ip_addr:
ospf_dict['IP'] = ip_addr.group(1)
ospf_dict['Area'] = ip_addr.group(2)
network_type = re.search(r", Network Type (.+), Cost: (.+)", line)
if network_type:
ospf_dict['Type'] = network_type.group(1)
ospf_dict['Cost'] = network_type.group(2)
ospf_timers = re.search(r"Timer intervals configured, Hello (.+), Dead (.+?),", line)
if ospf_timers:
ospf_dict['Hello'] = ospf_timers.group(1)
ospf_dict['Dead'] = ospf_timers.group(2)
# Print output
print
field_order = ('Int', 'IP', 'Area', 'Type', 'Cost', 'Hello', 'Dead')
for k in field_order:
print "%10s: %-20s" % (k, ospf_dict[k])
print
|
apache-2.0
|
Guts/Metadator
|
test/test_odf_genexample.py
|
1
|
12252
|
# -*- coding: UTF-8 -*-
#!/usr/bin/env python
from __future__ import unicode_literals
# odfpy_gen_example.py
# http://mashupguide.net/1.0/html/ch17s04.xhtml
"""
Description: This program used odfpy to generate a simple ODF text document
odfpy: http://opendocumentfellowship.com/projects/odfpy
documentation for odfpy: http://opendocumentfellowship.com/files/api-for-
odfpy.odt
"""
from odf.opendocument import OpenDocumentText
from odf.style import Style, TextProperties, ParagraphProperties, ListLevelProperties, FontFace, TableCellProperties
from odf.text import P, H, A, S, Section, List, ListItem, ListStyle, ListLevelStyleBullet, ListLevelStyleNumber, ListLevelStyleBullet, Span
from odf.text import Note, NoteBody, NoteCitation
from odf.office import FontFaceDecls
from odf.table import Table, TableColumn, TableRow, TableCell
from odf.draw import Frame, Image
# fname is the path for the output file
fname= r'test_odfpy_gen_example.odt';
#fname='D:\Document\PersonalInfoRemixBook\examples\ch17\odfpy_gen_example.odt'

# instantiate an ODF text document (odt)
textdoc = OpenDocumentText()

## Styles
# NOTE: the bare triple-quoted strings throughout this script are no-op
# expression statements kept as reference snippets of the XML each block of
# odfpy calls is meant to produce.
# styles
"""
<style:style style:name="Standard" style:family="paragraph" style:class="text"/>
<style:style style:name="Text_20_body" style:display-name="Text body"
style:family="paragraph"
style:parent-style-name="Standard" style:class="text">
<style:paragraph-properties fo:margin-top="0in" fo:margin-bottom="0.0835in"/>
</style:style>
"""
s = textdoc.styles
StandardStyle = Style(name="Standard", family="paragraph")
##StandardStyle.addAttribute('class','text')
s.addElement(StandardStyle)
TextBodyStyle = Style(name="Text_20_body",family="paragraph", parentstylename='Standard', displayname="Text body")
##TextBodyStyle.addAttribute('class','text')
TextBodyStyle.addElement(ParagraphProperties(margintop="0in", marginbottom="0.0835in"))
s.addElement(TextBodyStyle)

# font declarations
"""
<office:font-face-decls>
<style:font-face style:name="Arial" svg:font-family="Arial"
style:font-family-generic="swiss"
style:font-pitch="variable"/>
</office:font-face-decls>
"""
textdoc.fontfacedecls.addElement((FontFace(name="Arial",fontfamily="Arial",
fontfamilygeneric="swiss",fontpitch="variable")))

# Automatic Style
# P1 -- paragraph style binding list items to list style L1
"""
<style:style style:name="P1" style:family="paragraph"
style:parent-style-name="Standard"
style:list-style-name="L1"/>
"""
P1style = Style(name="P1", family="paragraph", parentstylename="Standard", liststylename="L1")
textdoc.automaticstyles.addElement(P1style)

# L1 -- bulleted (unordered) list style
"""
<text:list-style style:name="L1">
<text:list-level-style-bullet text:level="1"
text:style-name="Numbering_20_Symbols"
style:num-suffix="." text:bullet-char="•">
<style:list-level-properties text:space-before="0.25in"
text:min-label-width="0.25in"/>
<style:text-properties style:font-name="StarSymbol"/>
</text:list-level-style-bullet>
</text:list-style>
"""
L1style=ListStyle(name="L1")
# u'\u2022' is the bullet character (http://www.unicode.org/charts/PDF/U2000.pdf)
bullet1 = ListLevelStyleBullet(level="1", stylename="Numbering_20_Symbols", numsuffix=".", bulletchar=u'\u2022')
L1prop1 = ListLevelProperties(spacebefore="0.25in", minlabelwidth="0.25in")
bullet1.addElement(L1prop1)
L1style.addElement(bullet1)
textdoc.automaticstyles.addElement(L1style)

# P6 -- paragraph style binding list items to list style L5
"""
<style:style style:name="P6" style:family="paragraph"
style:parent-style-name="Standard"
style:list-style-name="L5"/>
"""
P6style = Style(name="P6", family="paragraph", parentstylename="Standard", liststylename="L5")
textdoc.automaticstyles.addElement(P6style)

# L5 -- numbered (ordered) list style
"""
<text:list-style style:name="L5">
<text:list-level-style-number text:level="1"
text:style-name="Numbering_20_Symbols"
style:num-suffix="." style:num-format="1">
<style:list-level-properties text:space-before="0.25in"
text:min-label-width="0.25in"/>
</text:list-level-style-number>
</text:list-style>
"""
L5style=ListStyle(name="L5")
numstyle1 = ListLevelStyleNumber(level="1", stylename="Numbering_20_Symbols", numsuffix=".", numformat='1')
L5prop1 = ListLevelProperties(spacebefore="0.25in", minlabelwidth="0.25in")
numstyle1.addElement(L5prop1)
L5style.addElement(numstyle1)
textdoc.automaticstyles.addElement(L5style)

# T1 -- italic text span style
"""
<style:style style:name="T1" style:family="text">
<style:text-properties fo:font-style="italic" style:font-style-asian="italic"
style:font-style-complex="italic"/>
</style:style>
"""
T1style = Style(name="T1", family="text")
T1style.addElement(TextProperties(fontstyle="italic",fontstyleasian="italic", fontstylecomplex="italic"))
textdoc.automaticstyles.addElement(T1style)

# T2 -- bold text span style
"""
<style:style style:name="T2" style:family="text">
<style:text-properties fo:font-weight="bold" style:font-weight-asian="bold"
style:font-weight-complex="bold"/>
</style:style>
"""
T2style = Style(name="T2", family="text")
T2style.addElement(TextProperties(fontweight="bold",fontweightasian="bold", fontweightcomplex="bold"))
textdoc.automaticstyles.addElement(T2style)

# T5 -- red Arial text span style
"""
<style:style style:name="T5" style:family="text">
<style:text-properties fo:color="#ff0000" style:font-name="Arial"/>
</style:style>
"""
T5style = Style(name="T5", family="text")
T5style.addElement(TextProperties(color="#ff0000",fontname="Arial"))
textdoc.automaticstyles.addElement(T5style)

# table -- bordered table-cell style
"""
<style:style style:name="Tableau1.A1" style:family="table-cell">
<style:table-cell-properties fo:padding="0.097cm" fo:border="0.05pt solid #000000"/>
"""
TAB_style = Style(name="Table", family="table-cell")
TAB_style.addElement(TableCellProperties(border="0.05pt solid #000000"))
textdoc.automaticstyles.addElement(TAB_style)
# now construct what goes into <office:text>
h=H(outlinelevel=1, text='Purpose (Heading 1)')
textdoc.text.addElement(h)
p = P(text="The following sections illustrate various possibilities in ODF Text",stylename='Text_20_body')
textdoc.text.addElement(p)
textdoc.text.addElement(H(outlinelevel=2,text='A simple series of paragraphs (Heading 2)'))
textdoc.text.addElement(P(text="This section contains a series of paragraphs.", stylename='Text_20_body'))
textdoc.text.addElement(P(text="This is a second paragraph.",stylename='Text_20_body'))
textdoc.text.addElement(P(text="And a third paragraph.", stylename='Text_20_body'))
textdoc.text.addElement(H(outlinelevel=2,text='A section with lists (Heading 2)'))
textdoc.text.addElement(P(text="Elements to illustrate:"))
# add the first list (unordered list)
# NOTE(review): 'bo ld' below looks like a typo in the original text content;
# preserved as-is since it is runtime document text.
textList = List(stylename="L1")
item = ListItem()
item.addElement(P(text='hyperlinks', stylename="P1"))
textList.addElement(item)
item = ListItem()
item.addElement(P(text='italics and bo ld text', stylename="P1"))
textList.addElement(item)
item = ListItem()
item.addElement(P(text='lists (ordered and unordered)', stylename="P1"))
textList.addElement(item)
textdoc.text.addElement(textList)
# add the second (ordered) list
# NOTE(review): stylename="P5" is referenced here but only "P6" is defined
# above -- presumably a leftover from the tutorial this was copied from;
# confirm before relying on the numbering style.
textdoc.text.addElement(P(text="How to figure out ODF"))
textList = List(stylename="L5")
#item = ListItem(startvalue=P(text='item 1'))
item = ListItem()
item.addElement(P(text='work out the content.xml tags', stylename="P5"))
textList.addElement(item)
item = ListItem()
item.addElement(P(text='work styles into the mix', stylename="P5"))
textList.addElement(item)
item = ListItem()
item.addElement(P(text='figure out how to apply what we learned to spreadsheets and presentations', stylename="P5"))
textList.addElement(item)
textdoc.text.addElement(textList)
# A paragraph with bold, italics, font change, and hyperlinks
"""
<text:p>The <text:span text:style-name="T1">URL</text:span> for <text:span
text:style-name="T5">Flickr</text:span> is <text:a xlink:type="simple"
xlink:href="http://www.flickr.com/"
>http://www.flickr.com</text:a>. <text:s/>The <text:span
text:style-name="T2"
>API page</text:span> is <text:a xlink:type="simple"
xlink:href="http://www.flickr.com/services/api/"
>http://www.flickr.com/services/api/</text:a></text:p>
"""
p = P(text='The ')
# italicized URL
s = Span(text='URL', stylename='T1')
p.addElement(s)
p.addText(' for ')
# Flickr in red and Arial font
p.addElement(Span(text='Flickr',stylename='T5'))
p.addText(' is ')
# link
link = A(type="simple",href="http://www.flickr.com", text="http://www.flickr.com")
p.addElement(link)
p.addText('. The ')
# API page in bold
s = Span(text='API page', stylename='T2')
p.addElement(s)
p.addText(' is ')
link = A(type="simple",href="http://www.flickr.com/services/api",text="http://www.flickr.com/services/api")
p.addElement(link)
textdoc.text.addElement(p)
# add the table
"""
<table:table-column table:number-columns-repeated="3"/>
"""
section = Section(name="section1")
textdoc.text.addElement(section)
# NOTE(review): "\<hr>" is not a valid Python escape, so the backslash is
# kept literally in the link text -- presumably intentional as a visual
# separator; confirm.
line = A(type="simple", href="", text="\<hr>")
p=P()
p.addElement(line)
textdoc.text.addElement(p)
textdoc.text.addElement(H(outlinelevel=1,text='A Table (Heading 1)'))
table = Table(name="Table 1")
table.addElement(TableColumn(numbercolumnsrepeated="3"))
# first row (header: Website / Description / URL)
tr = TableRow()
table.addElement(tr)
tc = TableCell(valuetype="string", stylename="Table")
tc.addElement(P(text='Website'))
tr.addElement(tc)
tc = TableCell(valuetype="string")
tc.addElement(P(text='Description'))
tr.addElement(tc)
tc = TableCell(valuetype="string")
tc.addElement(P(text='URL'))
tr.addElement(tc)
# second row (Flickr)
tr = TableRow()
table.addElement(tr)
tc = TableCell(valuetype="string")
tc.addElement(P(text='Flickr', stylename="Table"))
tr.addElement(tc)
tc = TableCell(valuetype="string")
tc.addElement(P(text='A social photo sharing site'))
tr.addElement(tc)
tc = TableCell(valuetype="string")
link = A(type="simple",href="http://www.flickr.com", text="http://www.flickr.com")
p = P()
p.addElement(link)
tc.addElement(p)
tr.addElement(tc)
# third row (Google Maps)
tr = TableRow()
table.addElement(tr)
tc = TableCell(valuetype="string")
tc.addElement(P(text='Google Maps'))
tr.addElement(tc)
tc = TableCell(valuetype="string")
tc.addElement(P(text='An online map'))
tr.addElement(tc)
tc = TableCell(valuetype="string")
link = A(type="simple",href="http://maps.google.com", text="http://maps.google.com")
p = P()
p.addElement(link)
tc.addElement(p)
tr.addElement(tc)
textdoc.text.addElement(table)
# paragraph with footnote
"""
<text:h text:outline-level="1">Footnotes (Heading 1)</text:h>
<text:p>This sentence has an accompanying footnote.<text:note text:id="ftn0"
text:note-class="footnote">
<text:note-citation>1</text:note-citation>
<text:note-body>
<text:p text:style-name="Footnote">You are reading a footnote.</text:p>
</text:note-body>
</text:note>
<text:s text:c="2"/>Where does the text after a footnote go?</text:p>
"""
textdoc.text.addElement(H(outlinelevel=1,text='Footnotes (Heading 1)'))
p = P()
textdoc.text.addElement(p)
p.addText("This sentence has an accompanying footnote.")
note = Note(id="ftn0", noteclass="footnote")
p.addElement(note)
note.addElement(NoteCitation(text='1'))
notebody = NoteBody()
note.addElement(notebody)
notebody.addElement(P(stylename="Footnote", text="You are reading a footnote."))
# S(c=2) emits two text spaces after the footnote anchor
p.addElement(S(c=2))
p.addText("Where does the text after a footnote go?")
# Insert the photo
"""
<text:h text:outline-level="1">An Image</text:h>
<text:p>
<draw:frame draw:name="graphics1" text:anchor-type="paragraph"
svg:width="5in"
svg:height="6.6665in" draw:z-index="0">
<draw:image xlink:href="Pictures/campanile_fog.jpg" xlink:type="simple"
xlink:show="embed"
xlink:actuate="onLoad"/>
</draw:frame>
</text:p>
"""
textdoc.text.addElement(H(outlinelevel=1,text='An Image'))
p = P()
textdoc.text.addElement(p)
# add the image
# img_path is the local path of the image to include
img_path = r'..\data\img\metadator.png';
#img_path = 'D:\Document\PersonalInfoRemixBook\examples\ch17\campanile_fog.jpg'
# addPicture copies the file into the package and returns its internal href
href = textdoc.addPicture(img_path)
f = Frame(name="graphics1", anchortype="paragraph", width="5in", height="6.6665in",
zindex="0")
p.addElement(f)
img = Image(href=href, type="simple", show="embed", actuate="onLoad")
f.addElement(img)
# save the document
textdoc.save(fname)
|
gpl-3.0
|
Dhivyap/ansible
|
lib/ansible/modules/cloud/google/gcp_mlengine_model_info.py
|
3
|
6467
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_mlengine_model_info
description:
- Gather info for GCP Model
short_description: Gather info for GCP Model
version_added: '2.9'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- for authentication, you can set service_account_file using the c(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the c(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a model
gcp_mlengine_model_info:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
name:
description:
- The name specified for the model.
returned: success
type: str
description:
description:
- The description specified for the model when it was created.
returned: success
type: str
defaultVersion:
description:
- The default version of the model. This version will be used to handle prediction
requests that do not specify a version.
returned: success
type: complex
contains:
name:
description:
- The name specified for the version when it was created.
returned: success
type: str
regions:
description:
- The list of regions where the model is going to be deployed.
- Currently only one region per model is supported .
returned: success
type: list
onlinePredictionLogging:
description:
- If true, online prediction access logs are sent to StackDriver Logging.
returned: success
type: bool
onlinePredictionConsoleLogging:
description:
- If true, online prediction nodes send stderr and stdout streams to Stackdriver
Logging.
returned: success
type: bool
labels:
description:
- One or more labels that you can add, to organize your models.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Module entry point: list the project's ML Engine models and exit."""
    # No module-specific arguments; GcpModule supplies the shared GCP ones
    # (project, auth_kind, scopes, ...).
    module = GcpModule(argument_spec=dict())

    if not module.params['scopes']:
        # Default to the broadest cloud scope when the playbook gives none.
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    return_value = {'resources': fetch_list(module, collection(module))}
    module.exit_json(**return_value)
def collection(module):
    """Return the ML Engine models list URL for the module's project."""
    base = "https://ml.googleapis.com/v1/projects/{project}/models"
    return base.format(**module.params)
def fetch_list(module, link):
    """GET `link` (handling pagination) and return the 'models' array."""
    auth = GcpSession(module, 'mlengine')
    return auth.list(link, return_if_object, array_name='models')
def return_if_object(module, response):
    """Decode an API response into a dict, or fail the module on API errors.

    Returns None for 404 (not found) and 204 (no content); otherwise the
    parsed JSON body.
    """
    # If not found, return nothing.
    if response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        # json.decoder.JSONDecodeError on py3, ValueError fallback on py2.
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    # Surface API-level errors embedded in an otherwise-successful body.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
if __name__ == "__main__":
main()
|
gpl-3.0
|
writefaruq/lionface-app
|
django/core/serializers/json.py
|
9
|
2118
|
"""
Serialize data to/from JSON
"""
import datetime
import decimal
from StringIO import StringIO
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import datetime_safe
from django.utils import simplejson
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.
    """
    internal_use_only = False

    def end_serialization(self):
        # Strip options that are consumed by the serialization framework
        # itself and must not be forwarded to simplejson.dump().
        for opt in ('stream', 'fields', 'use_natural_keys'):
            self.options.pop(opt, None)
        simplejson.dump(self.objects, self.stream, cls=DjangoJSONEncoder, **self.options)

    def getvalue(self):
        # Only in-memory streams (e.g. StringIO) expose getvalue().
        getvalue = getattr(self.stream, 'getvalue', None)
        if callable(getvalue):
            return getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.
    """
    # Accept either a raw string or a file-like object.
    if isinstance(stream_or_string, basestring):
        stream = StringIO(stream_or_string)
    else:
        stream = stream_or_string
    # Parse once, then hand the python-native structure to the base deserializer.
    parsed = simplejson.load(stream)
    for obj in PythonDeserializer(parsed, **options):
        yield obj
class DjangoJSONEncoder(simplejson.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time and decimal types.
    """
    DATE_FORMAT = "%Y-%m-%d"
    TIME_FORMAT = "%H:%M:%S"

    def default(self, o):
        # Order matters: datetime is a subclass of date, so check it first.
        if isinstance(o, datetime.datetime):
            # datetime_safe handles dates outside the platform strftime range.
            safe = datetime_safe.new_datetime(o)
            return safe.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT))
        if isinstance(o, datetime.date):
            return datetime_safe.new_date(o).strftime(self.DATE_FORMAT)
        if isinstance(o, datetime.time):
            return o.strftime(self.TIME_FORMAT)
        if isinstance(o, decimal.Decimal):
            # Stringify to preserve precision instead of converting to float.
            return str(o)
        return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
|
bsd-3-clause
|
jonashaag/ansible
|
lib/ansible/cli/pull.py
|
81
|
9626
|
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
########################################################
import datetime
import os
import platform
import random
import shutil
import socket
import sys
import time
from ansible.errors import AnsibleOptionsError
from ansible.cli import CLI
from ansible.plugins import module_loader
from ansible.utils.cmd_functions import run_cmd
########################################################
class PullCLI(CLI):
    ''' code behind ansible ad-hoc cli'''

    # Defaults used when the caller does not override them on the command line.
    DEFAULT_REPO_TYPE = 'git'
    DEFAULT_PLAYBOOK = 'local.yml'
    # Maps try_playbook() return codes to human-readable error messages.
    PLAYBOOK_ERRORS = {
        1: 'File does not exist',
        2: 'File is not readable'
    }
    # Repository modules this CLI knows how to drive for the checkout step.
    SUPPORTED_REPO_MODULES = ['git']

    def parse(self):
        ''' create an options parser for bin/ansible '''
        self.parser = CLI.base_parser(
            usage='%prog <host-pattern> [options]',
            connect_opts=True,
            vault_opts=True,
            runtask_opts=True,
            subset_opts=True,
            inventory_opts=True,
            module_opts=True,
        )

        # options unique to pull
        self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run')
        self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
            help='only run the playbook if the repository has been updated')
        self.parser.add_option('-s', '--sleep', dest='sleep', default=None,
            help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests')
        self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
            help='run the playbook even if the repository could not be updated')
        self.parser.add_option('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to')
        self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
        self.parser.add_option('-C', '--checkout', dest='checkout',
            help='branch/tag/commit to checkout. ' 'Defaults to behavior of repository module.')
        self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
            help='adds the hostkey for the repo url if not already added')
        self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
            help='Repository module name, which ansible will use to check out the repo. Default is %s.' % self.DEFAULT_REPO_TYPE)
        self.parser.add_option('--verify-commit', dest='verify', default=False, action='store_true',
            help='verify GPG signature of checked out commit, if it fails abort running the playbook.'
                 ' This needs the corresponding VCS module to support such an operation')

        self.options, self.args = self.parser.parse_args()

        # --sleep takes a string from optparse; convert it to a concrete
        # random number of seconds here so run() can use it directly.
        if self.options.sleep:
            try:
                secs = random.randint(0,int(self.options.sleep))
                self.options.sleep = secs
            except ValueError:
                raise AnsibleOptionsError("%s is not a number." % self.options.sleep)

        # A repository URL is mandatory; everything else has defaults.
        if not self.options.url:
            raise AnsibleOptionsError("URL for repository not specified, use -h for help")

        # Exactly one host pattern is expected as a positional argument.
        if len(self.args) != 1:
            raise AnsibleOptionsError("Missing target hosts")

        if self.options.module_name not in self.SUPPORTED_REPO_MODULES:
            raise AnsibleOptionsError("Unsuported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES)))

        self.display.verbosity = self.options.verbosity
        self.validate_conflicts(vault_opts=True)

    def run(self):
        ''' use Runner lib to do SSH things '''

        super(PullCLI, self).run()

        # log command line
        now = datetime.datetime.now()
        self.display.display(now.strftime("Starting Ansible Pull at %F %T"))
        self.display.display(' '.join(sys.argv))

        # Build Checkout command
        # Now construct the ansible command
        node = platform.node()
        host = socket.getfqdn()
        # Limit execution to this machine under any of its likely names
        # (fqdn, node name, and their short forms), plus localhost.
        limit_opts = 'localhost:%s:127.0.0.1' % ':'.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
        base_opts = '-c local "%s"' % limit_opts
        if self.options.verbosity > 0:
            # Propagate -v/-vv/... to the spawned ansible processes.
            base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ])

        # Attempt to use the inventory passed in as an argument
        # It might not yet have been downloaded so use localhost if note
        if not self.options.inventory or not os.path.exists(self.options.inventory):
            inv_opts = 'localhost,'
        else:
            inv_opts = self.options.inventory

        #TODO: enable more repo modules hg/svn?
        if self.options.module_name == 'git':
            # Translate the CLI flags into git-module arguments.
            repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest)
            if self.options.checkout:
                repo_opts += ' version=%s' % self.options.checkout
            if self.options.accept_host_key:
                repo_opts += ' accept_hostkey=yes'
            if self.options.private_key_file:
                repo_opts += ' key_file=%s' % self.options.private_key_file
            if self.options.verify:
                repo_opts += ' verify_commit=yes'

        # Resolve the repository module's plugin path; it doubles as the
        # checkout directory hint for select_playbook() below.
        path = module_loader.find_plugin(self.options.module_name)
        if path is None:
            raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name))

        # Invoke the sibling `ansible` binary from the same install location.
        bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
        cmd = '%s/ansible -i "%s" %s -m %s -a "%s"' % (
            bin_path, inv_opts, base_opts, self.options.module_name, repo_opts
        )

        for ev in self.options.extra_vars:
            cmd += ' -e "%s"' % ev

        # Nap?
        if self.options.sleep:
            self.display.display("Sleeping for %d seconds..." % self.options.sleep)
            time.sleep(self.options.sleep);

        # RUN the Checkout command
        rc, out, err = run_cmd(cmd, live=True)

        if rc != 0:
            if self.options.force:
                self.display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
            else:
                return rc
        elif self.options.ifchanged and '"changed": true' not in out:
            # -o/--only-if-changed: skip the playbook run when the repo
            # module reported no change (detected via its JSON output).
            self.display.display("Repository has not changed, quitting.")
            return 0

        playbook = self.select_playbook(path)

        if playbook is None:
            raise AnsibleOptionsError("Could not find a playbook to run.")

        # Build playbook command
        cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
        if self.options.vault_password_file:
            cmd += " --vault-password-file=%s" % self.options.vault_password_file
        if self.options.inventory:
            cmd += ' -i "%s"' % self.options.inventory
        for ev in self.options.extra_vars:
            cmd += ' -e "%s"' % ev
        if self.options.ask_sudo_pass:
            cmd += ' -K'
        if self.options.tags:
            cmd += ' -t "%s"' % self.options.tags
        if self.options.limit:
            cmd += ' -l "%s"' % self.options.limit

        # Run the playbook from inside the checkout so relative paths resolve.
        os.chdir(self.options.dest)

        # RUN THE PLAYBOOK COMMAND
        rc, out, err = run_cmd(cmd, live=True)

        if self.options.purge:
            # Move out of the directory before deleting it.
            os.chdir('/')
            try:
                shutil.rmtree(self.options.dest)
            except Exception as e:
                # Best-effort cleanup: report the failure but keep the
                # playbook's return code as the exit status.
                self.display.error("Failed to remove %s: %s" % (self.options.dest, str(e)))

        return rc

    def try_playbook(self, path):
        # Return 0 when *path* is a readable file; otherwise a code that
        # indexes into PLAYBOOK_ERRORS.
        if not os.path.exists(path):
            return 1
        if not os.access(path, os.R_OK):
            return 2
        return 0

    def select_playbook(self, path):
        # Pick the playbook to run: an explicit positional argument wins;
        # otherwise fall back to <fqdn>.yml, <shorthostname>.yml, then local.yml.
        playbook = None
        if len(self.args) > 0 and self.args[0] is not None:
            playbook = os.path.join(path, self.args[0])
            rc = self.try_playbook(playbook)
            if rc != 0:
                self.display.warning("%s: %s" % (playbook, self.PLAYBOOK_ERRORS[rc]))
                return None
            return playbook
        else:
            fqdn = socket.getfqdn()
            hostpb = os.path.join(path, fqdn + '.yml')
            shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
            localpb = os.path.join(path, self.DEFAULT_PLAYBOOK)
            errors = []
            for pb in [hostpb, shorthostpb, localpb]:
                rc = self.try_playbook(pb)
                if rc == 0:
                    playbook = pb
                    break
                else:
                    errors.append("%s: %s" % (pb, self.PLAYBOOK_ERRORS[rc]))
            if playbook is None:
                # None of the candidates was usable; surface all the reasons.
                self.display.warning("\n".join(errors))
            return playbook
|
gpl-3.0
|
shepdelacreme/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_snapshot.py
|
19
|
9982
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_snapshot
short_description: "Module to manage Virtual Machine Snapshots in oVirt/RHV"
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage Virtual Machine Snapshots in oVirt/RHV"
options:
snapshot_id:
description:
- "ID of the snapshot to manage."
vm_name:
description:
- "Name of the Virtual Machine to manage."
required: true
state:
description:
- "Should the Virtual Machine snapshot be restore/present/absent."
choices: ['restore', 'present', 'absent']
default: present
description:
description:
- "Description of the snapshot."
use_memory:
description:
- "If I(true) and C(state) is I(present) save memory of the Virtual
Machine if it's running."
- "If I(true) and C(state) is I(restore) restore memory of the
Virtual Machine."
- "Note that Virtual Machine will be paused while saving the memory."
aliases:
- "restore_memory"
- "save_memory"
type: bool
keep_days_old:
description:
- "Number of days after which should snapshot be deleted."
- "It will check all snapshots of virtual machine and delete them, if they are older."
version_added: "2.8"
notes:
- "Note that without a guest agent the data on the created snapshot may be
inconsistent."
- "Deleting a snapshot does not remove any information from the virtual
machine - it simply removes a return-point. However, restoring a virtual
machine from a snapshot deletes any content that was written to the
virtual machine after the time the snapshot was taken."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create snapshot:
- ovirt_snapshot:
vm_name: rhel7
description: MySnapshot
register: snapshot
# Create snapshot and save memory:
- ovirt_snapshot:
vm_name: rhel7
description: SnapWithMem
use_memory: true
register: snapshot
# Restore snapshot:
- ovirt_snapshot:
state: restore
vm_name: rhel7
snapshot_id: "{{ snapshot.id }}"
# Remove snapshot:
- ovirt_snapshot:
state: absent
vm_name: rhel7
snapshot_id: "{{ snapshot.id }}"
# Delete all snapshots older than 2 days
- ovirt_snapshot:
vm_name: test
keep_days_old: 2
'''
RETURN = '''
id:
description: ID of the snapshot which is managed
returned: On success if snapshot is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
snapshot:
description: "Dictionary of all the snapshot attributes. Snapshot attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
returned: On success if snapshot is found.
type: dict
snapshots:
description: List of deleted snapshots when keep_days_old is defined and snapshot is older than the input days
returned: On success returns deleted snapshots
type: list
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
get_entity,
ovirt_full_argument_spec,
search_by_name,
wait,
)
def create_snapshot(module, vm_service, snapshots_service):
    """
    Ensure the snapshot identified by ``snapshot_id`` exists, creating it
    when missing.

    :param module: AnsibleModule with ``snapshot_id``, ``description``,
        ``use_memory``, ``wait`` and ``timeout`` parameters.
    :param vm_service: oVirt VM service (unused here, kept for a uniform
        signature with the sibling snapshot functions).
    :param snapshots_service: oVirt snapshots service of the target VM.
    :return: dict with ``changed``, ``id`` and ``snapshot`` keys.
    """
    changed = False
    snapshot = get_entity(
        snapshots_service.snapshot_service(module.params['snapshot_id'])
    )
    if snapshot is None:
        # Report a change even in check mode: the snapshot would be created.
        changed = True
        if not module.check_mode:
            snapshot = snapshots_service.add(
                otypes.Snapshot(
                    description=module.params.get('description'),
                    persist_memorystate=module.params.get('use_memory'),
                )
            )
            # Only wait when a snapshot was actually created. The previous
            # unconditional wait crashed in check mode with an
            # AttributeError, because `snapshot` was still None.
            wait(
                service=snapshots_service.snapshot_service(snapshot.id),
                condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
                wait=module.params['wait'],
                timeout=module.params['timeout'],
            )
    return {
        'changed': changed,
        # `snapshot` is None in check mode; guard the attribute access.
        'id': snapshot.id if snapshot else None,
        'snapshot': get_dict_of_struct(snapshot),
    }
def remove_snapshot(module, vm_service, snapshots_service, snapshot_id=None):
    """
    Remove a snapshot of the VM if it exists.

    :param module: AnsibleModule with ``snapshot_id``, ``wait`` and
        ``timeout`` parameters.
    :param vm_service: oVirt VM service (unused here, kept for a uniform
        signature with the sibling snapshot functions).
    :param snapshots_service: oVirt snapshots service of the target VM.
    :param snapshot_id: optional explicit snapshot id; falls back to the
        module's ``snapshot_id`` parameter when not given.
    :return: dict with ``changed``, ``id`` and ``snapshot`` keys.
    """
    changed = False
    if not snapshot_id:
        snapshot_id = module.params['snapshot_id']
    snapshot = get_entity(
        snapshots_service.snapshot_service(snapshot_id)
    )

    if snapshot:
        snapshot_service = snapshots_service.snapshot_service(snapshot.id)
        # Report a change even in check mode: the snapshot would be removed.
        changed = True
        if not module.check_mode:
            snapshot_service.remove()
            # Only wait for the snapshot to disappear when it was actually
            # removed. Waiting in check mode would block until the timeout,
            # since nothing ever deletes the snapshot.
            wait(
                service=snapshot_service,
                condition=lambda snapshot: snapshot is None,
                wait=module.params['wait'],
                timeout=module.params['timeout'],
            )

    return {
        'changed': changed,
        'id': snapshot.id if snapshot else None,
        'snapshot': get_dict_of_struct(snapshot),
    }
def restore_snapshot(module, vm_service, snapshots_service):
    """
    Restore the VM to the snapshot given by ``snapshot_id``.

    If the snapshot is not in preview, it is restored directly; if it is
    already in preview, the preview is committed instead.

    :raises Exception: when the snapshot id does not exist on the VM.
    :return: dict with ``changed``, ``id`` and ``snapshot`` keys.
    """
    changed = False
    snapshot_service = snapshots_service.snapshot_service(
        module.params['snapshot_id']
    )
    snapshot = get_entity(snapshot_service)
    if snapshot is None:
        raise Exception(
            "Snapshot with id '%s' doesn't exist" % module.params['snapshot_id']
        )

    if snapshot.snapshot_status != otypes.SnapshotStatus.IN_PREVIEW:
        if not module.check_mode:
            # Directly restore the snapshot, optionally including saved memory.
            snapshot_service.restore(
                restore_memory=module.params.get('use_memory'),
            )
            changed = True
    else:
        if not module.check_mode:
            # The snapshot is already previewed; commit the preview.
            vm_service.commit_snapshot()
            changed = True

    if changed:
        # Wait for the snapshot to settle back into the OK state.
        wait(
            service=snapshot_service,
            condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
            wait=module.params['wait'],
            timeout=module.params['timeout'],
        )
    return {
        'changed': changed,
        'id': snapshot.id if snapshot else None,
        'snapshot': get_dict_of_struct(snapshot),
    }
def remove_old_snapshosts(module, vm_service, snapshots_service):
    """Delete snapshots of the managed VM that are older than ``keep_days_old`` days.

    Returns a dict with the list of deleted snapshots and a ``changed`` flag.
    """
    deleted = []
    now = datetime.now()
    target_vm = module.params.get('vm_name')
    max_age_days = module.params.get('keep_days_old')

    for candidate in snapshots_service.list():
        # Skip snapshots that belong to other VMs (or have no VM reference).
        if candidate.vm is None or candidate.vm.name != target_vm:
            continue
        # Snapshot dates are tz-aware; drop tzinfo to compare with local now().
        age = now - candidate.date.replace(tzinfo=None)
        if age.days >= max_age_days:
            removed = remove_snapshot(module, vm_service, snapshots_service, candidate.id)
            deleted.append(removed.get('snapshot'))

    return dict(snapshots=deleted, changed=bool(deleted))
def main():
    """Module entry point: dispatch to create/restore/remove snapshot handlers."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['restore', 'present', 'absent'],
            default='present',
        ),
        vm_name=dict(required=True),
        snapshot_id=dict(default=None),
        description=dict(default=None),
        keep_days_old=dict(default=None, type='int'),
        use_memory=dict(
            default=None,
            type='bool',
            aliases=['restore_memory', 'save_memory'],
        ),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # snapshot_id is only mandatory when restoring or deleting.
        required_if=[
            ('state', 'absent', ['snapshot_id']),
            ('state', 'restore', ['snapshot_id']),
        ]
    )
    # Fail early if the oVirt SDK is not importable.
    check_sdk(module)

    vm_name = module.params.get('vm_name')
    # Pop auth so it is not forwarded to the API as a snapshot attribute.
    auth = module.params.pop('auth')
    connection = create_connection(auth)
    vms_service = connection.system_service().vms_service()
    vm = search_by_name(vms_service, vm_name)
    if not vm:
        module.fail_json(
            msg="Vm '{name}' doesn't exist.".format(name=vm_name),
        )
    vm_service = vms_service.vm_service(vm.id)
    snapshots_service = vms_service.vm_service(vm.id).snapshots_service()
    try:
        state = module.params['state']
        if state == 'present':
            # With keep_days_old set, 'present' means "prune old snapshots"
            # instead of "create one".
            if module.params.get('keep_days_old') is not None:
                ret = remove_old_snapshosts(module, vm_service, snapshots_service)
            else:
                ret = create_snapshot(module, vm_service, snapshots_service)
        elif state == 'restore':
            ret = restore_snapshot(module, vm_service, snapshots_service)
        elif state == 'absent':
            ret = remove_snapshot(module, vm_service, snapshots_service)
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out when we created the session ourselves (no token passed in).
        connection.close(logout=auth.get('token') is None)
|
gpl-3.0
|
WatanabeYasumasa/edx-platform
|
lms/djangoapps/instructor/tests/test_legacy_download_csv.py
|
5
|
3022
|
"""
Unit tests for instructor dashboard
Based on (and depends on) unit tests for courseware.
Notes for running by hand:
./manage.py lms --settings test test lms/djangoapps/instructor
"""
from django.test.utils import override_settings
# Need access to internal func to put users in the right group
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from student.roles import CourseStaffRole
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.django import modulestore, clear_existing_modulestores
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorDashboardGradeDownloadCSV(ModuleStoreTestCase, LoginEnrollmentTestCase):
    '''
    Check for download of csv
    '''

    def setUp(self):
        # Start from a clean modulestore so the toy course loads predictably.
        # NOTE(review): super().setUp() is not called here — presumably the
        # explicit clear_existing_modulestores() covers what the base class
        # would do; confirm against ModuleStoreTestCase.
        clear_existing_modulestores()
        self.toy = modulestore().get_course("edX/toy/2012_Fall")

        # Create two accounts
        self.student = '[email protected]'
        self.instructor = '[email protected]'
        self.password = 'foo'
        self.create_account('u1', self.student, self.password)
        self.create_account('u2', self.instructor, self.password)
        self.activate_user(self.student)
        self.activate_user(self.instructor)

        # Grant the second account staff rights on the toy course.
        CourseStaffRole(self.toy.location).add_users(User.objects.get(email=self.instructor))

        # Run the test as the (enrolled) instructor.
        self.logout()
        self.login(self.instructor, self.password)
        self.enroll(self.toy)

    def test_download_grades_csv(self):
        # POST the legacy dashboard action and verify the CSV attachment:
        # content type, filename, and the full expected body.
        course = self.toy

        url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id})
        msg = "url = {0}\n".format(url)
        response = self.client.post(url, {'action': 'Download CSV of all student grades for this course'})
        msg += "instructor dashboard download csv grades: response = '{0}'\n".format(response)

        self.assertEqual(response['Content-Type'], 'text/csv', msg)

        cdisp = response['Content-Disposition']
        msg += "Content-Disposition = '%s'\n" % cdisp
        self.assertEqual(cdisp, 'attachment; filename=grades_{0}.csv'.format(course.id), msg)

        # Normalize line endings before comparing against the expected body.
        body = response.content.replace('\r', '')
        msg += "body = '{0}'\n".format(body)

        # All the not-actually-in-the-course hw and labs come from the
        # default grading policy string in graders.py
        expected_body = '''"ID","Username","Full Name","edX email","External email","HW 01","HW 02","HW 03","HW 04","HW 05","HW 06","HW 07","HW 08","HW 09","HW 10","HW 11","HW 12","HW Avg","Lab 01","Lab 02","Lab 03","Lab 04","Lab 05","Lab 06","Lab 07","Lab 08","Lab 09","Lab 10","Lab 11","Lab 12","Lab Avg","Midterm","Final"
"2","u2","username","[email protected]","","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0"
'''
        self.assertEqual(body, expected_body, msg)
|
agpl-3.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.