repo_name stringlengths 5–100 | path stringlengths 4–375 | copies stringclasses 991 values | size stringlengths 4–7 | content stringlengths 666–1M | license stringclasses 15 values
---|---|---|---|---|---
julien78910/CouchPotatoServer
|
libs/requests/packages/charade/constants.py
|
3008
|
1335
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
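# Illustrative sketch (not part of this module): probers elsewhere in
# charade return eStart/eError/eItsMe from feed(), while the top-level
# detector tracks eDetecting/eFoundIt and can short-circuit once a
# prober's confidence clears SHORTCUT_THRESHOLD, e.g.:
#
#   if prober.feed(chunk) == eItsMe or \
#           prober.get_confidence() > SHORTCUT_THRESHOLD:
#       self._mState = eFoundIt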
|
gpl-3.0
|
bbbenja/SickRage
|
lib/sqlalchemy/ext/declarative/base.py
|
77
|
20180
|
# ext/declarative/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Internal implementation for declarative."""
from ...schema import Table, Column
from ...orm import mapper, class_mapper, synonym
from ...orm.interfaces import MapperProperty
from ...orm.properties import ColumnProperty, CompositeProperty
from ...orm.attributes import QueryableAttribute
from ...orm.base import _is_mapped_class
from ... import util, exc
from ...util import topological
from ...sql import expression
from ... import event
from . import clsregistry
import collections
import weakref
def _declared_mapping_info(cls):
# deferred mapping
if _DeferredMapperConfig.has_cls(cls):
return _DeferredMapperConfig.config_for_cls(cls)
# regular mapping
elif _is_mapped_class(cls):
return class_mapper(cls, configure=False)
else:
return None
def _as_declarative(cls, classname, dict_):
from .api import declared_attr
# dict_ will be a dictproxy, which we can't write to, and we need to!
dict_ = dict(dict_)
column_copies = {}
potential_columns = {}
mapper_args_fn = None
table_args = inherited_table_args = None
tablename = None
declarative_props = (declared_attr, util.classproperty)
for base in cls.__mro__:
_is_declarative_inherits = hasattr(base, '_decl_class_registry')
if '__declare_last__' in base.__dict__:
@event.listens_for(mapper, "after_configured")
def go():
cls.__declare_last__()
if '__declare_first__' in base.__dict__:
@event.listens_for(mapper, "before_configured")
def go():
cls.__declare_first__()
if '__abstract__' in base.__dict__:
if (base is cls or
(base in cls.__bases__ and not _is_declarative_inherits)
):
return
class_mapped = _declared_mapping_info(base) is not None
for name, obj in vars(base).items():
if name == '__mapper_args__':
if not mapper_args_fn and (
not class_mapped or
isinstance(obj, declarative_props)
):
# don't even invoke __mapper_args__ until
# after we've determined everything about the
# mapped table.
mapper_args_fn = lambda: cls.__mapper_args__
elif name == '__tablename__':
if not tablename and (
not class_mapped or
isinstance(obj, declarative_props)
):
tablename = cls.__tablename__
elif name == '__table_args__':
if not table_args and (
not class_mapped or
isinstance(obj, declarative_props)
):
table_args = cls.__table_args__
if not isinstance(table_args, (tuple, dict, type(None))):
raise exc.ArgumentError(
"__table_args__ value must be a tuple, "
"dict, or None")
if base is not cls:
inherited_table_args = True
elif class_mapped:
if isinstance(obj, declarative_props):
util.warn("Regular (i.e. not __special__) "
"attribute '%s.%s' uses @declared_attr, "
"but owning class %s is mapped - "
"not applying to subclass %s."
% (base.__name__, name, base, cls))
continue
elif base is not cls:
# we're a mixin.
if isinstance(obj, Column):
if getattr(cls, name) is not obj:
# if column has been overridden
# (like by the InstrumentedAttribute of the
# superclass), skip
continue
if obj.foreign_keys:
raise exc.InvalidRequestError(
"Columns with foreign keys to other columns "
"must be declared as @declared_attr callables "
"on declarative mixin classes. ")
if name not in dict_ and not (
'__table__' in dict_ and
(obj.name or name) in dict_['__table__'].c
) and name not in potential_columns:
potential_columns[name] = \
column_copies[obj] = \
obj.copy()
column_copies[obj]._creation_order = \
obj._creation_order
elif isinstance(obj, MapperProperty):
raise exc.InvalidRequestError(
"Mapper properties (i.e. deferred,"
"column_property(), relationship(), etc.) must "
"be declared as @declared_attr callables "
"on declarative mixin classes.")
elif isinstance(obj, declarative_props):
dict_[name] = ret = \
column_copies[obj] = getattr(cls, name)
if isinstance(ret, (Column, MapperProperty)) and \
ret.doc is None:
ret.doc = obj.__doc__
# apply inherited columns as we should
for k, v in potential_columns.items():
dict_[k] = v
if inherited_table_args and not tablename:
table_args = None
clsregistry.add_class(classname, cls)
our_stuff = util.OrderedDict()
for k in list(dict_):
# TODO: improve this ? all dunders ?
if k in ('__table__', '__tablename__', '__mapper_args__'):
continue
value = dict_[k]
if isinstance(value, declarative_props):
value = getattr(cls, k)
elif isinstance(value, QueryableAttribute) and \
value.class_ is not cls and \
value.key != k:
# detect a QueryableAttribute that's already mapped being
# assigned elsewhere in userland, turn into a synonym()
value = synonym(value.key)
setattr(cls, k, value)
if (isinstance(value, tuple) and len(value) == 1 and
isinstance(value[0], (Column, MapperProperty))):
util.warn("Ignoring declarative-like tuple value of attribute "
"%s: possibly a copy-and-paste error with a comma "
"left at the end of the line?" % k)
continue
if not isinstance(value, (Column, MapperProperty)):
if not k.startswith('__'):
dict_.pop(k)
setattr(cls, k, value)
continue
if k == 'metadata':
raise exc.InvalidRequestError(
"Attribute name 'metadata' is reserved "
"for the MetaData instance when using a "
"declarative base class."
)
prop = clsregistry._deferred_relationship(cls, value)
our_stuff[k] = prop
# set up attributes in the order they were created
our_stuff.sort(key=lambda key: our_stuff[key]._creation_order)
# extract columns from the class dict
declared_columns = set()
name_to_prop_key = collections.defaultdict(set)
for key, c in list(our_stuff.items()):
if isinstance(c, (ColumnProperty, CompositeProperty)):
for col in c.columns:
if isinstance(col, Column) and \
col.table is None:
_undefer_column_name(key, col)
if not isinstance(c, CompositeProperty):
name_to_prop_key[col.name].add(key)
declared_columns.add(col)
elif isinstance(c, Column):
_undefer_column_name(key, c)
name_to_prop_key[c.name].add(key)
declared_columns.add(c)
# if the column is the same name as the key,
# remove it from the explicit properties dict.
# the normal rules for assigning column-based properties
# will take over, including precedence of columns
# in multi-column ColumnProperties.
if key == c.key:
del our_stuff[key]
for name, keys in name_to_prop_key.items():
if len(keys) > 1:
util.warn(
"On class %r, Column object %r named directly multiple times, "
"only one will be used: %s" %
(classname, name, (", ".join(sorted(keys))))
)
declared_columns = sorted(
declared_columns, key=lambda c: c._creation_order)
table = None
if hasattr(cls, '__table_cls__'):
table_cls = util.unbound_method_to_callable(cls.__table_cls__)
else:
table_cls = Table
if '__table__' not in dict_:
if tablename is not None:
args, table_kw = (), {}
if table_args:
if isinstance(table_args, dict):
table_kw = table_args
elif isinstance(table_args, tuple):
if isinstance(table_args[-1], dict):
args, table_kw = table_args[0:-1], table_args[-1]
else:
args = table_args
autoload = dict_.get('__autoload__')
if autoload:
table_kw['autoload'] = True
cls.__table__ = table = table_cls(
tablename, cls.metadata,
*(tuple(declared_columns) + tuple(args)),
**table_kw)
else:
table = cls.__table__
if declared_columns:
for c in declared_columns:
if not table.c.contains_column(c):
raise exc.ArgumentError(
"Can't add additional column %r when "
"specifying __table__" % c.key
)
if hasattr(cls, '__mapper_cls__'):
mapper_cls = util.unbound_method_to_callable(cls.__mapper_cls__)
else:
mapper_cls = mapper
for c in cls.__bases__:
if _declared_mapping_info(c) is not None:
inherits = c
break
else:
inherits = None
if table is None and inherits is None:
raise exc.InvalidRequestError(
"Class %r does not have a __table__ or __tablename__ "
"specified and does not inherit from an existing "
"table-mapped class." % cls
)
elif inherits:
inherited_mapper = _declared_mapping_info(inherits)
inherited_table = inherited_mapper.local_table
inherited_mapped_table = inherited_mapper.mapped_table
if table is None:
# single table inheritance.
# ensure no table args
if table_args:
raise exc.ArgumentError(
"Can't place __table_args__ on an inherited class "
"with no table."
)
# add any columns declared here to the inherited table.
for c in declared_columns:
if c.primary_key:
raise exc.ArgumentError(
"Can't place primary key columns on an inherited "
"class with no table."
)
if c.name in inherited_table.c:
if inherited_table.c[c.name] is c:
continue
raise exc.ArgumentError(
"Column '%s' on class %s conflicts with "
"existing column '%s'" %
(c, cls, inherited_table.c[c.name])
)
inherited_table.append_column(c)
if inherited_mapped_table is not None and \
inherited_mapped_table is not inherited_table:
inherited_mapped_table._refresh_for_new_column(c)
defer_map = hasattr(cls, '_sa_decl_prepare')
if defer_map:
cfg_cls = _DeferredMapperConfig
else:
cfg_cls = _MapperConfig
mt = cfg_cls(mapper_cls,
cls, table,
inherits,
declared_columns,
column_copies,
our_stuff,
mapper_args_fn)
if not defer_map:
mt.map()
class _MapperConfig(object):
mapped_table = None
def __init__(self, mapper_cls,
cls,
table,
inherits,
declared_columns,
column_copies,
properties, mapper_args_fn):
self.mapper_cls = mapper_cls
self.cls = cls
self.local_table = table
self.inherits = inherits
self.properties = properties
self.mapper_args_fn = mapper_args_fn
self.declared_columns = declared_columns
self.column_copies = column_copies
def _prepare_mapper_arguments(self):
properties = self.properties
if self.mapper_args_fn:
mapper_args = self.mapper_args_fn()
else:
mapper_args = {}
# make sure that column copies are used rather
# than the original columns from any mixins
for k in ('version_id_col', 'polymorphic_on',):
if k in mapper_args:
v = mapper_args[k]
mapper_args[k] = self.column_copies.get(v, v)
assert 'inherits' not in mapper_args, \
"Can't specify 'inherits' explicitly with declarative mappings"
if self.inherits:
mapper_args['inherits'] = self.inherits
if self.inherits and not mapper_args.get('concrete', False):
# single or joined inheritance
# exclude any cols on the inherited table which are
# not mapped on the parent class, to avoid
# mapping columns specific to sibling/nephew classes
inherited_mapper = _declared_mapping_info(self.inherits)
inherited_table = inherited_mapper.local_table
if 'exclude_properties' not in mapper_args:
mapper_args['exclude_properties'] = exclude_properties = \
set([c.key for c in inherited_table.c
if c not in inherited_mapper._columntoproperty])
exclude_properties.difference_update(
[c.key for c in self.declared_columns])
# look through columns in the current mapper that
# are keyed to a propname different than the colname
# (if names were the same, we'd have popped it out above,
# in which case the mapper makes this combination).
# See if the superclass has a similar column property.
# If so, join them together.
for k, col in list(properties.items()):
if not isinstance(col, expression.ColumnElement):
continue
if k in inherited_mapper._props:
p = inherited_mapper._props[k]
if isinstance(p, ColumnProperty):
# note here we place the subclass column
# first. See [ticket:1892] for background.
properties[k] = [col] + p.columns
result_mapper_args = mapper_args.copy()
result_mapper_args['properties'] = properties
return result_mapper_args
def map(self):
mapper_args = self._prepare_mapper_arguments()
self.cls.__mapper__ = self.mapper_cls(
self.cls,
self.local_table,
**mapper_args
)
class _DeferredMapperConfig(_MapperConfig):
_configs = util.OrderedDict()
@property
def cls(self):
return self._cls()
@cls.setter
def cls(self, class_):
self._cls = weakref.ref(class_, self._remove_config_cls)
self._configs[self._cls] = self
@classmethod
def _remove_config_cls(cls, ref):
cls._configs.pop(ref, None)
@classmethod
def has_cls(cls, class_):
# 2.6 fails on weakref if class_ is an old style class
return isinstance(class_, type) and \
weakref.ref(class_) in cls._configs
@classmethod
def config_for_cls(cls, class_):
return cls._configs[weakref.ref(class_)]
@classmethod
def classes_for_base(cls, base_cls, sort=True):
classes_for_base = [m for m in cls._configs.values()
if issubclass(m.cls, base_cls)]
if not sort:
return classes_for_base
all_m_by_cls = dict(
(m.cls, m)
for m in classes_for_base
)
tuples = []
for m_cls in all_m_by_cls:
tuples.extend(
(all_m_by_cls[base_cls], all_m_by_cls[m_cls])
for base_cls in m_cls.__bases__
if base_cls in all_m_by_cls
)
return list(
topological.sort(
tuples,
classes_for_base
)
)
def map(self):
self._configs.pop(self._cls, None)
super(_DeferredMapperConfig, self).map()
def _add_attribute(cls, key, value):
"""add an attribute to an existing declarative class.
This runs through the logic to determine MapperProperty,
adds it to the Mapper, adds a column to the mapped Table, etc.
"""
if '__mapper__' in cls.__dict__:
if isinstance(value, Column):
_undefer_column_name(key, value)
cls.__table__.append_column(value)
cls.__mapper__.add_property(key, value)
elif isinstance(value, ColumnProperty):
for col in value.columns:
if isinstance(col, Column) and col.table is None:
_undefer_column_name(key, col)
cls.__table__.append_column(col)
cls.__mapper__.add_property(key, value)
elif isinstance(value, MapperProperty):
cls.__mapper__.add_property(
key,
clsregistry._deferred_relationship(cls, value)
)
elif isinstance(value, QueryableAttribute) and value.key != key:
# detect a QueryableAttribute that's already mapped being
# assigned elsewhere in userland, turn into a synonym()
value = synonym(value.key)
cls.__mapper__.add_property(
key,
clsregistry._deferred_relationship(cls, value)
)
else:
type.__setattr__(cls, key, value)
else:
type.__setattr__(cls, key, value)
def _declarative_constructor(self, **kwargs):
"""A simple constructor that allows initialization from kwargs.
Sets attributes on the constructed instance using the names and
values in ``kwargs``.
Only keys that are present as
attributes of the instance's class are allowed. These could be,
for example, any mapped columns or relationships.
"""
cls_ = type(self)
for k in kwargs:
if not hasattr(cls_, k):
raise TypeError(
"%r is an invalid keyword argument for %s" %
(k, cls_.__name__))
setattr(self, k, kwargs[k])
_declarative_constructor.__name__ = '__init__'
def _undefer_column_name(key, column):
if column.key is None:
column.key = key
if column.name is None:
column.name = key
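# Usage sketch (illustrative class names; the public API lives in .api):
# _as_declarative above is what runs when a class like this is defined,
# copying mixin columns and invoking @declared_attr callables.
#
#   from sqlalchemy.ext.declarative import declarative_base, declared_attr
#   from sqlalchemy import Column, Integer, String
#
#   Base = declarative_base()
#
#   class TablenameMixin(object):
#       @declared_attr
#       def __tablename__(cls):
#           return cls.__name__.lower()
#
#   class User(TablenameMixin, Base):
#       id = Column(Integer, primary_key=True)
#       name = Column(String(50))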
|
gpl-3.0
|
kasioumis/invenio
|
invenio/legacy/bibcheck/plugins/files.py
|
13
|
2221
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Plugin to validate the checksum of a record's files """
from invenio.legacy.bibdocfile.api import BibRecDocs
import os
try:
import magic
if hasattr(magic, "from_file"):
HAS_MAGIC = 1
else:
HAS_MAGIC = 2
magic_object = magic.open(magic.MAGIC_MIME_TYPE)
magic_object.load()
except ImportError:
HAS_MAGIC = 0
def check_record(record):
"""
Validates the checksum of all the BibDocFile's in the record
"""
record_id = record["001"][0][3]
docs = BibRecDocs(record_id).list_bibdocs()
for doc in docs:
for bibfile in doc.list_latest_files():
if not os.path.exists(bibfile.fullpath):
record.set_invalid("File doesn't exists %s" % bibfile.fullpath)
continue
if not bibfile.check():
record.set_invalid("Invalid checksum for file %s" % bibfile.fullpath)
if HAS_MAGIC:
if HAS_MAGIC == 1:
magic_mime = magic.from_file(bibfile.fullpath, mime=True)
else:
magic_mime = magic_object.file(bibfile.fullpath)
if bibfile.mime != magic_mime:
record.set_invalid(
("Guessed mime type from extension (%s) is different" +
"from guessed mime type from headers (%s)") %
(bibfile.mime, magic_mime)
)
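# Note on the two "magic" APIs branched on above (sketch): the pip package
# python-magic provides magic.from_file(path, mime=True), while the bindings
# bundled with file(1) provide magic.open(magic.MAGIC_MIME_TYPE) plus
# .load() and .file(path); HAS_MAGIC records which flavour was imported so
# either installation works.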
|
gpl-2.0
|
MartinThoma/algorithms
|
ML/gtsrb/densenet.py
|
1
|
7475
|
import keras.backend as K
from keras.layers import Input, merge
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Activation, Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.regularizers import l2
def conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, ReLU, 3x3 Conv2D, optional bottleneck block and dropout
Args:
ip: Input keras tensor
nb_filter: number of filters
bottleneck: add bottleneck block
dropout_rate: dropout rate
weight_decay: weight decay factor
Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
'''
concat_axis = 1 if K.image_dim_ordering() == "th" else -1
x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(ip)
x = Activation('relu')(x)
if bottleneck:
inter_channel = nb_filter * 4 # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua
x = Convolution2D(inter_channel, 1, 1, init='he_uniform', border_mode='same', bias=False,
W_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = Convolution2D(nb_filter, 3, 3, init="he_uniform", border_mode="same", bias=False,
W_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def transition_block(ip, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D, optional compression, dropout and AveragePooling2D
Args:
ip: keras tensor
nb_filter: number of filters
dropout_rate: dropout rate
weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, dropout, avgpool
'''
concat_axis = 1 if K.image_dim_ordering() == "th" else -1
x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(ip)
x = Activation('relu')(x)
x = Convolution2D(int(nb_filter * compression), 1, 1, init="he_uniform", border_mode="same", bias=False,
W_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = AveragePooling2D((2, 2), strides=(2, 2))(x)
return x
def dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1E-4):
''' Build a dense_block where the output of each conv_block is fed to subsequent ones
Args:
x: keras tensor
nb_layers: the number of layers of conv_block to append to the model.
nb_filter: number of filters
growth_rate: growth rate
bottleneck: bottleneck block
dropout_rate: dropout rate
weight_decay: weight decay factor
Returns: keras tensor with nb_layers of conv_block appended
'''
concat_axis = 1 if K.image_dim_ordering() == "th" else -1
feature_list = [x]
for i in range(nb_layers):
x = conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
feature_list.append(x)
x = merge(feature_list, mode='concat', concat_axis=concat_axis)
nb_filter += growth_rate
return x, nb_filter
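# Filter bookkeeping sketch (illustrative): with nb_layers=12 and
# growth_rate=12, a dense block entered with nb_filter=24 returns
# nb_filter = 24 + 12 * 12 = 168, matching the depth of the concatenated
# feature_list along concat_axis.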
def create_dense_net(nb_classes, img_dim, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1,
bottleneck=False, reduction=0.0, dropout_rate=None, weight_decay=1E-4, verbose=True):
''' Build the create_dense_net model
Args:
nb_classes: number of classes
img_dim: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        depth: number of layers
nb_dense_block: number of dense blocks to add to end (generally = 3)
growth_rate: number of filters to add per dense block
nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
bottleneck: add bottleneck blocks
reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
dropout_rate: dropout rate
weight_decay: weight decay
verbose: print the model type
Returns: keras tensor with nb_layers of conv_block appended
'''
model_input = Input(shape=img_dim)
concat_axis = 1 if K.image_dim_ordering() == "th" else -1
assert (depth - 4) % 3 == 0, "Depth must be 3 N + 4"
if reduction != 0.0:
assert reduction <= 1.0 and reduction > 0.0, "reduction value must lie between 0.0 and 1.0"
# layers in each dense block
nb_layers = int((depth - 4) / 3)
if bottleneck:
nb_layers = int(nb_layers // 2)
    # compute initial nb_filter if -1, else accept the user's initial nb_filter
if nb_filter <= 0:
nb_filter = 2 * growth_rate
# compute compression factor
compression = 1.0 - reduction
# Initial convolution
x = Convolution2D(nb_filter, 3, 3, init="he_uniform", border_mode="same", name="initial_conv2D", bias=False,
W_regularizer=l2(weight_decay))(model_input)
# Add dense blocks
for block_idx in range(nb_dense_block - 1):
x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=bottleneck,
dropout_rate=dropout_rate, weight_decay=weight_decay)
# add transition_block
x = transition_block(x, nb_filter, compression=compression, dropout_rate=dropout_rate,
weight_decay=weight_decay)
nb_filter = int(nb_filter * compression)
# The last dense_block does not have a transition_block
x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=bottleneck,
dropout_rate=dropout_rate, weight_decay=weight_decay)
x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
x = Dense(nb_classes, activation='softmax', W_regularizer=l2(weight_decay), b_regularizer=l2(weight_decay))(x)
densenet = Model(input=model_input, output=x, name="create_dense_net")
if verbose:
if bottleneck and not reduction:
print("Bottleneck DenseNet-B-%d-%d created." % (depth, growth_rate))
elif not bottleneck and reduction > 0.0:
print("DenseNet-C-%d-%d with %0.1f compression created." % (depth, growth_rate, compression))
elif bottleneck and reduction > 0.0:
print("Bottleneck DenseNet-BC-%d-%d with %0.1f compression created." % (depth, growth_rate, compression))
else:
print("DenseNet-%d-%d created." % (depth, growth_rate))
return densenet
if __name__ == '__main__':
model = create_dense_net(nb_classes=10, img_dim=(3, 32, 32), depth=40, growth_rate=12, bottleneck=True, reduction=0.5)
model.summary()
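    # Training sketch (illustrative; assumes the Keras 1.x API used above and
    # hypothetical arrays X_train of shape (N, 3, 32, 32), y_train of shape (N, 10)):
    # model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # model.fit(X_train, y_train, batch_size=64, nb_epoch=10, validation_split=0.1)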
|
mit
|
omega-roms/I9300_Stock_Kernel_JB_4.3
|
tools/perf/scripts/python/sctop.py
|
11180
|
1924
|
# system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
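# The syscalls counter above relies on perf's autodict (an auto-vivifying
# dict pulled in by the wildcard imports above); a plain-Python equivalent
# would be (sketch):
#
#   import collections
#   syscalls = collections.defaultdict(int)
#   syscalls[id] += 1  # no try/except TypeError needed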
|
gpl-2.0
|
COCS4950G7/COSC4950
|
Source/Brute_Force/manager_server.py
|
1
|
4627
|
from multiprocessing.managers import SyncManager
import time
import Queue
import Dictionary
IP = "10.121.0.158"
PORTNUM = 22536
AUTHKEY = "Popcorn is awesome!!!"
dictionary = Dictionary.Dictionary()
def runserver():
try: #runserver definition try block
# Start a shared manager server and access its queues
manager = make_server_manager(PORTNUM, AUTHKEY)
shared_job_q = manager.get_job_q()
shared_result_q = manager.get_result_q()
dictionary.setAlgorithm('md5')
dictionary.setFileName("dic")
dictionary.setHash("33da7a40473c1637f1a2e142f4925194") # popcorn
while not dictionary.isEof():
#chunk is a Chunk object
chunk = dictionary.getNextChunk()
newChunk = manager.Value(dict, {'params': chunk.params, 'data': chunk.data})
shared_job_q.put(newChunk)
while True:
result = shared_result_q.get()
if result[0] is "win":
key = result[1]
print "Key is: %s" % key
break
else:
print "Chunk finished with params: %s" %result[1]
# Sleep a bit before shutting down the server - to give clients time to
# realize the job queue is empty and exit in an orderly way.
time.sleep(2)
manager.shutdown()
return
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in runserver definition Try block"
        # the exception instance
        print type(inst)
        # arguments stored in .args
        print inst.args
        # __str__ allows args to be printed directly
        print inst
print "============================================================================================="
# This is based on the examples in the official docs of multiprocessing.
# get_{job|result}_q return synchronized proxies for the actual Queue
# objects.
class JobQueueManager(SyncManager):
pass
def make_server_manager(port, authkey):
""" Create a manager for the server, listening on the given port.
Return a manager object with get_job_q and get_result_q methods.
"""
try: #Make_server_manager definition try block
job_q = Queue.Queue(maxsize=1000)
result_q = Queue.Queue()
try: #JobQueueManager/Lambda functions Try Block
JobQueueManager.register('get_job_q', callable=lambda: job_q)
JobQueueManager.register('get_result_q', callable=lambda: result_q)
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in Make_server_Manager: JobQueueManager/Lambda functions Try Block"
            # the exception instance
            print type(inst)
            # arguments stored in .args
            print inst.args
            # __str__ allows args to be printed directly
            print inst
print "============================================================================================="
manager = JobQueueManager(address=(IP, port), authkey=authkey)
manager.start()
print 'Server started at port %s' % port
return manager
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in Make_server_manager definition Try block"
        # the exception instance
        print type(inst)
        # arguments stored in .args
        print inst.args
        # __str__ allows args to be printed directly
        print inst
print "============================================================================================="
if __name__ == '__main__':
try: #Main
import time
start_time= time.time()
runserver()
except Exception as inst:
print "============================================================================================="
print "ERROR: An exception was thrown in Main"
        # the exception instance
        print type(inst)
        # arguments stored in .args
        print inst.args
        # __str__ allows args to be printed directly
        print inst
print "============================================================================================="
finally:
end_time= time.time() - start_time
print "Server ran for "+str(end_time)+" seconds"
|
gpl-3.0
|
acshi/osf.io
|
scripts/migration/migrate_registration_extra_drafts.py
|
24
|
3179
|
"""
Changes existing question.extra on a draft to a list
required for multiple files attached to a question
"""
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.files.models import FileNode, TrashedFileNode
from scripts import utils as scripts_utils
from website.models import DraftRegistration
from website.prereg.utils import get_prereg_schema
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def migrate_file_representation(bad_file):
logger.info('Migrating file representation of File: {0}'.format(bad_file))
view_url = bad_file['viewUrl'].rstrip('/')
fid = view_url.split('/')[-1]
fnode = FileNode.load(fid)
if fnode is None:
fnode = TrashedFileNode.load(fid)
if fnode is None:
logger.error('Could not load FileNode or TrashedFileNode with id {}. Skipping...'.format(fid))
return
data = {
'data': {
'kind': 'file',
'name': bad_file['selectedFileName'],
'path': fnode.path,
'extra': {},
'sha256': fnode.versions[-1].metadata['sha256']
}
}
bad_file.update(data)
def migrate_file_meta(question):
files = question.get('extra')
migrated = False
if files and isinstance(files, list):
for f in files:
if 'viewUrl' in f:
if not f.get('data', None):
migrate_file_representation(f)
migrated = True
if isinstance(files, dict):
if len(files) == 0:
question['extra'] = []
else:
question['extra'] = [files]
migrated = True
return migrated
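# Shape change performed by migrate_file_meta (illustrative values):
#   before: question['extra'] == {'viewUrl': '/project/abc12/files/def34/', ...}
#   after:  question['extra'] == [{'viewUrl': ..., 'data': {...}}]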
def migrate_drafts(dry):
PREREG_CHALLENGE_METASCHEMA = get_prereg_schema()
draft_registrations = DraftRegistration.find(
Q('registration_schema', 'eq', PREREG_CHALLENGE_METASCHEMA)
)
count = 0
for r in draft_registrations:
# NOTE: We don't query Q('approval', 'eq', None) just in case
# approval is set but the fk doesn't exist in the database
if r.approval or r.registered_node:
continue
logger.debug('Reading draft with id: {0}'.format(r._id))
data = r.registration_metadata
migrated = False
for q, ans in data.iteritems():
if isinstance(ans.get('value'), dict):
for value in ans['value'].values():
migrated = migrate_file_meta(value)
else:
migrated = migrate_file_meta(ans)
if migrated:
count += 1
logger.info('Migrated draft with id: {0}'.format(r._id))
if not dry:
r.save()
logger.info('Done with {0} drafts migrated.'.format(count))
def main(dry=True):
init_app(set_backends=True, routes=False)
if not dry:
scripts_utils.add_file_logger(logger, __file__)
migrate_drafts(dry)
if __name__ == '__main__':
dry_run = '--dry' in sys.argv
with TokuTransaction():
main(dry=dry_run)
if dry_run:
raise RuntimeError('Dry run, rolling back transaction.')
|
apache-2.0
|
johnmcdowall/procedural_city_generation
|
procedural_city_generation/additional_stuff/jsontools.py
|
3
|
2246
|
def save_vertexlist(vertex_list, name="output",savefig=0):
print "Output is being saved."
import json
import procedural_city_generation
import os
path=os.getcwd()+"/procedural_city_generation"
Vertexwb={}
for i in range(len(vertex_list)):
neighboursindizes=[vertex_list.index(x) for x in vertex_list[i].neighbours]
#coords, minor_road, seed, neighboursindizes
Vertexwb[i]=[(vertex_list[i].coords[0],vertex_list[i].coords[1]),
vertex_list[i].minor_road,
vertex_list[i].seed,neighboursindizes]
with open(path+"/outputs/"+name+".json", 'w') as fp:
json.dump(Vertexwb, fp)
if savefig==1:
print "Figure is being saved as" + name +".png"
import matplotlib.pyplot as plt
for k in vertex_list:
for n in k.neighbours:
col='black'
width=3
if n.minor_road or k.minor_road:
col='blue'
width=1
plt.plot([n.coords[0],k.coords[0]],[n.coords[1],k.coords[1]],color=col, linewidth=width)
plt.savefig(path+"/outputs/"+name+".png")
else:
print "Figure is not being saved as image, if you want to save it, change savefig option in conf.txt"
print "New File " + name+ ".json created in procedural_city_generation/outputs/ with " , len(vertex_list) , " Vertices "
return 0
def reconstruct(path=None):
if path is None:
import os
import procedural_city_generation
path=os.path.dirname(procedural_city_generation.__file__)+"/outputs/output.json"
import json
try:
with open(path,'r') as d:
data=d.read()
except IOError:
print "Input could not be located. Try to run the previous program in the chain first."
return 0
data=json.loads(data)
from procedural_city_generation.roadmap.Vertex import Vertex
import numpy as np
    vertex_list = [0] * len(data)
for x in data:
y=data[x]
k=Vertex(np.array(y[0]))
k.minor_road,k.seed,k.neighboursindizes=y[1],y[2],y[3]
vertex_list[int(x)]=k
index=0
for k in vertex_list:
for x in k.neighboursindizes:
k.neighbours.append(vertex_list[x])
k.selfindex=index
index+=1
return vertex_list
if __name__=='__main__':
k=reconstruct()
import matplotlib.pyplot as plt
fig=plt.figure()
import numpy as np
    for x in k:
x.selfplot()
plt.show()
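# One entry of the JSON written by save_vertexlist (illustrative values):
#   "3": [[12.5, 7.25], false, true, [0, 1, 5]]
# i.e. [(x, y) coords, minor_road flag, seed flag, neighbour indices],
# which is exactly what reconstruct() unpacks back into Vertex objects.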
|
mpl-2.0
|
seaotterman/tensorflow
|
tensorflow/python/kernel_tests/spacetodepth_op_test.py
|
90
|
8391
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpacetoDepth op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SpaceToDepthTest(test.TestCase):
def _testOne(self, inputs, block_size, outputs):
with self.test_session(use_gpu=True):
x_tf = array_ops.space_to_depth(math_ops.to_float(inputs), block_size)
self.assertAllEqual(x_tf.eval(), outputs)
def testBasic(self):
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered spatially.
def testLargerInput2x2(self):
x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered in depth. Here, larger block size.
def testLargerInput4x4(self):
x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
block_size = 4
x_out = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
def testDepthInterleaved(self):
x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths. Here an odd depth.
# To make sure elements are properly interleaved in depth.
def testDepthInterleavedDepth3(self):
x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions AND for larger input depths.
# To make sure elements are properly interleaved in depth and ordered
# spatially.
def testDepthInterleavedLarge(self):
x_np = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
[[3, 30], [4, 40], [7, 70], [8, 80]],
[[9, 90], [10, 100], [13, 130], [14, 140]],
[[11, 110], [12, 120], [15, 150], [16, 160]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40], [5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120],
[13, 130, 14, 140, 15, 150, 16, 160]]]]
self._testOne(x_np, block_size, x_out)
def testBlockSize2Batch10(self):
block_size = 2
def batch_input_elt(i):
return [[[1 * i], [2 * i], [5 * i], [6 * i]],
[[3 * i], [4 * i], [7 * i], [8 * i]],
[[9 * i], [10 * i], [13 * i], [14 * i]],
[[11 * i], [12 * i], [15 * i], [16 * i]]]
def batch_output_elt(i):
return [[[1 * i, 2 * i, 3 * i, 4 * i], [5 * i, 6 * i, 7 * i, 8 * i]],
[[9 * i, 10 * i, 11 * i, 12 * i],
[13 * i, 14 * i, 15 * i, 16 * i]]]
batch_size = 10
x_np = [batch_input_elt(i) for i in range(batch_size)]
x_out = [batch_output_elt(i) for i in range(batch_size)]
self._testOne(x_np, block_size, x_out)
# Tests for different width and height.
def testNonSquare(self):
x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]], [[5, 50], [6, 60]],
[[7, 70], [8, 80]], [[9, 90], [10, 100]], [[11, 110], [12, 120]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]], [[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120]]]]
self._testOne(x_np, block_size, x_out)
# Error handling:
def testInputWrongDimMissingDepth(self):
# The input is missing the last dimension ("depth")
x_np = [[[1, 2], [3, 4]]]
block_size = 2
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]], [[3], [4]]]
block_size = 2
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeOne(self):
# The block size is 1. The block size needs to be > 1.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 1
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 10
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeNotDivisibleWidth(self):
# The block size divides width but not height.
x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
def testBlockSizeNotDivisibleHeight(self):
# The block size divides height but not width.
x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
def testBlockSizeNotDivisibleBoth(self):
    # The block size divides neither width nor height.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
def testUnknownShape(self):
t = array_ops.space_to_depth(
array_ops.placeholder(dtypes.float32), block_size=4)
self.assertEqual(4, t.get_shape().ndims)
class SpaceToDepthGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_size):
assert 4 == x.ndim
with self.test_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.space_to_depth(tf_x, block_size)
epsilon = 1e-2
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for space_to_depth of x which is a four dimensional
# tensor of shape [b, h * block_size, w * block_size, d].
def _compare(self, b, h, w, d, block_size):
block_size_sq = block_size * block_size
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b, h * block_size, w * block_size, d])
self._checkGrad(x, block_size)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor with the cartesian product of the dimensions.
def testSmall(self):
block_size = 2
self._compare(1, 2, 3, 5, block_size)
def testSmall2(self):
block_size = 2
self._compare(2, 4, 3, 2, block_size)
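# A pure-NumPy reference for space_to_depth in the NHWC layout exercised
# above (sketch; checked against the testBasic and testDepthInterleaved
# vectors above, not part of the TensorFlow test suite):
def _space_to_depth_reference(x, block_size):
  n, h, w, c = x.shape
  x = x.reshape(n, h // block_size, block_size, w // block_size, block_size, c)
  # bring each block's rows and columns next to the channel axis
  x = x.transpose(0, 1, 3, 2, 4, 5)
  return x.reshape(n, h // block_size, w // block_size, block_size * block_size * c)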
if __name__ == "__main__":
test.main()
|
apache-2.0
|
jamesbulpin/xcp-xen-4.1
|
tools/xm-test/tests/network/13_network_domU_udp_pos.py
|
38
|
2376
|
#!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2006
# Copyright (C) flonatel GmbH & Co. KG, 2009
# Authors: <[email protected]>
# Andreas Florath <[email protected]>
# UDP tests to domU interface
# - creates two guest domains
# - sets up a single NIC on each on same subnet
# - conducts udp tests to the domU IP address.
# hping2 $domU_IP -1 -c 7 -d $size
# where $size = 1, 48, 64, 512, 1440, 1500, 1505,
#               4096, 4192, 32767, 65495
pingsizes = [ 1, 48, 64, 512, 1440, 1500, 1505, 4096, 4192,
32767, 65495 ]
from XmTestLib import *
def netDomain():
dom = XmTestDomain()
dom.newDevice(XenNetDevice, "eth0")
try:
console = dom.start()
console.setHistorySaveCmds(value=True)
except DomainError, e:
if verbose:
print "Failed to create test domain because:"
print e.extra
FAIL(str(e))
return dom
fails = ""
# Test creates 2 domains, which requires 4 ips: 2 for the domains and 2 for
# aliases on dom0
if xmtest_netconf.canRunNetTest(4) == False:
SKIP("Don't have enough free configured IPs to run this test")
# Fire up a pair of guest domains w/1 nic each
guest1 = netDomain()
guest1_console = guest1.getConsole()
guest1_netdev = guest1.getDevice("eth0")
guest1_ip = guest1_netdev.getNetDevIP()
guest1_dom0_alias_ip = guest1_netdev.dom0_alias_ip
guest2 = netDomain()
guest2_console = guest2.getConsole()
guest2_netdev = guest2.getDevice("eth0")
guest2_ip = guest2_netdev.getNetDevIP()
guest2_dom0_alias_ip = guest2_netdev.dom0_alias_ip
def hping_cmd(ip, size):
return "hping2 " + ip + " -E /dev/urandom -1 -q " \
+ "-c 7 --fast -d " + str(size) + " -N " + str(size)
# Ping everything from guests
try:
for size in pingsizes:
for console in [(guest1_console, "Guest1Console"),
(guest2_console, "Guest2Console")]:
for dest_ip in [guest1_ip, guest1_dom0_alias_ip,
guest2_ip, guest2_dom0_alias_ip ]:
out = console[0].runCmd(hping_cmd(dest_ip, size))
if out["return"]:
fails += " [%d, %s, %s]" % (size, console[1], dest_ip)
except ConsoleError, e:
FAIL(str(e))
guest1.stop()
guest2.stop()
if len(fails):
FAIL("UDP hping2 failed for size" + fails + ".")
|
gpl-2.0
|
DerekRies/generator-angular-sublime
|
generator-angular.py
|
1
|
5121
|
import sublime
import sublime_plugin
import subprocess
import os
import errno
import threading
# from Queue import Queue, Empty
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class AngularCommand(sublime_plugin.WindowCommand):
"""
This plugin will take a file path for where the folder should be located
and the folder name
"""
def run(self):
# sublime.error_message("haha shits broke son")
user_path = os.path.expanduser("~")
if len(self.window.folders()) == 0:
self.window.show_input_panel("Application Name:", user_path+"/", self.on_done, None, None)
else:
self.run_yo(self.window.folders()[0])
def on_done(self, user_input):
if os.path.isdir(user_input) is False:
print("Creating dir")
mkdir_p(user_input)
            os.chmod(user_input, 0o777)
self.run_yo(user_input)
def run_yo(self, path):
thread = AngularAppGenerator(path)
thread.daemon = True
thread.start()
# print(os.getcwd())
class AngularAppGenerator(threading.Thread):
def __init__(self, path):
self.path = path
threading.Thread.__init__(self)
def run(self):
os.chdir(self.path)
# output = subprocess.Popen(["ls"], stdout=subprocess.PIPE).communicate()[0]
output = subprocess.Popen(["yo", "angular", "--minsafe"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# response = output.stdout.read()
print(output.communicate(input="Y\n"))
"""
        Okay, here's what needs to happen here:
        While the program is running:
            We need to wait on a response from the yo angular command
            Then we need to respond accordingly
"""
print("thread done")
class ThreadedTaskRunner(threading.Thread):
def __init__(self, path, cmd):
self.path = path
self.cmd = cmd
threading.Thread.__init__(self)
def run(self):
os.chdir(self.path)
output = subprocess.Popen(self.cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
print(output.communicate()[0])
file_path = self.path + "/app/"
files = []
print(self.cmd[1])
if self.cmd[1] == "angular:route":
files.append(file_path + "views/{0}.html".format(self.cmd[2]))
files.append(file_path + "scripts/controllers/{0}.js".format(self.cmd[2]))
elif self.cmd[1] == "angular:controller":
files.append(file_path + "scripts/controllers/{0}.js".format(self.cmd[2]))
elif self.cmd[1] == "angular:service":
files.append(file_path + "scripts/services/{0}.js".format(self.cmd[2]))
elif self.cmd[1] == "angular:directive":
files.append(file_path + "scripts/directives/{0}.js".format(self.cmd[2]))
elif self.cmd[1] == "angular:filter":
files.append(file_path + "scripts/filters/{0}.js".format(self.cmd[2]))
for f in files:
sublime.windows()[0].open_file(f)
print("Angular Task Should Have Run")
class AngularRoute(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel("Route Name:", "", self.on_done, None, None)
def on_done(self, text):
thread = ThreadedTaskRunner(self.window.folders()[0], ["yo", "angular:route", text, "--minsafe"])
thread.start()
class AngularController(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel("Controller Name:", "", self.on_done, None, None)
def on_done(self, text):
thread = ThreadedTaskRunner(self.window.folders()[0], ["yo", "angular:controller", text, "--minsafe"])
thread.start()
class AngularDirective(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel("Directive Name:", "", self.on_done, None, None)
def on_done(self, text):
thread = ThreadedTaskRunner(self.window.folders()[0], ["yo", "angular:directive", text, "--minsafe"])
thread.start()
class AngularFilter(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel("Filter Name:", "", self.on_done, None, None)
def on_done(self, text):
thread = ThreadedTaskRunner(self.window.folders()[0], ["yo", "angular:filter", text, "--minsafe"])
thread.start()
class AngularService(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel("Service Name:", "", self.on_done, None, None)
def on_done(self, text):
thread = ThreadedTaskRunner(self.window.folders()[0], ["yo", "angular:service", text, "--minsafe"])
thread.start()
class AngularView(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel("View Name:", "", self.on_done, None, None)
def on_done(self, text):
thread = ThreadedTaskRunner(self.window.folders()[0], ["yo", "angular:view", text, "--minsafe"])
thread.start()
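# Sublime Text derives command names from these WindowCommand class names,
# so the classes above are invoked as "angular_route", "angular_controller",
# "angular_service", and so on; a hypothetical key binding (sketch):
#   { "keys": ["super+alt+r"], "command": "angular_route" }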
|
mit
|
kostyll/django-websocket-request-example
|
ws.py
|
2
|
1154
|
import os
import logging
from tornado import web, ioloop
from sockjs.tornado import SockJSRouter, SockJSConnection
# Set Django Environment
os.environ['DJANGO_SETTINGS_MODULE'] = 'wsrequest_example.settings'
from wsrequest import WebSocketRequest
logging.getLogger().setLevel(logging.INFO)
class IndexHandler(web.RequestHandler):
def get(self, url='/'):
self.render('templates/index.html')
class RESTAPIConnection(SockJSConnection):
def on_message(self, data):
logging.info(self.session.conn_info.ip)
logging.info(self.session.conn_info.headers)
request = WebSocketRequest(data)
response = request.get_response()
self.send({
'response': {
'url': request.get_url(),
'data': response.data
}
})
if __name__ == '__main__':
port = int(os.environ.get("PORT", 8080))
Router = SockJSRouter(RESTAPIConnection, '/ws/api')
app = web.Application(
[(r'/', IndexHandler)] + Router.urls
)
app.listen(port)
logging.info(' [*] Listening on 0.0.0.0:{}'.format(port))
ioloop.IOLoop.instance().start()
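# Run sketch: the port defaults to 8080 and honours the PORT environment
# variable read above, e.g.
#   $ PORT=9000 python ws.py
# after which SockJS clients connect on the /ws/api endpoint routed above.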
|
mit
|
mark-ignacio/phantomjs
|
src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_throw_on_failure_test.py
|
2917
|
5766
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
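# Example (illustrative): SetEnvVar(THROW_ON_FAILURE, '1') sets
# GTEST_THROW_ON_FAILURE=1 in os.environ, and SetEnvVar(THROW_ON_FAILURE,
# None) removes it again.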
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO([email protected]): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
|
bsd-3-clause
|
cgar/servo
|
python/servo/package_commands.py
|
15
|
16950
|
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
import sys
import os.path as path
sys.path.append(path.join(path.dirname(sys.argv[0]), "components", "style", "properties", "Mako-0.9.1.zip"))
import json
import os
import shutil
import subprocess
import mako.template
from mach.registrar import Registrar
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from mako.template import Template
from servo.command_base import (
archive_deterministically,
BuildNotFound,
cd,
CommandBase,
is_macosx,
is_windows,
get_browserhtml_path,
)
from servo.command_base import find_dep_path_newest
def delete(path):
try:
os.remove(path) # Succeeds if path was a file
except OSError: # Or, if path was a directory...
shutil.rmtree(path) # Remove it and all its contents.
def otool(s):
o = subprocess.Popen(['/usr/bin/otool', '-L', s], stdout=subprocess.PIPE)
for l in o.stdout:
if l[0] == '\t':
yield l.split(' ', 1)[0][1:]
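# otool -L prints each linked install name on a tab-indented line, e.g.
# (illustrative):
#   \t/usr/lib/libSystem.B.dylib (compatibility version 1.0.0, ...)
# so the generator above yields just the leading path of every such line.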
def listfiles(directory):
return [f for f in os.listdir(directory)
if path.isfile(path.join(directory, f))]
def install_name_tool(old, new, binary):
try:
subprocess.check_call(['install_name_tool', '-change', old, '@executable_path/' + new, binary])
except subprocess.CalledProcessError as e:
print("install_name_tool exited with return value %d" % e.returncode)
def is_system_library(lib):
return lib.startswith("/System/Library") or lib.startswith("/usr/lib")
def change_non_system_libraries_path(libraries, relative_path, binary):
for lib in libraries:
if is_system_library(lib):
continue
new_path = path.join(relative_path, path.basename(lib))
install_name_tool(lib, new_path, binary)
def copy_dependencies(binary_path, lib_path):
relative_path = path.relpath(lib_path, path.dirname(binary_path)) + "/"
# Update binary libraries
binary_dependencies = set(otool(binary_path))
change_non_system_libraries_path(binary_dependencies, relative_path, binary_path)
# Update dependencies libraries
need_checked = binary_dependencies
checked = set()
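# Transitively walk the dylib dependency graph: each non-system library that
# is pulled in gets copied next to the binary and re-pointed at the relative
# path, and its own dependencies are queued for the next pass.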
while need_checked:
checking = set(need_checked)
need_checked = set()
for f in checking:
# No need to check these for their dylibs
if is_system_library(f):
continue
need_relinked = set(otool(f))
new_path = path.join(lib_path, path.basename(f))
if not path.exists(new_path):
shutil.copyfile(f, new_path)
change_non_system_libraries_path(need_relinked, relative_path, new_path)
need_checked.update(need_relinked)
checked.update(checking)
need_checked.difference_update(checked)
def copy_windows_dependencies(binary_path, destination):
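# Prefer the OpenSSL DLLs produced by the build itself; if they are missing,
# fall back to copying the MinGW runtime DLLs from the default MSYS2 location.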
try:
for d in ["libeay32md.dll", "ssleay32md.dll"]:
shutil.copy(path.join(binary_path, d), destination)
except EnvironmentError: # md-suffixed OpenSSL DLLs are missing; copy the fallback set
deps = [
"libstdc++-6.dll",
"libwinpthread-1.dll",
"libbz2-1.dll",
"libgcc_s_seh-1.dll",
"libexpat-1.dll",
"zlib1.dll",
"libiconv-2.dll",
"libintl-8.dll",
"libeay32.dll",
"ssleay32.dll",
]
for d in deps:
dep_path = path.join("C:\\msys64\\mingw64\\bin", d)
if path.exists(dep_path):
shutil.copy(dep_path, path.join(destination, d))
def change_prefs(resources_path, platform):
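# Merge platform-specific overrides from package-prefs.json into prefs.json.
# Override keys look like "os:<platform>;<pref name>" (scoped to one platform)
# or a plain pref name (applied whenever the pref already exists).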
print("Swapping prefs")
prefs_path = path.join(resources_path, "prefs.json")
package_prefs_path = path.join(resources_path, "package-prefs.json")
os_type = "os:{}".format(platform)
with open(prefs_path) as prefs, open(package_prefs_path) as package_prefs:
prefs = json.load(prefs)
package_prefs = json.load(package_prefs)
for pref in package_prefs:
if os_type in pref:
prefs[pref.split(";")[1]] = package_prefs[pref]
if pref in prefs:
prefs[pref] = package_prefs[pref]
with open(prefs_path, "w") as out:
json.dump(prefs, out, sort_keys=True, indent=2)
delete(package_prefs_path)
@CommandProvider
class PackageCommands(CommandBase):
@Command('package',
description='Package Servo',
category='package')
@CommandArgument('--release', '-r', action='store_true',
help='Package the release build')
@CommandArgument('--dev', '-d', action='store_true',
help='Package the dev build')
@CommandArgument('--android',
default=None,
action='store_true',
help='Package Android')
def package(self, release=False, dev=False, android=None, debug=False, debugger=None):
env = self.build_env()
if android is None:
android = self.config["build"]["android"]
binary_path = self.get_binary_path(release, dev, android=android)
dir_to_root = self.get_top_dir()
target_dir = path.dirname(binary_path)
if android:
if dev:
env["NDK_DEBUG"] = "1"
env["ANT_FLAVOR"] = "debug"
dev_flag = "-d"
else:
env["ANT_FLAVOR"] = "release"
dev_flag = ""
output_apk = "{}.apk".format(binary_path)
dir_to_apk = path.join(target_dir, "apk")
if path.exists(dir_to_apk):
print("Cleaning up from previous packaging")
delete(dir_to_apk)
shutil.copytree(path.join(dir_to_root, "support", "android", "apk"), dir_to_apk)
blurdroid_path = find_dep_path_newest('blurdroid', binary_path)
if blurdroid_path is None:
print("Could not find blurdroid package; perhaps you haven't built Servo.")
return 1
else:
dir_to_libs = path.join(dir_to_apk, "libs")
if not path.exists(dir_to_libs):
os.makedirs(dir_to_libs)
shutil.copy2(blurdroid_path + '/out/blurdroid.jar', dir_to_libs)
try:
with cd(path.join("support", "android", "build-apk")):
subprocess.check_call(["cargo", "run", "--", dev_flag, "-o", output_apk, "-t", target_dir,
"-r", dir_to_root], env=env)
except subprocess.CalledProcessError as e:
print("Packaging Android exited with return value %d" % e.returncode)
return e.returncode
elif is_macosx():
print("Creating Servo.app")
dir_to_dmg = path.join(target_dir, 'dmg')
dir_to_app = path.join(dir_to_dmg, 'Servo.app')
dir_to_resources = path.join(dir_to_app, 'Contents', 'Resources')
if path.exists(dir_to_dmg):
print("Cleaning up from previous packaging")
delete(dir_to_dmg)
browserhtml_path = get_browserhtml_path(binary_path)
print("Copying files")
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copytree(browserhtml_path, path.join(dir_to_resources, 'browserhtml'))
shutil.copy2(path.join(dir_to_root, 'Info.plist'), path.join(dir_to_app, 'Contents', 'Info.plist'))
content_dir = path.join(dir_to_app, 'Contents', 'MacOS')
os.makedirs(content_dir)
shutil.copy2(binary_path, content_dir)
change_prefs(dir_to_resources, "macosx")
print("Finding dylibs and relinking")
copy_dependencies(path.join(content_dir, 'servo'), content_dir)
print("Adding version to Credits.rtf")
version_command = [binary_path, '--version']
p = subprocess.Popen(version_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
version, stderr = p.communicate()
if p.returncode != 0:
raise Exception("Error occurred when getting Servo version: " + stderr)
version = "Nightly version: " + version
template_path = path.join(dir_to_resources, 'Credits.rtf.mako')
credits_path = path.join(dir_to_resources, 'Credits.rtf')
with open(template_path) as template_file:
template = mako.template.Template(template_file.read())
with open(credits_path, "w") as credits_file:
credits_file.write(template.render(version=version))
delete(template_path)
print("Writing run-servo")
bhtml_path = path.join('${0%/*}', '..', 'Resources', 'browserhtml', 'index.html')
runservo = os.open(
path.join(content_dir, 'run-servo'),
os.O_WRONLY | os.O_CREAT,
int("0755", 8)
)
os.write(runservo, '#!/bin/bash\nexec ${0%/*}/servo ' + bhtml_path)
os.close(runservo)
print("Creating dmg")
os.symlink('/Applications', path.join(dir_to_dmg, 'Applications'))
dmg_path = path.join(target_dir, "servo-tech-demo.dmg")
try:
subprocess.check_call(['hdiutil', 'create', '-volname', 'Servo', dmg_path, '-srcfolder', dir_to_dmg])
except subprocess.CalledProcessError as e:
print("Packaging MacOS dmg exited with return value %d" % e.returncode)
return e.returncode
print("Cleaning up")
delete(dir_to_dmg)
print("Packaged Servo into " + dmg_path)
print("Creating brew package")
dir_to_brew = path.join(target_dir, 'brew_tmp')
dir_to_tar = path.join(target_dir, 'brew')
if not path.exists(dir_to_tar):
os.makedirs(dir_to_tar)
tar_path = path.join(dir_to_tar, "servo.tar.gz")
if path.exists(dir_to_brew):
print("Cleaning up from previous packaging")
delete(dir_to_brew)
if path.exists(tar_path):
print("Deleting existing package")
os.remove(tar_path)
shutil.copytree(path.join(dir_to_root, 'resources'), path.join(dir_to_brew, 'resources'))
os.makedirs(path.join(dir_to_brew, 'bin'))
shutil.copy2(binary_path, path.join(dir_to_brew, 'bin', 'servo'))
# Note that in the context of Homebrew, libexec is reserved for private use by the formula
# and therefore is not symlinked into HOMEBREW_PREFIX.
os.makedirs(path.join(dir_to_brew, 'libexec'))
copy_dependencies(path.join(dir_to_brew, 'bin', 'servo'), path.join(dir_to_brew, 'libexec'))
archive_deterministically(dir_to_brew, tar_path, prepend_path='servo/')
delete(dir_to_brew)
print("Packaged Servo into " + tar_path)
elif is_windows():
dir_to_msi = path.join(target_dir, 'msi')
if path.exists(dir_to_msi):
print("Cleaning up from previous packaging")
delete(dir_to_msi)
os.makedirs(dir_to_msi)
browserhtml_path = get_browserhtml_path(binary_path)
print("Copying files")
dir_to_temp = path.join(dir_to_msi, 'temp')
dir_to_temp_servo = path.join(dir_to_temp, 'servo')
dir_to_resources = path.join(dir_to_temp_servo, 'resources')
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copytree(browserhtml_path, path.join(dir_to_temp_servo, 'browserhtml'))
shutil.copy(binary_path, dir_to_temp_servo)
shutil.copy("{}.manifest".format(binary_path), dir_to_temp_servo)
copy_windows_dependencies(target_dir, dir_to_temp_servo)
change_prefs(dir_to_resources, "windows")
# generate Servo.wxs
template_path = path.join(dir_to_root, "support", "windows", "Servo.wxs.mako")
template = Template(open(template_path).read())
wxs_path = path.join(dir_to_msi, "Servo.wxs")
open(wxs_path, "w").write(template.render(
exe_path=target_dir,
dir_to_temp=dir_to_temp_servo,
resources_path=dir_to_resources))
# run candle and light
print("Creating MSI")
try:
with cd(dir_to_msi):
subprocess.check_call(['candle', wxs_path])
except subprocess.CalledProcessError as e:
print("WiX candle exited with return value %d" % e.returncode)
return e.returncode
try:
wxsobj_path = "{}.wixobj".format(path.splitext(wxs_path)[0])
with cd(dir_to_msi):
subprocess.check_call(['light', wxsobj_path])
except subprocess.CalledProcessError as e:
print("WiX light exited with return value %d" % e.returncode)
return e.returncode
print("Packaged Servo into " + path.join(dir_to_msi, "Servo.msi"))
print("Creating ZIP")
shutil.make_archive(path.join(dir_to_msi, "Servo"), "zip", dir_to_temp)
print("Packaged Servo into " + path.join(dir_to_msi, "Servo.zip"))
print("Cleaning up")
delete(dir_to_temp)
else:
dir_to_temp = path.join(target_dir, 'packaging-temp')
browserhtml_path = get_browserhtml_path(binary_path)
if path.exists(dir_to_temp):
# TODO(aneeshusa): lock dir_to_temp to prevent simultaneous builds
print("Cleaning up from previous packaging")
delete(dir_to_temp)
print("Copying files")
dir_to_resources = path.join(dir_to_temp, 'resources')
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copytree(browserhtml_path, path.join(dir_to_temp, 'browserhtml'))
shutil.copy(binary_path, dir_to_temp)
change_prefs(dir_to_resources, "linux")
print("Creating tarball")
tar_path = path.join(target_dir, 'servo-tech-demo.tar.gz')
archive_deterministically(dir_to_temp, tar_path, prepend_path='servo/')
print("Cleaning up")
delete(dir_to_temp)
print("Packaged Servo into " + tar_path)
@Command('install',
description='Install Servo (currently, Android and Windows only)',
category='package')
@CommandArgument('--release', '-r', action='store_true',
help='Install the release build')
@CommandArgument('--dev', '-d', action='store_true',
help='Install the dev build')
@CommandArgument('--android',
action='store_true',
help='Install on Android')
def install(self, release=False, dev=False, android=False):
try:
binary_path = self.get_binary_path(release, dev, android=android)
except BuildNotFound:
print("Servo build not found. Building servo...")
result = Registrar.dispatch(
"build", context=self.context, release=release, dev=dev, android=android
)
if result:
return result
try:
binary_path = self.get_binary_path(release, dev, android=android)
except BuildNotFound:
print("Rebuilding Servo did not solve the missing build problem.")
return 1
if android:
pkg_path = binary_path + ".apk"
exec_command = ["adb", "install", "-r", pkg_path]
elif is_windows():
pkg_path = path.join(path.dirname(binary_path), 'msi', 'Servo.msi')
exec_command = ["msiexec", "/i", pkg_path]
if not path.exists(pkg_path):
result = Registrar.dispatch(
"package", context=self.context, release=release, dev=dev, android=android
)
if result != 0:
return result
print(" ".join(exec_command))
return subprocess.call(exec_command, env=self.build_env())
|
mpl-2.0
|
rudametw/rudametw.github.io
|
src/teaching/ima2a4/print_markdown_code.py
|
2
|
1309
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate output for markdown needed for website file links.
"""
import glob, os
from os import listdir
from os.path import isfile, join
onlyfiles = [f for f in listdir(".") if isfile(join(".", f))]
print (onlyfiles)
os.chdir(".")
for file in glob.glob("*.pdf"):
print(file)
print ("")
print ("")
print ("")
#for file in sorted(glob.glob("*.pdf"), key=os.path.getsize):
for file in sorted(glob.glob("*.pdf")):
print("["+file+"](ima2a4/"+file+") | ")
#def cleanup(templates=template_files):
#for tmplt in templates:
#print ("Cleaning up temp files for "+tmplt+ "...")
#subprocess.call("rm " + tmplt + ".log", shell=True)
#subprocess.call("rm " + tmplt + ".aux", shell=True)
#def getFileName(pdf_name):
#file_name = (os.path.splitext(os.path.basename(pdf_name))[0])
##print ("Filename: " + file_name)
#return file_name
##print (os.path.basename(pdf_name))
##print (os.path.splitext(pdf_name)[1])
#def renamePDF(source, dest, ending):
#cmd = "mv " + source+".pdf " + dest+"-"+ending + ".pdf"
#print (" Moving file: "+cmd)
#subprocess.call(cmd , shell=True)
#"""
#MAIN
#"""
##compile_pdfs(templates[0],pdf_files)
#compile_pdfs()
##print ("")
#cleanup()
##print ("The end...")
|
gpl-3.0
|
mattnenterprise/servo
|
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py
|
12
|
4579
|
import sys
import unittest
import pytest
from .. import parser, serializer
class TokenizerTest(unittest.TestCase):
def setUp(self):
self.serializer = serializer.ManifestSerializer()
self.parser = parser.Parser()
def serialize(self, input_str):
return self.serializer.serialize(self.parser.parse(input_str))
def compare(self, input_str, expected=None):
if expected is None:
expected = input_str
expected = expected.encode("utf8")
actual = self.serialize(input_str)
self.assertEquals(actual, expected)
def test_0(self):
self.compare("""key: value
[Heading 1]
other_key: other_value
""")
def test_1(self):
self.compare("""key: value
[Heading 1]
other_key:
if a or b: other_value
""")
def test_2(self):
self.compare("""key: value
[Heading 1]
other_key:
if a or b: other_value
fallback_value
""")
def test_3(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == 1: other_value
fallback_value
""")
def test_4(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == "1": other_value
fallback_value
""")
def test_5(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == "abc"[1]: other_value
fallback_value
""")
def test_6(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == "abc"[c]: other_value
fallback_value
""")
def test_7(self):
self.compare("""key: value
[Heading 1]
other_key:
if (a or b) and c: other_value
fallback_value
""",
"""key: value
[Heading 1]
other_key:
if a or b and c: other_value
fallback_value
""")
def test_8(self):
self.compare("""key: value
[Heading 1]
other_key:
if a or (b and c): other_value
fallback_value
""")
def test_9(self):
self.compare("""key: value
[Heading 1]
other_key:
if not (a and b): other_value
fallback_value
""")
def test_10(self):
self.compare("""key: value
[Heading 1]
some_key: some_value
[Heading 2]
other_key: other_value
""")
def test_11(self):
self.compare("""key:
if not a and b and c and d: true
""")
def test_12(self):
self.compare("""[Heading 1]
key: [a:1, b:2]
""")
def test_13(self):
self.compare("""key: [a:1, "b:#"]
""")
def test_14(self):
self.compare("""key: [","]
""")
def test_15(self):
self.compare("""key: ,
""")
def test_16(self):
self.compare("""key: ["]", b]
""")
def test_17(self):
self.compare("""key: ]
""")
def test_18(self):
self.compare("""key: \]
""", """key: ]
""")
def test_escape_0(self):
self.compare(r"""k\t\:y: \a\b\f\n\r\t\v""",
r"""k\t\:y: \x07\x08\x0c\n\r\t\x0b
""")
def test_escape_1(self):
self.compare(r"""k\x00: \x12A\x45""",
r"""k\x00: \x12AE
""")
def test_escape_2(self):
self.compare(r"""k\u0045y: \u1234A\uABc6""",
u"""kEy: \u1234A\uabc6
""")
def test_escape_3(self):
self.compare(r"""k\u0045y: \u1234A\uABc6""",
u"""kEy: \u1234A\uabc6
""")
def test_escape_4(self):
self.compare(r"""key: '\u1234A\uABc6'""",
u"""key: \u1234A\uabc6
""")
def test_escape_5(self):
self.compare(r"""key: [\u1234A\uABc6]""",
u"""key: [\u1234A\uabc6]
""")
def test_escape_6(self):
self.compare(r"""key: [\u1234A\uABc6\,]""",
u"""key: ["\u1234A\uabc6,"]
""")
def test_escape_7(self):
self.compare(r"""key: [\,\]\#]""",
r"""key: [",]#"]
""")
def test_escape_8(self):
self.compare(r"""key: \#""",
r"""key: "#"
""")
@pytest.mark.xfail(sys.maxunicode == 0xFFFF, reason="narrow unicode")
def test_escape_9(self):
self.compare(r"""key: \U10FFFFabc""",
u"""key: \U0010FFFFabc
""")
def test_escape_10(self):
self.compare(r"""key: \u10FFab""",
u"""key: \u10FFab
""")
def test_escape_11(self):
self.compare(r"""key: \\ab
""")
def test_atom_1(self):
self.compare(r"""key: @True
""")
def test_atom_2(self):
self.compare(r"""key: @False
""")
def test_atom_3(self):
self.compare(r"""key: @Reset
""")
def test_atom_4(self):
self.compare(r"""key: [a, @Reset, b]
""")
|
mpl-2.0
|
KNMI/VERCE
|
verce-hpc-pe/src/networkx/algorithms/tests/test_swap.py
|
32
|
1260
|
#!/usr/bin/env python
from nose.tools import *
from networkx import *
import networkx as nx
def test_double_edge_swap():
graph = barabasi_albert_graph(200,1)
degrees = sorted(graph.degree().values())
G = double_edge_swap(graph, 40)
assert_equal(degrees, sorted(graph.degree().values()))
def test_connected_double_edge_swap():
graph = barabasi_albert_graph(200,1)
degrees = sorted(graph.degree().values())
G = connected_double_edge_swap(graph, 40)
assert_true(is_connected(graph))
assert_equal(degrees, sorted(graph.degree().values()))
@raises(NetworkXError)
def test_double_edge_swap_small():
G = nx.double_edge_swap(nx.path_graph(3))
@raises(NetworkXError)
def test_double_edge_swap_tries():
G = nx.double_edge_swap(nx.path_graph(10),nswap=1,max_tries=0)
@raises(NetworkXError)
def test_connected_double_edge_swap_small():
G = nx.connected_double_edge_swap(nx.path_graph(3))
@raises(NetworkXError)
def test_connected_double_edge_swap_not_connected():
G = nx.path_graph(3)
G.add_path([10,11,12])
G = nx.connected_double_edge_swap(G)
def test_degree_seq_c4():
G = cycle_graph(4)
degrees = sorted(G.degree().values())
G = double_edge_swap(G,1,100)
assert_equal(degrees, sorted(G.degree().values()))
|
mit
|
yitian134/chromium
|
third_party/tlslite/tlslite/utils/compat.py
|
361
|
4060
|
"""Miscellaneous functions to mask Python version differences."""
import sys
import os
if sys.version_info < (2,2):
raise AssertionError("Python 2.2 or later required")
if sys.version_info < (2,3):
def enumerate(collection):
return zip(range(len(collection)), collection)
class Set:
def __init__(self, seq=None):
self.values = {}
if seq:
for e in seq:
self.values[e] = None
def add(self, e):
self.values[e] = None
def discard(self, e):
if e in self.values.keys():
del(self.values[e])
def union(self, s):
ret = Set()
for e in self.values.keys():
ret.values[e] = None
for e in s.values.keys():
ret.values[e] = None
return ret
def issubset(self, other):
for e in self.values.keys():
if e not in other.values.keys():
return False
return True
def __nonzero__( self):
return len(self.values.keys())
def __contains__(self, e):
return e in self.values.keys()
def __iter__(self):
return iter(self.values.keys())
if os.name != "java":
import array
def createByteArraySequence(seq):
return array.array('B', seq)
def createByteArrayZeros(howMany):
return array.array('B', [0] * howMany)
def concatArrays(a1, a2):
return a1+a2
def bytesToString(bytes):
return bytes.tostring()
def stringToBytes(s):
bytes = createByteArrayZeros(0)
bytes.fromstring(s)
return bytes
import math
def numBits(n):
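# Bit length via the hex representation: four bits per hex digit beyond the
# first, plus a lookup for the leading digit (e.g. numBits(0x1ff) == 9).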
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
BaseException = Exception
import sys
import traceback
def formatExceptionTrace(e):
newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
return newStr
else:
#Jython 2.1 is missing lots of python 2.3 stuff,
#which we have to emulate here:
#NOTE: JYTHON SUPPORT NO LONGER WORKS, DUE TO USE OF GENERATORS.
#THIS CODE IS LEFT IN SO THAT ONCE JYTHON UPDATES TO 2.2, IT HAS A
#CHANCE OF WORKING AGAIN.
import java
import jarray
def createByteArraySequence(seq):
if isinstance(seq, type("")): #If it's a string, convert
seq = [ord(c) for c in seq]
return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed
def createByteArrayZeros(howMany):
return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed
def concatArrays(a1, a2):
l = list(a1)+list(a2)
return createByteArraySequence(l)
#WAY TOO SLOW - MUST BE REPLACED------------
def bytesToString(bytes):
return "".join([chr(b) for b in bytes])
def stringToBytes(s):
bytes = createByteArrayZeros(len(s))
for count, c in enumerate(s):
bytes[count] = ord(c)
return bytes
#WAY TOO SLOW - MUST BE REPLACED------------
def numBits(n):
if n==0:
return 0
n= 1L * n; #convert to long, if it isn't already
return n.__tojava__(java.math.BigInteger).bitLength()
#Adjust the string to an array of bytes
def stringToJavaByteArray(s):
bytes = jarray.zeros(len(s), 'b')
for count, c in enumerate(s):
x = ord(c)
if x >= 128: x -= 256
bytes[count] = x
return bytes
BaseException = java.lang.Exception
import sys
import traceback
def formatExceptionTrace(e):
newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
return newStr
|
bsd-3-clause
|
curaloucura/money-forecast
|
moneyforecast/tests/records/fixtures.py
|
1
|
5073
|
import pytest
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import User
from records.models import (
Category, Record, Budget, OUTCOME, INCOME, SAVINGS, tmz)
from records.month_control import MonthControl, MonthControlWithBudget
@pytest.fixture
def current_date():
today = date.today()
today_datetime = datetime(
day=today.day, month=today.month, year=today.year)
return tmz(today_datetime)
@pytest.fixture
def future_date(current_date):
date = current_date+relativedelta(days=1)
return date
@pytest.fixture
def day_of_month(future_date):
return future_date.day
@pytest.fixture
def start_of_recurrence(future_date):
"""
Date object representing the first day of a record with recurrence
"""
return future_date
@pytest.fixture
def end_of_recurrence(future_date):
"""
Return a date which is used to determine the end month the recurrence
should occur
"""
date = future_date+relativedelta(months=6)
return date
@pytest.fixture
def next_month(current_date):
date = current_date+relativedelta(months=1)
return date
@pytest.fixture
def next_month_future(future_date):
date = future_date+relativedelta(months=1)
return date
@pytest.fixture
def infinite_future_date(current_date):
date = current_date+relativedelta(years=360)
return date
@pytest.fixture
def month_control(user, current_date):
"""
Return a MonthControl object for the current date.
Important: currently any Record fixture should come before month_control
"""
month_control = MonthControl(
user, current_date.month, current_date.year, cache={})
return month_control
@pytest.fixture
def month_control_with_budget(user, current_date):
"""
Return a MonthControlWithBudget object for the current date.
Important: currently any Record fixture should come before month_control
"""
month_control = MonthControlWithBudget(
user, current_date.month, current_date.year, cache={})
return month_control
def _user(username='test_user'):
raw_password = "fake"
new_user = User.objects.create_user(
username=username, email="[email protected]", password=raw_password)
setattr(new_user, "raw_password", raw_password)
return new_user
@pytest.fixture
def user():
return _user()
@pytest.fixture
def another_user():
return _user('another_user')
@pytest.fixture
def outcome(user):
"""
Main category of outcome type
"""
category = Category.objects.create(
name="outcome", type_category=OUTCOME, user=user)
return category
@pytest.fixture
def income(user):
"""
Main category of income type
"""
category = Category.objects.create(
name="income", type_category=INCOME, user=user)
return category
@pytest.fixture
def savings(user):
"""
Category of Savings
"""
category = Category.objects.create(
name="savings", type_category=SAVINGS, user=user)
return category
@pytest.fixture
def outcome_current(user, outcome, current_date):
"""
Record of type Outcome set to today (current date)
"""
record = Record.objects.create(
category=outcome, amount=1, start_date=current_date, user=user)
return record
@pytest.fixture
def outcome_future(user, outcome, future_date):
"""
Record of type Outcome set in the future
"""
record = Record.objects.create(
category=outcome, amount=1, start_date=future_date, user=user)
return record
@pytest.fixture
def outcome_recurrent(user, outcome, start_of_recurrence):
"""
Record of type Outcome set in the future with a day of the month set
to create a recurring record
This fixture should not be used with outcome_recurrent_limited and
outcome_with_parent, since those fixtures mutate this same record instance
"""
record = Record.objects.create(
category=outcome, amount=1, start_date=start_of_recurrence, user=user,
day_of_month=start_of_recurrence.day)
return record
@pytest.fixture
def outcome_recurrent_limited(user, outcome_recurrent, end_of_recurrence):
"""
Record of type Outcome set in the future with a recurring day of the month
set and limited to a certain time
"""
outcome_recurrent.end_date = end_of_recurrence
outcome_recurrent.save()
return outcome_recurrent
@pytest.fixture
def outcome_with_parent(
outcome_future, outcome_recurrent, next_month_future):
outcome_future.parent = outcome_recurrent
outcome_future.start_date = next_month_future
outcome_future.save()
return outcome_future
@pytest.fixture
def savings_current(request, user, savings, current_date):
"""
Record of type Savings set to today (current date)
"""
record = Record.objects.create(
category=savings, amount=1, start_date=current_date, user=user)
return record
@pytest.fixture
def budget(user):
budget = Budget.objects.create(user=user, amount=1)
return budget
|
unlicense
|
ozburo/youtube-dl
|
youtube_dl/extractor/springboardplatform.py
|
28
|
4242
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
xpath_attr,
xpath_text,
xpath_element,
unescapeHTML,
unified_timestamp,
)
class SpringboardPlatformIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
cms\.springboardplatform\.com/
(?:
(?:previews|embed_iframe)/(?P<index>\d+)/video/(?P<id>\d+)|
xml_feeds_advanced/index/(?P<index_2>\d+)/rss3/(?P<id_2>\d+)
)
'''
_TESTS = [{
'url': 'http://cms.springboardplatform.com/previews/159/video/981017/0/0/1',
'md5': '5c3cb7b5c55740d482561099e920f192',
'info_dict': {
'id': '981017',
'ext': 'mp4',
'title': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX',
'description': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1409132328,
'upload_date': '20140827',
'duration': 193,
},
}, {
'url': 'http://cms.springboardplatform.com/embed_iframe/159/video/981017/rab007/rapbasement.com/1/1',
'only_matching': True,
}, {
'url': 'http://cms.springboardplatform.com/embed_iframe/20/video/1731611/ki055/kidzworld.com/10',
'only_matching': True,
}, {
'url': 'http://cms.springboardplatform.com/xml_feeds_advanced/index/159/rss3/981017/0/0/1/',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return [
mobj.group('url')
for mobj in re.finditer(
r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//cms\.springboardplatform\.com/embed_iframe/\d+/video/\d+.*?)\1',
webpage)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') or mobj.group('id_2')
index = mobj.group('index') or mobj.group('index_2')
video = self._download_xml(
'http://cms.springboardplatform.com/xml_feeds_advanced/index/%s/rss3/%s'
% (index, video_id), video_id)
item = xpath_element(video, './/item', 'item', fatal=True)
content = xpath_element(
item, './{http://search.yahoo.com/mrss/}content', 'content',
fatal=True)
title = unescapeHTML(xpath_text(item, './title', 'title', fatal=True))
video_url = content.attrib['url']
if 'error_video.mp4' in video_url:
raise ExtractorError(
'Video %s no longer exists' % video_id, expected=True)
duration = int_or_none(content.get('duration'))
tbr = int_or_none(content.get('bitrate'))
filesize = int_or_none(content.get('fileSize'))
width = int_or_none(content.get('width'))
height = int_or_none(content.get('height'))
description = unescapeHTML(xpath_text(
item, './description', 'description'))
thumbnail = xpath_attr(
item, './{http://search.yahoo.com/mrss/}thumbnail', 'url',
'thumbnail')
timestamp = unified_timestamp(xpath_text(
item, './{http://cms.springboardplatform.com/namespaces.html}created',
'timestamp'))
formats = [{
'url': video_url,
'format_id': 'http',
'tbr': tbr,
'filesize': filesize,
'width': width,
'height': height,
}]
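# Mirror the HTTP format as an HLS variant: point at the hls. subdomain
# instead of cdn. and append .m3u8 to the URL.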
m3u8_format = formats[0].copy()
m3u8_format.update({
'url': re.sub(r'(https?://)cdn\.', r'\1hls.', video_url) + '.m3u8',
'ext': 'mp4',
'format_id': 'hls',
'protocol': 'm3u8_native',
})
formats.append(m3u8_format)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
}
|
unlicense
|
PmagPy/PmagPy
|
programs/grab_magic_key.py
|
2
|
1162
|
#!/usr/bin/env python
import sys
import pmagpy.pmag as pmag
def main():
"""
NAME
grab_magic_key.py
DESCRIPTION
picks out a key and prints its values to standard output
SYNTAX
grab_magic_key.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-key KEY: specify key to print to standard output
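-WD DIR: specify working directory (default: current directory)
EXAMPLE
grab_magic_key.py -f magic_measurements.txt -key er_specimen_name
(the file and key names above are illustrative)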
"""
dir_path = "./"
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index('-f')
magic_file = dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-key' in sys.argv:
ind = sys.argv.index('-key')
grab_key = sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
#
#
# get data read in
Data, file_type = pmag.magic_read(magic_file)
if len(Data) > 0:
for rec in Data:
print(rec[grab_key])
else:
print('bad file name')
if __name__ == "__main__":
main()
|
bsd-3-clause
|
HewlettPackard/oneview-ansible
|
test/test_image_streamer_deployment_plan.py
|
1
|
5438
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import mock
import pytest
from hpe_test_utils import ImageStreamerBaseTest
from oneview_module_loader import DeploymentPlanModule
FAKE_MSG_ERROR = 'Fake message error'
PARAMS_CREATE = dict(
config='{{ config }}',
state='present',
data=dict(
description='Description of this Deployment Plan',
name='Demo Deployment Plan',
hpProvided='false',
oeBuildPlanName='Demo Build Plan'
)
)
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
data=dict(
name="Deployment Plan name",
uri="/rest/deployment-plans/d1c7b09a-6c7b-4ae0-b68e-ed208ccde1b0"
)
)
PARAMS_UPDATE = dict(
config='config.json',
state='present',
data=dict(
name='Demo Deployment Plan',
newName='Demo Deployment Plan (changed)',
description='New description'
)
)
PARAMS_DELETE = dict(
config='config.json',
state='absent',
data=dict(
name='Demo Deployment Plan'
)
)
@pytest.mark.resource(TestDeploymentPlanModule='deployment_plans')
class TestDeploymentPlanModule(ImageStreamerBaseTest):
"""
ImageStreamerBaseTest has common tests for main function,
also provides the mocks used in this test case
"""
@pytest.fixture(autouse=True)
def specific_set_up(self):
self.DEPLOYMENT_PLAN = mock.Mock()
self.DEPLOYMENT_PLAN.data = dict(
name="Deployment Plan name",
uri="/rest/deployment-plans/d1c7b09a-6c7b-4ae0-b68e-ed208ccde1b0")
def test_create_new_deployment_plan(self):
self.resource.get_by_name.return_value = []
self.mock_ov_client.build_plans.get_by.return_value = [{'uri': '/rest/build-plans/1'}]
self.resource.data = {"name": "name"}
self.resource.create.return_value = self.resource
self.mock_ansible_module.params = PARAMS_CREATE
DeploymentPlanModule().run()
self.resource.create.assert_called_once_with(
{'oeBuildPlanURI': '/rest/build-plans/1',
'hpProvided': 'false',
'description': 'Description of this Deployment Plan',
'name': 'Demo Deployment Plan'}
)
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=DeploymentPlanModule.MSG_CREATED,
ansible_facts=dict(deployment_plan={"name": "name"})
)
def test_update_deployment_plan(self):
self.resource.get_by_name.return_value = self.DEPLOYMENT_PLAN
self.resource.data = {"name": "name"}
self.resource.update.return_value = self.resource
self.mock_ansible_module.params = PARAMS_UPDATE
DeploymentPlanModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=DeploymentPlanModule.MSG_UPDATED,
ansible_facts=dict(deployment_plan=self.DEPLOYMENT_PLAN.data)
)
def test_should_not_update_when_data_is_equals(self):
self.resource.get_by_name.return_value = self.DEPLOYMENT_PLAN
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
DeploymentPlanModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=DeploymentPlanModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(deployment_plan=self.DEPLOYMENT_PLAN.data)
)
def test_delete_deployment_plan(self):
self.resource.get_by_name.return_value = self.DEPLOYMENT_PLAN
self.mock_ansible_module.params = PARAMS_DELETE
DeploymentPlanModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=DeploymentPlanModule.MSG_DELETED
)
def test_should_do_nothing_when_deleting_a_non_existent_deployment_plan(self):
self.resource.get_by_name.return_value = []
self.mock_ansible_module.params = PARAMS_DELETE
DeploymentPlanModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=DeploymentPlanModule.MSG_ALREADY_ABSENT
)
def test_should_fail_when_build_plan_not_found(self):
self.resource.get_by_name.return_value = []
self.mock_ov_client.build_plans.get_by.return_value = None
del PARAMS_CREATE['data']['oeBuildPlanURI']
PARAMS_CREATE['data']['oeBuildPlanName'] = 'Demo Build Plan'
self.mock_ansible_module.params = PARAMS_CREATE
DeploymentPlanModule().run()
self.mock_ansible_module.fail_json.assert_called_once_with(
exception=mock.ANY,
msg=DeploymentPlanModule.MSG_BUILD_PLAN_WAS_NOT_FOUND
)
if __name__ == '__main__':
pytest.main([__file__])
|
apache-2.0
|
strongme/shadowsocks-manyuser
|
shadowsocks/encrypt.py
|
31
|
7691
|
#!/usr/bin/env python
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import hashlib
import string
import struct
import logging
import encrypt_salsa20
import encrypt_rc4_md5
def random_string(length):
try:
import M2Crypto.Rand
return M2Crypto.Rand.rand_bytes(length)
except ImportError:
# TODO really strong enough on Linux?
return os.urandom(length)
cached_tables = {}
cached_keys = {}
def get_table(key):
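# Derive a deterministic 256-byte substitution table from the MD5 of the key:
# start from the identity translation table and shuffle it with a sequence of
# key-dependent comparator sorts.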
m = hashlib.md5()
m.update(key)
s = m.digest()
(a, b) = struct.unpack('<QQ', s)
table = [c for c in string.maketrans('', '')]
for i in xrange(1, 1024):
table.sort(lambda x, y: int(a % (ord(x) + i) - a % (ord(y) + i)))
return table
def init_table(key, method=None):
if method is not None and method == 'table':
method = None
if method:
try:
__import__('M2Crypto')
except ImportError:
logging.error(('M2Crypto is required to use %s, please run'
' `apt-get install python-m2crypto`') % method)
sys.exit(1)
if not method:
if key in cached_tables:
return cached_tables[key]
encrypt_table = ''.join(get_table(key))
decrypt_table = string.maketrans(encrypt_table,
string.maketrans('', ''))
cached_tables[key] = [encrypt_table, decrypt_table]
return cached_tables[key]
else:
try:
Encryptor(key, method) # test if the settings are OK
except Exception as e:
logging.error(e)
sys.exit(1)
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as nodejs version
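# e.g. for aes-256-cfb this yields a 32-byte key and a 16-byte IV by chaining
# MD5 digests of (previous digest + password) until enough bytes accumulate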
password = str(password)
r = cached_keys.get(password, None)
if r:
return r
m = []
i = 0
while len(''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = ''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
cached_keys[password] = (key, iv)
return (key, iv)
method_supported = {
'aes-128-cfb': (16, 16),
'aes-192-cfb': (24, 16),
'aes-256-cfb': (32, 16),
'bf-cfb': (16, 8),
'camellia-128-cfb': (16, 16),
'camellia-192-cfb': (24, 16),
'camellia-256-cfb': (32, 16),
'cast5-cfb': (16, 8),
'des-cfb': (8, 8),
'idea-cfb': (16, 8),
'rc2-cfb': (16, 8),
'rc4': (16, 0),
'rc4-md5': (16, 16),
'seed-cfb': (16, 16),
'salsa20-ctr': (32, 8),
}
class Encryptor(object):
def __init__(self, key, method=None):
if method == 'table':
method = None
self.key = key
self.method = method
self.iv = None
self.iv_sent = False
self.cipher_iv = ''
self.decipher = None
if method:
self.cipher = self.get_cipher(key, method, 1, iv=random_string(32))
else:
self.encrypt_table, self.decrypt_table = init_table(key)
self.cipher = None
def get_cipher_len(self, method):
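# despite the name, returns the (key_len, iv_len) tuple for the method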
method = method.lower()
m = method_supported.get(method, None)
return m
def iv_len(self):
return len(self.cipher_iv)
def get_cipher(self, password, method, op, iv=None):
password = password.encode('utf-8')
method = method.lower()
m = self.get_cipher_len(method)
if m:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
if iv is None:
iv = iv_
iv = iv[:m[1]]
if op == 1:
# this iv is for cipher not decipher
self.cipher_iv = iv[:m[1]]
if method == 'salsa20-ctr':
return encrypt_salsa20.Salsa20Cipher(method, key, iv, op)
elif method == 'rc4-md5':
return encrypt_rc4_md5.create_cipher(method, key, iv, op)
else:
import M2Crypto.EVP
return M2Crypto.EVP.Cipher(method.replace('-', '_'), key, iv,
op, key_as_bytes=0, d='md5',
salt=None, i=1, padding=1)
logging.error('method %s not supported' % method)
sys.exit(1)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if not self.method:
return string.translate(buf, self.encrypt_table)
else:
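# prepend the IV once, on the first encrypted chunk only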
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if not self.method:
return string.translate(buf, self.decrypt_table)
else:
if self.decipher is None:
decipher_iv_len = self.get_cipher_len(self.method)[1]
decipher_iv = buf[:decipher_iv_len]
self.decipher = self.get_cipher(self.key, self.method, 0,
iv=decipher_iv)
buf = buf[decipher_iv_len:]
if len(buf) == 0:
return buf
return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
if method is not None and method.lower() == 'table':
method = None
if not method:
[encrypt_table, decrypt_table] = init_table(password)
if op:
return string.translate(data, encrypt_table)
else:
return string.translate(data, decrypt_table)
else:
import M2Crypto.EVP
result = []
method = method.lower()
(key_len, iv_len) = method_supported[method]
(key, _) = EVP_BytesToKey(password, key_len, iv_len)
if op:
iv = random_string(iv_len)
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
if method == 'salsa20-ctr':
cipher = encrypt_salsa20.Salsa20Cipher(method, key, iv, op)
elif method == 'rc4-md5':
cipher = encrypt_rc4_md5.create_cipher(method, key, iv, op)
else:
cipher = M2Crypto.EVP.Cipher(method.replace('-', '_'), key, iv,
op, key_as_bytes=0, d='md5',
salt=None, i=1, padding=1)
result.append(cipher.update(data))
return ''.join(result)
|
mit
|
CoherentLabs/depot_tools
|
recipes/nacl.py
|
3
|
1505
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class NaCl(recipe_util.Recipe):
"""Basic Recipe class for NaCl."""
@staticmethod
def fetch_spec(props):
url = ('https://chromium.googlesource.com/native_client/'
'src/native_client.git')
solution = {
'name' : 'native_client',
'url' : url,
'deps_file' : '.DEPS.git',
'managed' : False,
'custom_deps' : {},
'safesync_url': '',
}
spec = {
'solutions': [solution],
'auto': True
}
if props.get('submodule_git_svn_spec'):
spec['submodule_git_svn_spec'] = props['submodule_git_svn_spec']
if props.get('target_os'):
spec['target_os'] = props['target_os'].split(',')
if props.get('target_os_only'):
spec['target_os_only'] = props['target_os_only']
checkout_type = 'gclient_git_svn'
if props.get('nosvn'):
checkout_type = 'gclient_git'
spec_type = '%s_spec' % checkout_type
return {
'type': checkout_type,
spec_type: spec,
}
@staticmethod
def expected_root(_props):
return 'native_client'
def main(argv=None):
return NaCl().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
bsd-3-clause
|
bsipocz/scikit-image
|
setup.py
|
11
|
4995
|
#! /usr/bin/env python
descr = """Image Processing SciKit
Image processing algorithms for SciPy, including IO, morphology, filtering,
warping, color manipulation, object detection, etc.
Please refer to the online documentation at
http://scikit-image.org/
"""
DISTNAME = 'scikit-image'
DESCRIPTION = 'Image processing routines for SciPy'
LONG_DESCRIPTION = descr
MAINTAINER = 'Stefan van der Walt'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-image.org'
LICENSE = 'Modified BSD'
DOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'
import os
import sys
import setuptools
from distutils.command.build_py import build_py
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# skimage __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-image to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKIMAGE_SETUP__ = True
with open('skimage/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
VERSION = line.strip().split()[-1][1:-1]
break
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
# requirements for those browsing PyPI
REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('skimage')
config.add_data_dir('skimage/data')
return config
if __name__ == "__main__":
try:
from numpy.distutils.core import setup
extra = {'configuration': configuration}
# Do not try and upgrade larger dependencies
for lib in ['scipy', 'numpy', 'matplotlib', 'pillow']:
try:
__import__(lib)
INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
if lib not in i]
except ImportError:
pass
except ImportError:
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info', '--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install scikit-image when Numpy is not yet
# present in the system.
pass
else:
print('To install scikit-image from source, you will need numpy.\n' +
'Install numpy with pip:\n' +
'pip install numpy\n'
'Or use your operating system package manager. For more\n' +
'details, see http://scikit-image.org/docs/stable/install.html')
sys.exit(1)
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
],
install_requires=INSTALL_REQUIRES,
# install cython when running setup.py (source install)
setup_requires=['cython>=0.21'],
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
zip_safe=False, # the package can run out of an .egg file
entry_points={
'console_scripts': ['skivi = skimage.scripts.skivi:main'],
},
cmdclass={'build_py': build_py},
**extra
)
|
bsd-3-clause
|
tyagiarpit/servo
|
tests/wpt/harness/wptrunner/reduce.py
|
156
|
6086
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import tempfile
from cStringIO import StringIO
from collections import defaultdict
import wptrunner
import wpttest
from mozlog import commandline, reader
logger = None
def setup_logging(args, defaults):
global logger
logger = commandline.setup_logging("web-platform-tests-unstable", args, defaults)
wptrunner.setup_stdlib_logger()
for name in args.keys():
if name.startswith("log_"):
args.pop(name)
return logger
def group(items, size):
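# Split items into consecutive chunks of at most `size` elements,
# e.g. group([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]].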
rv = []
i = 0
while i < len(items):
rv.append(items[i:i + size])
i += size
return rv
def next_power_of_two(num):
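# Smallest power of two >= num, e.g. next_power_of_two(5) == 8.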
rv = 1
while rv < num:
rv = rv << 1
return rv
class Reducer(object):
def __init__(self, target, **kwargs):
self.target = target
self.test_type = kwargs["test_types"][0]
run_info = wpttest.get_run_info(kwargs["metadata_root"],
kwargs["product"],
debug=False)
test_filter = wptrunner.TestFilter(include=kwargs["include"])
self.test_loader = wptrunner.TestLoader(kwargs["tests_root"],
kwargs["metadata_root"],
[self.test_type],
run_info,
manifest_filer=test_filter)
if kwargs["repeat"] == 1:
logger.critical("Need to specify --repeat with more than one repetition")
sys.exit(1)
self.kwargs = kwargs
def run(self):
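# Delta-debugging style bisection: repeatedly halve the tests that precede
# the target, keeping only the chunks whose removal makes the target stable
# (i.e. the chunks that contribute to the instability).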
all_tests = self.get_initial_tests()
tests = all_tests[:-1]
target_test = [all_tests[-1]]
if self.unstable(target_test):
return target_test
if not self.unstable(all_tests):
return []
chunk_size = next_power_of_two(int(len(tests) / 2))
logger.debug("Using chunk size %i" % chunk_size)
while chunk_size >= 1:
logger.debug("%i tests remain" % len(tests))
chunks = group(tests, chunk_size)
chunk_results = [None] * len(chunks)
for i, chunk in enumerate(chunks):
logger.debug("Running chunk %i/%i of size %i" % (i + 1, len(chunks), chunk_size))
trial_tests = []
chunk_str = ""
for j, inc_chunk in enumerate(chunks):
if i != j and chunk_results[j] in (None, False):
chunk_str += "+"
trial_tests.extend(inc_chunk)
else:
chunk_str += "-"
logger.debug("Using chunks %s" % chunk_str)
trial_tests.extend(target_test)
chunk_results[i] = self.unstable(trial_tests)
# if i == len(chunks) - 2 and all(item is False for item in chunk_results[:-1]):
# Dangerous? optimisation that if you got stability for 0..N-1 chunks
# it must be unstable with the Nth chunk
# chunk_results[i+1] = True
# continue
new_tests = []
keep_str = ""
for result, chunk in zip(chunk_results, chunks):
if not result:
keep_str += "+"
new_tests.extend(chunk)
else:
keep_str += "-"
logger.debug("Keeping chunks %s" % keep_str)
tests = new_tests
chunk_size = int(chunk_size / 2)
return tests + target_test
def unstable(self, tests):
logger.debug("Running with %i tests" % len(tests))
self.test_loader.tests = {self.test_type: tests}
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
with tempfile.NamedTemporaryFile() as f:
args = self.kwargs.copy()
args["log_raw"] = [f]
args["capture_stdio"] = False
wptrunner.setup_logging(args, {})
wptrunner.run_tests(test_loader=self.test_loader, **args)
wptrunner.logger.remove_handler(wptrunner.logger.handlers[0])
is_unstable = self.log_is_unstable(f)
sys.stdout, sys.stderr = stdout, stderr
logger.debug("Result was unstable with chunk removed"
if is_unstable else "stable")
return is_unstable
def log_is_unstable(self, log_f):
log_f.seek(0)
statuses = defaultdict(set)
def handle_status(item):
if item["test"] == self.target:
statuses[item["subtest"]].add(item["status"])
def handle_end(item):
if item["test"] == self.target:
statuses[None].add(item["status"])
reader.each_log(reader.read(log_f),
{"test_status": handle_status,
"test_end": handle_end})
logger.debug(str(statuses))
if not statuses:
logger.error("Didn't get any useful output from wptrunner")
log_f.seek(0)
for item in reader.read(log_f):
logger.debug(item)
return None
return any(len(item) > 1 for item in statuses.itervalues())
def get_initial_tests(self):
# Need to pass in arguments
all_tests = self.test_loader.tests[self.test_type]
tests = []
for item in all_tests:
tests.append(item)
if item.url == self.target:
break
logger.debug("Starting with tests: %s" % ("\n".join(item.id for item in tests)))
return tests
def do_reduce(**kwargs):
target = kwargs.pop("target")
reducer = Reducer(target, **kwargs)
unstable_set = reducer.run()
return unstable_set
|
mpl-2.0
|
DarioGT/OMS-PluginXML
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/encodings/charmap.py
|
1
|
1309
|
""" Generic Python Character Mapping Codec.
Use this codec directly rather than through the automatic
conversion mechanisms supplied by unicode() and .encode().
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.charmap_encode
decode = codecs.charmap_decode
class StreamWriter(Codec,codecs.StreamWriter):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamWriter.__init__(self,stream,errors)
self.mapping = mapping
def encode(self,input,errors='strict'):
return Codec.encode(input,errors,self.mapping)
class StreamReader(Codec,codecs.StreamReader):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamReader.__init__(self,stream,errors)
self.mapping = mapping
def decode(self,input,errors='strict'):
return Codec.decode(input,errors,self.mapping)
### encodings module API
def getregentry():
return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
|
gpl-3.0
|
Jeff-Tian/mybnb
|
Python27/Lib/multiprocessing/dummy/__init__.py
|
4
|
4634
|
#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
import array
import itertools
from multiprocessing import TimeoutError, cpu_count
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event
from Queue import Queue
#
#
#
class DummyProcess(threading.Thread):
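# Thread-backed stand-in for multiprocessing.Process: children are tracked
# through weak references, and exitcode is emulated (0 once a started thread
# has finished, None otherwise).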
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
threading.Thread.start(self)
@property
def exitcode(self):
if self._start_called and not self.is_alive():
return 0
else:
return None
#
#
#
class Condition(threading._Condition):
notify_all = threading._Condition.notify_all.im_func
#
#
#
Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.is_alive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = self.__dict__.items()
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from multiprocessing.pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
|
apache-2.0
|
imapp-pl/golem
|
tests/golem/rpc/test_service.py
|
1
|
1244
|
import unittest
from golem.rpc.service import ServiceHelper, ServiceMethods
class MockService(object):
def method_1(self):
return 1
def method_2(self, arg):
return str(arg)
class TestService(unittest.TestCase):
def test_helper(self):
service = MockService()
methods = ServiceHelper.to_dict(service)
method_names = ServiceHelper.to_set(service)
assert len(method_names) == 2
assert len(method_names) == len(methods)
assert 'method_1' in method_names
assert 'method_2' in method_names
for method_name in method_names:
assert method_name in methods
def test_methods(self):
service = MockService()
_methods = ServiceMethods(service)
_dict = ServiceHelper.to_dict(service)
_set = ServiceHelper.to_set(service)
_list = list(_set)
def eq(f, s):
return len(f) == len(s) and all([_f in s for _f in f])
assert eq(ServiceMethods.names(service), _set)
assert eq(ServiceMethods.names(_methods), _set)
assert eq(ServiceMethods.names(_dict), _set)
assert eq(ServiceMethods.names(_set), _set)
assert eq(ServiceMethods.names(_list), _set)
|
gpl-3.0
|
houchj/selenium
|
py/test/selenium/webdriver/common/webserver.py
|
60
|
4623
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A simple web server for testing purpose.
It serves the testing html pages that are needed by the webdriver unit tests."""
import logging
import os
import socket
import threading
from io import open
try:
from urllib import request as urllib_request
except ImportError:
import urllib as urllib_request
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def updir():
dirname = os.path.dirname
return dirname(dirname(__file__))
LOGGER = logging.getLogger(__name__)
WEBDRIVER = os.environ.get("WEBDRIVER", updir())
HTML_ROOT = os.path.join(WEBDRIVER, "../../../../../../common/src/web")
if not os.path.isdir(HTML_ROOT):
message = ("Can't find 'common_web' directory, try setting WEBDRIVER"
" environment variable WEBDRIVER:" + WEBDRIVER + " HTML_ROOT:" + HTML_ROOT )
LOGGER.error(message)
assert 0, message
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 8000
class HtmlOnlyHandler(BaseHTTPRequestHandler):
"""Http handler."""
def do_GET(self):
"""GET method handler."""
try:
path = self.path[1:].split('?')[0]
html = open(os.path.join(HTML_ROOT, path), 'r', encoding='latin-1')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(html.read().encode('utf-8'))
html.close()
except IOError:
self.send_error(404, 'File Not Found: %s' % path)
def log_message(self, format, *args):
"""Override default to avoid trashing stderr"""
pass
class SimpleWebServer(object):
"""A very basic web server."""
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT):
self.stop_serving = False
while True:
try:
self.server = HTTPServer(
(host, port), HtmlOnlyHandler)
self.host = host
self.port = port
break
except socket.error:
LOGGER.debug("port %d is in use, trying to next one"
% port)
port += 1
self.thread = threading.Thread(target=self._run_web_server)
def _run_web_server(self):
"""Runs the server loop."""
LOGGER.debug("web server started")
while not self.stop_serving:
self.server.handle_request()
self.server.server_close()
def start(self):
"""Starts the server."""
self.thread.start()
def stop(self):
"""Stops the server."""
self.stop_serving = True
try:
# This is to force stop the server loop
urllib_request.URLopener().open("http://%s:%d" % (self.host,self.port))
except IOError:
pass
LOGGER.info("Shutting down the webserver")
self.thread.join()
def where_is(self, path):
return "http://%s:%d/%s" % (self.host, self.port, path)
def main(argv=None):
from optparse import OptionParser
from time import sleep
if argv is None:
import sys
argv = sys.argv
parser = OptionParser("%prog [options]")
parser.add_option("-p", "--port", dest="port", type="int",
help="port to listen (default: %s)" % DEFAULT_PORT,
default=DEFAULT_PORT)
opts, args = parser.parse_args(argv[1:])
if args:
parser.error("wrong number of arguments") # Will exit
    server = SimpleWebServer(port=opts.port)
server.start()
print("Server started on port %s, hit CTRL-C to quit" % opts.port)
try:
while 1:
sleep(0.1)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
|
apache-2.0
|
natea/django-lfc
|
lfc/models.py
|
1
|
38119
|
# python imports
import datetime
import re
import random
# django imports
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_syncdb
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
# tagging imports
import tagging.utils
from tagging import fields
from tagging.forms import TagField
# portlets imports
from portlets.models import Portlet
from portlets.utils import register_portlet
# workflows imports
import workflows.utils
from workflows import WorkflowBase
from workflows.models import Workflow
from workflows.models import State
# permissions imports
from permissions import PermissionBase
from permissions.exceptions import Unauthorized
from permissions.models import Role
# lfc imports
import lfc.utils
from lfc.fields.thumbs import ImageWithThumbsField
from lfc.fields.autocomplete import AutoCompleteTagInput
from lfc.managers import BaseContentManager
from lfc.settings import ALLOW_COMMENTS_CHOICES
from lfc.settings import ALLOW_COMMENTS_DEFAULT
from lfc.settings import ALLOW_COMMENTS_TRUE
from lfc.settings import LANGUAGE_CHOICES
from lfc.settings import ORDER_BY_CHOICES
class Application(models.Model):
"""
"""
name = models.CharField(max_length=100, unique=True)
class WorkflowStatesInformation(models.Model):
"""Stores some information about workflows
**Attributes:**
state
The state for which information are stored.
public
True if the state is considered as public.
review
True if the state is considered as to be reviewed.
"""
state = models.ForeignKey(State)
public = models.BooleanField(default=False)
review = models.BooleanField(default=False)
def __unicode__(self):
result = self.state.name
if self.public:
result += u" " + u"Public"
if self.review:
result += u" " + "Review"
return result
class Template(models.Model):
"""A template displays the content of an object.
**Attributes:**
name
The name of the template. This is displayed to the LFC user to select
a template. Also used by developers to register a template to a content
type.
path
The relative path to the template file according to Django templating
engine.
    children_columns
        Stores the number of columns for sub pages. This can be used for
        templates which display the children of an object, like overviews.
    images_columns
        Stores the number of columns for images. This can be used for
        templates which display the images of an object, like galleries.
"""
name = models.CharField(max_length=50, unique=True)
path = models.CharField(max_length=100)
children_columns = models.IntegerField(verbose_name=_(u"Subpages columns"), default=1)
images_columns = models.IntegerField(verbose_name=_(u"Images columns"), default=1)
class Meta:
ordering = ("name", )
def __unicode__(self):
return self.name
class ContentTypeRegistration(models.Model):
"""Stores all registration relevant information of a registered content
type.
**Attributes:**
type
The type of the registered content type.
name
The name of the registered content type. This is displayed to the LFC
users to add a new content type. Also used by developers for
registration purposes.
display_select_standard
If set to true the user can select a standard page for the object.
    display_position
        If set to true the user can set the position of the instances.
    global_addable
        If set to true instances of the content type can be added to the
        portal.
subtypes
Allowed sub types which can be added to instances of the content type.
templates
Allowed templates which can be selected for instances of the content
type.
    default_template
        The default template which is assigned when an instance of the content
        type is created.
workflow
Stores the workflow of this content type. All instances "inherit" this
workflow and will get the initial state of it when created.
"""
type = models.CharField(_(u"Type"), blank=True, max_length=100, unique=True)
name = models.CharField(_(u"Name"), blank=True, max_length=100, unique=True)
display_select_standard = models.BooleanField(_(u"Display select standard"), default=True)
display_position = models.BooleanField(_(u"Display position"), default=True)
global_addable = models.BooleanField(_(u"Global addable"), default=True)
subtypes = models.ManyToManyField("self", verbose_name=_(u"Allowed sub types"), symmetrical=False, blank=True, null=True)
templates = models.ManyToManyField("Template", verbose_name=_(u"Templates"), related_name="content_type_registrations")
default_template = models.ForeignKey("Template", verbose_name=_(u"Default template"), blank=True, null=True)
workflow = models.ForeignKey(Workflow, verbose_name=_(u"Workflow"), blank=True, null=True)
class Meta:
ordering = ("name", )
def __unicode__(self):
return self.name
def get_subtypes(self):
"""Returns all allowed sub types for the belonging content type.
"""
return self.subtypes.all()
def get_templates(self):
"""Returns all allowed templates for the belonging content type.
"""
return self.templates.all()
class Portal(models.Model, PermissionBase):
"""A portal is the root of all content objects. Stores global images and
some general data about the site.
**Attributes:**
title:
The title is displayed within the title tab of the site.
standard:
The object that is displayed if one browses to the root of the
portal.
from_email
The e-mail address that is used as sender of outgoing mails.
    notification_emails
        These e-mail addresses receive all notification mails, for instance
        all messages which are sent via the contact form to the portal.
allow_comments
Turns comments on/off generally.
images
The images which are associated with the portal. These images are
considered global and can be used within any text editor field.
files
The files which are associated with the portal. These files are
considered global and can be used within any text editor field.
"""
title = models.CharField(_(u"Title"), blank=True, max_length=100)
standard = models.ForeignKey("BaseContent", verbose_name = _(u"Page"), blank=True, null=True)
from_email = models.EmailField(_(u"From e-mail address"))
notification_emails = models.TextField(_(u"Notification email addresses"))
allow_comments = models.BooleanField(_(u"Allow comments"), default=False)
images = generic.GenericRelation("Image", verbose_name=_(u"Images"),
object_id_field="content_id", content_type_field="content_type")
files = generic.GenericRelation("File", verbose_name=_(u"Files"),
object_id_field="content_id", content_type_field="content_type")
def __unicode__(self):
return self.title
@property
def content_type(self):
return u"portal"
def get_absolute_url(self):
"""Returns the absolute url of the portal. It takes the current
language into account.
"""
language = translation.get_language()
if language == settings.LANGUAGE_CODE:
return reverse("lfc_base_view")
else:
return reverse("lfc_base_view", kwargs={"language" : language})
def get_notification_emails(self):
"""Returns the notification e-mail addresses as list.
"""
        addresses = re.split("[\s,]+", self.notification_emails)
        return addresses
def are_comments_allowed(self):
"""Returns whether comments are allowed globally or not.
"""
return self.allow_comments
def get_parent_for_permissions(self):
"""Fullfills the contract of django-permissions. Returns just None as
there is no parent for portlets.
"""
return None
def get_parent_for_portlets(self):
"""Fullfills the contract of django-portlets. Returns just None as
there is no parent for portlets.
"""
return None
def get_template(self):
"""Returns the current template of the portal.
"""
# TODO: Define default template in portal
return Template.objects.get(name="Article")
def get_children(self, request=None, *args, **kwargs):
"""Returns the children of the portal. If the request is passed the
permissions of the current user is taken into account. Additionally
other valid filters can be passed, e.g. slug = "page-1".
"""
return lfc.utils.get_content_objects(request, parent=None, **kwargs)
def has_permission(self, user, codename):
"""Overwrites django-permissions' has_permission in order to add LFC
specific groups.
"""
# Every user is also anonymous user
try:
roles = [Role.objects.get(name="Anonymous").id]
except Role.DoesNotExist:
roles = []
# Check whether the current user is the creator of the current object.
try:
if user == self.creator:
roles.append(Role.objects.get(name="Owner").id)
except (AttributeError, Role.DoesNotExist):
pass
return super(Portal, self).has_permission(user, codename, roles)
def check_permission(self, user, codename):
"""Overwrites django-permissions' check_permission in order to add LFC
specific groups.
"""
if not self.has_permission(user, codename):
raise Unauthorized("%s doesn't have permission %s for portal" % (user, codename))
class AbstractBaseContent(models.Model, WorkflowBase, PermissionBase):
"""The root of all content types. It provides the inheritable
BaseContentManager.
**Attributes:**
objects
The default content manager of LFC. Provides a restricted method which
takes care of the current user's permissions.
"""
objects = BaseContentManager()
class Meta:
abstract = True
class BaseContent(AbstractBaseContent):
"""Base content object. From this class all content types should inherit.
It should never be instantiated.
**Attributes:**
content_type
The content type of the specific content object.
title
The title of the object. By default this is displayed on top of every
object.
display_title
Set to false to hide the title within the HTML of the object. This can
be helpful to provide a custom title within the text field of an
object.
slug
The part of URL within the parent object. By default the absolute URL
of an object is created by all involved content objects.
description:
The description of an object. This is used within the overview
template and search results.
position:
The ordinal number of the object within the parent object. This is
used to order the child objects of an object.
language
        The language the object's content is in.
    canonical
        The base object of the object if the object is a translation of
        another object.
tags
The tags of the object. Can be used to select certain objects or
display a tag cloud.
parent
The parent object of an object. If set to None the object is a top
object.
template
The current selected template of the object.
standard
The current selected standard object of the object. This can be
selected out of the children of the object. If there is one, this is
displayed instead of the object itself.
order_by
Defines how the children of the object are ordered (default is the
position).
exclude_from_navigation
If set to True, the object is not displayed within the navigation (top
tabs and navigation tree).
exclude_from_search
If set to True, the object is not displayed within search results.
creator
The user which has created this object.
creation_date
The date the object has been created.
modification_date
The date the object has been modified at last.
publication_date
The date the object has been published. TODO: implement this.
start_date
if given the object is only public when the start date is reached.
end_date
if given the object is only public when the end date is not reached
yet.
meta_title
The meta title of the page. This is displayed within the title tag of
the rendered HTML.
meta_keywords
The meta keywords of the object. This is displayed within the meta
keywords tag of the rendered HTML.
meta_description
The meta description of the object. This is displayed within the meta
description tag of the rendered HTML.
images
The images of the object.
files
The files of the object.
allow_comments
        If set to true, the visitor of the object can leave a comment. If set
        to default, the allow_comments state of the parent object is
        inherited.
    searchable_text
        The content which is searched for this object. This attribute should
        not be accessed directly; use the get_searchable_text method instead.
"""
content_type = models.CharField(_(u"Content type"), max_length=100, blank=True)
title = models.CharField(_(u"Title"), max_length=100)
display_title = models.BooleanField(_(u"Display title"), default=True)
slug = models.SlugField(_(u"Slug"))
description = models.TextField(_(u"Description"), blank=True)
position = models.PositiveSmallIntegerField(_(u"Position"), default=1)
language = models.CharField(_(u"Language"), max_length=10, choices=LANGUAGE_CHOICES, default="0")
canonical = models.ForeignKey("self", verbose_name=_(u"Canonical"), related_name="translations", blank=True, null=True)
tags = fields.TagField(_(u"Tags"))
parent = models.ForeignKey("self", verbose_name=_(u"Parent"), blank=True, null=True, related_name="children")
template = models.ForeignKey("Template", verbose_name=_(u"Template"), blank=True, null=True)
standard = models.ForeignKey("self", verbose_name=_(u"Standard"), blank=True, null=True)
order_by = models.CharField("Order by", max_length=20, default="position", choices=ORDER_BY_CHOICES)
exclude_from_navigation = models.BooleanField(_(u"Exclude from navigation"), default=False)
exclude_from_search = models.BooleanField(_(u"Exclude from search results"), default=False)
creator = models.ForeignKey(User, verbose_name=_(u"Creator"), null=True)
creation_date = models.DateTimeField(_(u"Creation date"), auto_now_add=True)
modification_date = models.DateTimeField(_(u"Modification date"), auto_now=True, auto_now_add=True)
publication_date = models.DateTimeField(_(u"Publication date"), null=True, blank=True)
start_date = models.DateTimeField(_(u"Start date"), null=True, blank=True)
end_date = models.DateTimeField(_(u"End date"), null=True, blank=True)
meta_title = models.CharField(_(u"Meta title"), max_length=100, default="<portal_title> - <title>")
meta_keywords = models.TextField(_(u"Meta keywords"), blank=True, default="<tags>")
meta_description = models.TextField(_(u"Meta description"), blank=True, default="<description>")
images = generic.GenericRelation("Image", verbose_name=_(u"Images"),
object_id_field="content_id", content_type_field="content_type")
files = generic.GenericRelation("File", verbose_name=_(u"Files"),
object_id_field="content_id", content_type_field="content_type")
allow_comments = models.PositiveSmallIntegerField(_(u"Commentable"),
choices=ALLOW_COMMENTS_CHOICES, default=ALLOW_COMMENTS_DEFAULT)
searchable_text = models.TextField(blank=True)
class Meta:
ordering = ["position"]
unique_together = ["parent", "slug", "language"]
def __unicode__(self):
return unicode(self.title)
def save(self, force_insert=False, force_update=False):
"""Djangos default save method. This is overwritten to do some LFC
related stuff if a content object is saved.
"""
self.searchable_text = self.get_searchable_text()
if self.content_type == "":
self.content_type = self.__class__.__name__.lower()
super(BaseContent, self).save()
# Set the initial state if there is none yet
co = self.get_content_object()
if workflows.utils.get_state(co) is None:
workflows.utils.set_initial_state(co)
lfc.utils.clear_cache()
def get_absolute_url(self):
"""Returns the absolute url of the instance. Takes care of nested
content objects.
"""
page = self.standard or self
obj = page
slugs = []
while obj is not None:
slugs.append(obj.slug)
obj = obj.parent
slugs.reverse()
slug = "/".join(slugs)
if page.language == settings.LANGUAGE_CODE:
return ("lfc_base_view", (), {"slug" : slug})
elif page.language == "0":
if page.parent:
language = page.parent.language
else:
language = translation.get_language()
if language == settings.LANGUAGE_CODE:
return ("lfc_base_view", (), {"slug" : slug})
else:
return ("lfc_base_view", (), {"slug" : slug, "language" : language})
else:
return ("lfc_base_view", (), {"slug" : slug, "language" : page.language})
get_absolute_url = models.permalink(get_absolute_url)
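    # Illustrative worked example (not part of the original code), assuming a
    # page with slug "team" whose parent has slug "about" and no grandparent:
    # the loop above collects ["team", "about"], reverses it and resolves
    # reverse("lfc_base_view", kwargs={"slug": "about/team"}), prefixed with
    # the language for non-default languages.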
def get_content_object(self):
"""Returns the specific content object of the instance. This method
        can be called if one has a BaseContent and wants the specific content
        type, e.g. Page.
"""
# TODO: Ugly but works. There must be a cleaner way. isinstance doesn't
# work of course.
if self.__class__.__name__.lower() == "basecontent":
return getattr(self, self.content_type)
else:
return self
def get_content_type(self):
"""Returns the content type as string.
"""
return self.__class__.__name__
def get_searchable_text(self):
"""Returns the searchable text of this content type. By default it
        takes the title and the description of the instance into account. Sub
classes can overwrite this method in order to add specific data.
"""
result = self.title + " " + self.description
return result.strip()
def edit_form(self, **kwargs):
"""Returns the edit form for the object.
"""
        raise NotImplementedError, "edit_form has to be implemented by sub classes"
def add_form(self, **kwargs):
"""Returns the add/edit form for the object.
"""
from lfc.manage.forms import AddForm
return AddForm(**kwargs)
def get_ancestors(self):
"""Returns all ancestors of a content object.
"""
ancestors = []
obj = self
while obj and obj.parent is not None:
temp = obj.parent.get_content_object()
ancestors.append(temp)
obj = obj.parent
return ancestors
def get_ancestors_reverse(self):
"""Returns all ancestors of the page in reverse order.
"""
ancestors = self.get_ancestors()
ancestors.reverse()
return ancestors
def get_content_type(self):
"""
"""
try:
return ContentTypeRegistration.objects.get(type=self.content_type).name
except ContentTypeRegistration.DoesNotExist:
return _(u"n/a")
def get_descendants(self, request=None, result=None):
"""Returns all descendants of the content object. If the request is
passed the permissions of the current user is taken into account.
"""
if result is None:
result = []
for child in self.get_children(request):
result.append(child)
child.get_descendants(request, result)
return result
def has_children(self, request=None, *args, **kwargs):
"""Returns True if the object has children. If the request is
passed the permissions of the current user is taken into account.
Other valid filters can be passed also, e.g. slug = "page-1".
"""
return len(lfc.utils.get_content_objects(request, parent=self, **kwargs)) > 0
def get_children(self, request=None, *args, **kwargs):
"""Returns the children of the content object. If the request is
passed the permissions of the current user is taken into account.
Other valid filters can be passed also, e.g. slug = "page-1".
"""
return lfc.utils.get_content_objects(request, parent=self, **kwargs)
def get_image(self):
"""Returns the first image of a content object. If there is none it
returns None.
"""
images = self.images.all()
try:
return images[0]
except IndexError:
return None
def get_meta_title(self):
"""Returns the meta title of the instance. Replaces some placeholders
with the according content.
"""
title = self.meta_title.replace("<title>", self.title)
title = title.replace("<portal_title>", lfc.utils.get_portal().title)
return title
def get_meta_keywords(self):
"""Returns the meta keywords of the instance. Replaces some
placeholders with the according content.
"""
keywords = self.meta_keywords.replace("<title>", self.title)
keywords = keywords.replace("<description>", self.description)
keywords = keywords.replace("<tags>", self.tags)
return keywords
def get_meta_description(self):
"""Returns the meta description of the instance. Replaces some
placeholders with the according content.
"""
description = self.meta_description.replace("<title>", self.title)
description = description.replace("<description>", self.description)
description = description.replace("<tags>", self.tags)
return description
def get_template(self):
"""Returns the current selected template of the object.
"""
if self.template is not None:
return self.template
else:
template = lfc.utils.registration.get_default_template(self)
if template is not None:
return template
else:
return lfc.utils.get_portal().get_template()
def get_title(self):
"""Returns the title of the object. Takes display_title into account.
"""
return self.display_title and self.title or ""
def is_canonical(self):
"""Returns true if the language of the page is the default language.
"""
return self.language in (settings.LANGUAGE_CODE, "0")
def get_canonical(self, request):
"""Returns the canonical object of this instance. If the instance is
the canonical object it returns itself. Takes care of the current
user's permission (therefore it needs the request).
"""
if self.is_canonical():
return self
else:
if self.canonical:
obj = BaseContent.objects.get(pk=self.canonical.id)
                if obj.has_permission(request.user, "view"):
return obj
else:
return None
def is_translation(self):
"""Returns true if the instance is a translation of another instance.
"""
return not self.is_canonical()
def has_language(self, request, language):
"""Returns true if there is a translation of the instance in the
        requested language. It also returns true if the instance itself is
        in the requested language or if there is a connected instance with
        a neutral language.
"""
if self.language == "0":
return True
if self.language == language:
return True
if self.is_translation():
canonical = self.get_canonical(request)
if canonical and canonical.language == language:
return True
if canonical and canonical.get_translation(request, language):
return True
if self.is_canonical():
if self.get_translation(request, language):
return True
return False
def get_translation(self, request, language):
"""Returns connected translation for requested language. Returns None
if the requested language doesn't exist.
"""
        # TODO: Should an instance be returned even if the instance is a
        # translation?
if self.is_translation():
return None
try:
translation = self.translations.get(language=language).get_content_object()
if translation.has_permission(request.user, "view"):
return translation
else:
return None
except BaseContent.DoesNotExist:
return None
def are_comments_allowed(self):
"""Returns true if comments for this instance are allowed. Takes also
the setup of parent objects into account (if the instance' comments
setup is set to "default").
"""
if self.allow_comments == ALLOW_COMMENTS_DEFAULT:
if self.parent:
return self.parent.are_comments_allowed()
else:
return lfc.utils.get_portal().are_comments_allowed()
else:
if self.allow_comments == ALLOW_COMMENTS_TRUE:
return True
else:
return False
def get_parent_for_portlets(self):
"""Returns the parent from which portlets should be inherited portlets.
The implementation of this method is a requirement from django-portlets.
"""
return self.parent and self.parent.get_content_object() or lfc.utils.get_portal()
# django-permissions
def get_parent_for_permissions(self):
"""Returns the parent from which permissions are inherited. The
implementation of this method is a requirement from django-permissions.
"""
return self.parent and self.parent.get_content_object() or lfc.utils.get_portal()
def has_permission(self, user, codename):
"""Overwrites django-permissions' has_permission in order to add LFC
specific groups.
"""
# CACHE
cache_key = "%s-object-%s-%s-%s" % \
(settings.CACHE_MIDDLEWARE_KEY_PREFIX, self.id, user.id, codename)
result = cache.get(cache_key)
if result:
return result
# Every user is also anonymous user
try:
roles = [Role.objects.get(name="Anonymous")]
except Role.DoesNotExist:
roles = []
# Check whether the current user is the creator of the current object.
try:
if user == self.creator:
roles.append(Role.objects.get(name="Owner"))
except (AttributeError, Role.DoesNotExist):
pass
result = super(BaseContent, self).has_permission(user, codename, roles)
# set cache
cache.set(cache_key, result)
return result
def check_permission(self, user, codename):
"""Overwrites django-permissions' check_permission in order to add LFC
specific groups.
"""
if not self.has_permission(user, codename):
raise Unauthorized("%s doesn't have permission %s for object %s" % (user, codename, self.slug))
def is_active(self, user):
"""Returns True if now is between start and end date of the object.
"""
if user.is_superuser:
return True
if self.start_date or self.end_date:
started = True
ended = False
now = datetime.datetime.now()
if self.start_date and self.start_date > now:
started = False
if self.end_date and now >= self.end_date:
ended = True
return started and not ended
else:
return True
# django-workflows
def get_allowed_transitions(self, user):
"""Returns all allowed permissions for the passed user.
"""
state = self.get_state()
if state is None:
return []
transitions = []
for transition in state.transitions.all():
permission = transition.permission
if permission is None or self.has_permission(user, permission.codename):
transitions.append(transition)
return transitions
class Page(BaseContent):
"""A page is the foremost object within lfc which shows information to the
user.
**Attributes**:
text:
The main text of the page.
"""
text = models.TextField(_(u"Text"), blank=True)
def get_searchable_text(self):
"""Returns the searchable text of the page. This adds the text to
the default searchable text.
"""
result = self.title + " " + self.description + " " + self.text
return result.strip()
def edit_form(self, **kwargs):
"""Returns the edit form of the page.
"""
from lfc.manage.forms import CoreDataForm
return CoreDataForm(**kwargs)
class Image(models.Model):
"""An image which can be displayes within HTML. Generates automatically
various sizes.
title
The title of the image. Used within the title and alt tag of the
image.
slug
The URL of the image
content
The content object the image belongs to (optional)
position
        The ordinal number within the content object
caption
The caption of the image. Can be used within the content (optional)
description
A description of the image. Can be used within the content
(optional)
image
The image file.
"""
title = models.CharField(blank=True, max_length=100)
slug = models.SlugField()
content_type = models.ForeignKey(ContentType, verbose_name=_(u"Content type"), related_name="images", blank=True, null=True)
content_id = models.PositiveIntegerField(_(u"Content id"), blank=True, null=True)
content = generic.GenericForeignKey(ct_field="content_type", fk_field="content_id")
position = models.SmallIntegerField(default=999)
caption = models.CharField(blank=True, max_length=100)
description = models.TextField(blank=True)
creation_date = models.DateTimeField(_(u"Creation date"), auto_now_add=True)
image = ImageWithThumbsField(_(u"Image"), upload_to="uploads",
sizes=((60, 60), (100, 100), (200, 200), (400, 400), (600, 600), (800, 800)))
class Meta:
ordering = ("position", )
def __unicode__(self):
return self.title
def get_absolute_url(self):
return ("gallery.views.photo", (), {"slug" : self.slug})
get_absolute_url = models.permalink(get_absolute_url)
class File(models.Model):
"""A downloadable file.
**Attributes:**
title
The title of the file.
slug
The URL of the file.
content
The content object the file belongs to (optional).
position
The ordinal number within the content object. Used to order the files.
description
A long description of the file. Can be used within the content
(optional).
file
The binary file.
"""
title = models.CharField(blank=True, max_length=100)
slug = models.SlugField()
content_type = models.ForeignKey(ContentType, verbose_name=_(u"Content type"), related_name="files", blank=True, null=True)
content_id = models.PositiveIntegerField(_(u"Content id"), blank=True, null=True)
content = generic.GenericForeignKey(ct_field="content_type", fk_field="content_id")
position = models.SmallIntegerField(default=999)
description = models.TextField(blank=True)
creation_date = models.DateTimeField(_(u"Creation date"), auto_now_add=True)
file = models.FileField(upload_to="files")
class Meta:
ordering = ("position", )
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse("lfc_file", kwargs={"id" : self.id})
#### Portlets
###############################################################################
class NavigationPortlet(Portlet):
"""A portlet to display the navigation tree.
    Note: this mainly reuses the navigation inclusion tag.
    Parameters:
    - start_level:
        The tree is displayed starting from this level. The tree starts with
        level 1.
    - expand_level:
        The tree is expanded up to this level. Default is 0, which means the
        tree is not expanded at all except for the current node.
"""
start_level = models.PositiveSmallIntegerField(default=1)
expand_level = models.PositiveSmallIntegerField(default=0)
def render(self, context):
"""Renders the portlet as HTML.
"""
request = context.get("request")
return render_to_string("lfc/portlets/navigation_portlet.html", RequestContext(request, {
"start_level" : self.start_level,
"expand_level" : self.expand_level,
"title" : self.title,
}))
def form(self, **kwargs):
"""
"""
return NavigationPortletForm(instance=self, **kwargs)
class NavigationPortletForm(forms.ModelForm):
"""Add/edit form for the navigation portlet.
"""
class Meta:
model = NavigationPortlet
# TODO: Rename as it is able to display all content types. ContentPortlet, DocumentPortlet, ...?
class PagesPortlet(Portlet):
"""A portlet to display arbitrary objects. The objects can be selected by
tags.
**Attributes:**
limit:
The amount of objects which are displayed at maximum.
tags:
The tags an object must have to be displayed.
"""
limit = models.PositiveSmallIntegerField(default=5)
tags = models.CharField(blank=True, max_length=100)
def __unicode__(self):
return "%s" % self.id
def render(self, context):
"""Renders the portlet as HTML.
"""
objs = BaseContent.objects.filter(
language__in=("0", translation.get_language()))
if self.tags:
objs = tagging.managers.ModelTaggedItemManager().with_all(self.tags, objs)[:self.limit]
return render_to_string("lfc/portlets/pages_portlet.html", {
"title" : self.title,
"objs" : objs,
})
def form(self, **kwargs):
"""Returns the add/edit form of the portlet.
"""
return PagesPortletForm(instance=self, **kwargs)
class PagesPortletForm(forms.ModelForm):
"""Add/edit form of the pages portlet.
"""
tags = TagField(widget=AutoCompleteTagInput(), required=False)
class Meta:
model = PagesPortlet
class RandomPortlet(Portlet):
"""A portlet to display random objects. The objects can be selected by
tags.
**Attributes:**
limit:
The amount of objects which are displayed at maximum.
tags:
The tags an object must have to be displayed.
"""
limit = models.PositiveSmallIntegerField(default=1)
tags = models.CharField(blank=True, max_length=100)
def render(self, context):
"""Renders the portlet as HTML.
"""
items = BaseContent.objects.filter(
language__in=("0", translation.get_language()))
if self.tags:
items = tagging.managers.ModelTaggedItemManager().with_all(self.tags, items)[:self.limit]
items = list(items)
random.shuffle(items)
return render_to_string("lfc/portlets/random_portlet.html", {
"title" : self.title,
"items" : items[:self.limit],
})
def form(self, **kwargs):
"""Returns the form of the portlet.
"""
return RandomPortletForm(instance=self, **kwargs)
class RandomPortletForm(forms.ModelForm):
"""Add/Edit form for the random portlet.
"""
tags = TagField(widget=AutoCompleteTagInput(), required=False)
class Meta:
model = RandomPortlet
class TextPortlet(Portlet):
"""A portlet to display arbitrary HTML text.
**Attributes:**
text:
The HTML text which is displayed. Can contain any HTML text.
"""
text = models.TextField(_(u"Text"), blank=True)
def __unicode__(self):
return "%s" % self.id
def render(self, context):
"""Renders the portlet as HTML.
"""
return render_to_string("lfc/portlets/text_portlet.html", {
"title" : self.title,
"text" : self.text
})
def form(self, **kwargs):
"""
"""
return TextPortletForm(instance=self, **kwargs)
class TextPortletForm(forms.ModelForm):
"""Add/Edit form for the text portlet.
"""
class Meta:
model = TextPortlet
|
bsd-3-clause
|
with-git/tensorflow
|
tensorflow/python/framework/versions.py
|
127
|
1607
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
__version__ = pywrap_tensorflow.__version__
__git_version__ = pywrap_tensorflow.__git_version__
__compiler_version__ = pywrap_tensorflow.__compiler_version__
VERSION = __version__
GIT_VERSION = __git_version__
COMPILER_VERSION = __compiler_version__
GRAPH_DEF_VERSION = pywrap_tensorflow.GRAPH_DEF_VERSION
GRAPH_DEF_VERSION_MIN_CONSUMER = (
pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_CONSUMER)
GRAPH_DEF_VERSION_MIN_PRODUCER = (
pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_PRODUCER)
__all__ = [
"__version__",
"__git_version__",
"__compiler_version__",
"COMPILER_VERSION",
"GIT_VERSION",
"GRAPH_DEF_VERSION",
"GRAPH_DEF_VERSION_MIN_CONSUMER",
"GRAPH_DEF_VERSION_MIN_PRODUCER",
"VERSION",
]
|
apache-2.0
|
kaiweifan/vse-lbaas-plugin-poc
|
quantum/plugins/metaplugin/meta_db_v2.py
|
8
|
1680
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.orm import exc
from quantum.plugins.metaplugin import meta_models_v2
def get_flavor_by_network(session, net_id):
try:
binding = (session.query(meta_models_v2.NetworkFlavor).
filter_by(network_id=net_id).
one())
except exc.NoResultFound:
return None
return binding.flavor
def add_network_flavor_binding(session, flavor, net_id):
binding = meta_models_v2.NetworkFlavor(flavor=flavor, network_id=net_id)
session.add(binding)
return binding
def get_flavor_by_router(session, router_id):
try:
binding = (session.query(meta_models_v2.RouterFlavor).
filter_by(router_id=router_id).
one())
except exc.NoResultFound:
return None
return binding.flavor
def add_router_flavor_binding(session, flavor, router_id):
binding = meta_models_v2.RouterFlavor(flavor=flavor, router_id=router_id)
session.add(binding)
return binding
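# Illustrative sketch (not part of the original module), assuming a standard
# SQLAlchemy session factory; it shows the intended pairing of the helpers
# above: write a binding, then look its flavor up again by network id.
def _demo_flavor_binding(session_factory, flavor, net_id):
    session = session_factory()
    try:
        add_network_flavor_binding(session, flavor, net_id)
        session.commit()
        return get_flavor_by_network(session, net_id)  # == flavor
    finally:
        session.close()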
|
apache-2.0
|
gtko/CouchPotatoServer
|
libs/pyutil/iputil.py
|
92
|
9752
|
# from the Python Standard Library
import os, re, socket, sys, subprocess
# from Twisted
from twisted.internet import defer, threads, reactor
from twisted.internet.protocol import DatagramProtocol
from twisted.python.procutils import which
from twisted.python import log
try:
import resource
def increase_rlimits():
# We'd like to raise our soft resource.RLIMIT_NOFILE, since certain
# systems (OS-X, probably solaris) start with a relatively low limit
# (256), and some unit tests want to open up more sockets than this.
# Most linux systems start with both hard and soft limits at 1024,
# which is plenty.
# unfortunately the values to pass to setrlimit() vary widely from
# one system to another. OS-X reports (256, HUGE), but the real hard
# limit is 10240, and accepts (-1,-1) to mean raise it to the
# maximum. Cygwin reports (256, -1), then ignores a request of
# (-1,-1): instead you have to guess at the hard limit (it appears to
# be 3200), so using (3200,-1) seems to work. Linux reports a
# sensible (1024,1024), then rejects (-1,-1) as trying to raise the
# maximum limit, so you could set it to (1024,1024) but you might as
# well leave it alone.
try:
current = resource.getrlimit(resource.RLIMIT_NOFILE)
except AttributeError:
# we're probably missing RLIMIT_NOFILE
return
if current[0] >= 1024:
# good enough, leave it alone
return
try:
if current[1] > 0 and current[1] < 1000000:
# solaris reports (256, 65536)
resource.setrlimit(resource.RLIMIT_NOFILE,
(current[1], current[1]))
else:
# this one works on OS-X (bsd), and gives us 10240, but
# it doesn't work on linux (on which both the hard and
# soft limits are set to 1024 by default).
resource.setrlimit(resource.RLIMIT_NOFILE, (-1,-1))
new = resource.getrlimit(resource.RLIMIT_NOFILE)
if new[0] == current[0]:
# probably cygwin, which ignores -1. Use a real value.
resource.setrlimit(resource.RLIMIT_NOFILE, (3200,-1))
except ValueError:
log.msg("unable to set RLIMIT_NOFILE: current value %s"
% (resource.getrlimit(resource.RLIMIT_NOFILE),))
except:
# who knows what. It isn't very important, so log it and continue
log.err()
except ImportError:
def _increase_rlimits():
# TODO: implement this for Windows. Although I suspect the
# solution might be "be running under the iocp reactor and
# make this function be a no-op".
pass
# pyflakes complains about two 'def FOO' statements in the same scope,
# since one might be shadowing the other. This hack appeases pyflakes.
increase_rlimits = _increase_rlimits
def get_local_addresses_async(target="198.41.0.4"): # A.ROOT-SERVERS.NET
"""
Return a Deferred that fires with a list of IPv4 addresses (as dotted-quad
strings) that are currently configured on this host, sorted in descending
order of how likely we think they are to work.
    @param target: we want to learn an IP address that peers could try
        using to connect to us; the default value is fine, but it might help
        if you pass the address of a host that you are actually trying to be
        reachable by.
"""
addresses = []
local_ip = get_local_ip_for(target)
if local_ip:
addresses.append(local_ip)
if sys.platform == "cygwin":
d = _cygwin_hack_find_addresses(target)
else:
d = _find_addresses_via_config()
def _collect(res):
for addr in res:
if addr != "0.0.0.0" and not addr in addresses:
addresses.append(addr)
return addresses
d.addCallback(_collect)
return d
def get_local_ip_for(target):
"""Find out what our IP address is for use by a given target.
    @return: the IP address as a dotted-quad string which could be used by
        the target to connect to us. It might work for them, it might not. If
there is no suitable address (perhaps we don't currently have an
externally-visible interface), this will return None.
"""
try:
target_ipaddr = socket.gethostbyname(target)
except socket.gaierror:
# DNS isn't running, or somehow we encountered an error
# note: if an interface is configured and up, but nothing is
# connected to it, gethostbyname("A.ROOT-SERVERS.NET") will take 20
# seconds to raise socket.gaierror . This is synchronous and occurs
# for each node being started, so users of
# test.common.SystemTestMixin (like test_system) will see something
# like 120s of delay, which may be enough to hit the default trial
# timeouts. For that reason, get_local_addresses_async() was changed
# to default to the numerical ip address for A.ROOT-SERVERS.NET, to
# avoid this DNS lookup. This also makes node startup fractionally
# faster.
return None
udpprot = DatagramProtocol()
port = reactor.listenUDP(0, udpprot)
try:
udpprot.transport.connect(target_ipaddr, 7)
localip = udpprot.transport.getHost().host
except socket.error:
# no route to that host
localip = None
port.stopListening() # note, this returns a Deferred
return localip
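# Illustrative sketch (not part of the original module): the same trick as
# get_local_ip_for() above, expressed with the plain socket module instead of
# Twisted. Connecting a UDP socket sends no packets; it merely asks the
# kernel to pick the outgoing interface, whose address getsockname() reports.
def _demo_local_ip(target_ipaddr="198.41.0.4"):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((target_ipaddr, 7))
        return s.getsockname()[0]
    except socket.error:
        return None
    finally:
        s.close()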
# k: result of sys.platform, v: which kind of IP configuration reader we use
_platform_map = {
"linux-i386": "linux", # redhat
"linux-ppc": "linux", # redhat
"linux2": "linux", # debian
"linux3": "linux", # debian
"win32": "win32",
"irix6-n32": "irix",
"irix6-n64": "irix",
"irix6": "irix",
"openbsd2": "bsd",
"openbsd3": "bsd",
"openbsd4": "bsd",
"openbsd5": "bsd",
"darwin": "bsd", # Mac OS X
"freebsd4": "bsd",
"freebsd5": "bsd",
"freebsd6": "bsd",
"freebsd7": "bsd",
"freebsd8": "bsd",
"freebsd9": "bsd",
"netbsd1": "bsd",
"netbsd2": "bsd",
"netbsd3": "bsd",
"netbsd4": "bsd",
"netbsd5": "bsd",
"netbsd6": "bsd",
"dragonfly2": "bsd",
"sunos5": "sunos",
"cygwin": "cygwin",
}
class UnsupportedPlatformError(Exception):
pass
# Wow, I'm really amazed at how much mileage we've gotten out of calling
# the external route.exe program on windows... It appears to work on all
# versions so far. Still, the real system calls would be much preferred...
# ... thus wrote Greg Smith in time immemorial...
_win32_path = 'route.exe'
_win32_args = ('print',)
_win32_re = re.compile('^\s*\d+\.\d+\.\d+\.\d+\s.+\s(?P<address>\d+\.\d+\.\d+\.\d+)\s+(?P<metric>\d+)\s*$', flags=re.M|re.I|re.S)
# These work in Redhat 6.x and Debian 2.2 potato
_linux_path = '/sbin/ifconfig'
_linux_re = re.compile('^\s*inet [a-zA-Z]*:?(?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
# NetBSD 1.4 (submitted by Rhialto), Darwin, Mac OS X
_netbsd_path = '/sbin/ifconfig'
_netbsd_args = ('-a',)
_netbsd_re = re.compile('^\s+inet [a-zA-Z]*:?(?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
# Irix 6.5
_irix_path = '/usr/etc/ifconfig'
# Solaris 2.x
_sunos_path = '/usr/sbin/ifconfig'
# k: platform string as provided in the value of _platform_map
# v: tuple of (path_to_tool, args, regex,)
_tool_map = {
"linux": (_linux_path, (), _linux_re,),
"win32": (_win32_path, _win32_args, _win32_re,),
"cygwin": (_win32_path, _win32_args, _win32_re,),
"bsd": (_netbsd_path, _netbsd_args, _netbsd_re,),
"irix": (_irix_path, _netbsd_args, _netbsd_re,),
"sunos": (_sunos_path, _netbsd_args, _netbsd_re,),
}
def _find_addresses_via_config():
return threads.deferToThread(_synchronously_find_addresses_via_config)
def _synchronously_find_addresses_via_config():
# originally by Greg Smith, hacked by Zooko to conform to Brian's API
platform = _platform_map.get(sys.platform)
if not platform:
raise UnsupportedPlatformError(sys.platform)
(pathtotool, args, regex,) = _tool_map[platform]
# If pathtotool is a fully qualified path then we just try that.
# If it is merely an executable name then we use Twisted's
# "which()" utility and try each executable in turn until one
# gives us something that resembles a dotted-quad IPv4 address.
if os.path.isabs(pathtotool):
return _query(pathtotool, args, regex)
else:
exes_to_try = which(pathtotool)
for exe in exes_to_try:
try:
addresses = _query(exe, args, regex)
except Exception:
addresses = []
if addresses:
return addresses
return []
def _query(path, args, regex):
env = {'LANG': 'en_US.UTF-8'}
p = subprocess.Popen([path] + list(args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
(output, err) = p.communicate()
addresses = []
outputsplit = output.split('\n')
for outline in outputsplit:
m = regex.match(outline)
if m:
addr = m.groupdict()['address']
if addr not in addresses:
addresses.append(addr)
return addresses
def _cygwin_hack_find_addresses(target):
addresses = []
for h in [target, "localhost", "127.0.0.1",]:
try:
addr = get_local_ip_for(h)
if addr not in addresses:
addresses.append(addr)
except socket.gaierror:
pass
return defer.succeed(addresses)
|
gpl-3.0
|
synweap15/pyload
|
module/plugins/accounts/RehostTo.py
|
6
|
1796
|
# -*- coding: utf-8 -*-
from module.plugins.internal.Account import Account
class RehostTo(Account):
__name__ = "RehostTo"
__type__ = "account"
__version__ = "0.19"
__status__ = "testing"
__description__ = """Rehost.to account plugin"""
__license__ = "GPLv3"
__authors__ = [("RaNaN", "[email protected]")]
def grab_info(self, user, password, data, req):
premium = False
trafficleft = None
validuntil = -1
session = ""
html = self.load("https://rehost.to/api.php",
get={'cmd' : "login",
'user': user,
'pass': password})
try:
session = html.split(",")[1].split("=")[1]
html = self.load("http://rehost.to/api.php",
get={'cmd' : "get_premium_credits",
'long_ses': session})
if html.strip() == "0,0" or "ERROR" in html:
self.log_debug(html)
else:
traffic, valid = html.split(",")
premium = True
trafficleft = self.parse_traffic(traffic + "MB")
validuntil = float(valid)
finally:
return {'premium' : premium,
'trafficleft': trafficleft,
'validuntil' : validuntil,
'session' : session}
def login(self, user, password, data, req):
html = self.load("https://rehost.to/api.php",
get={'cmd': "login",
'user': user,
'pass': password})
if "ERROR" in html:
self.log_debug(html)
self.fail_login()
|
gpl-3.0
|
redhat-openstack/glance_store
|
glance_store/_drivers/vmware_datastore.py
|
2
|
28716
|
# Copyright 2014 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for VMware Datastore"""
import hashlib
import logging
import os
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import units
try:
from oslo_vmware import api
import oslo_vmware.exceptions as vexc
from oslo_vmware.objects import datacenter as oslo_datacenter
from oslo_vmware.objects import datastore as oslo_datastore
from oslo_vmware import vim_util
except ImportError:
api = None
from six.moves import http_client
from six.moves import urllib
import six
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import glance_store
from glance_store import capabilities
from glance_store import exceptions
from glance_store.i18n import _
from glance_store.i18n import _LE
from glance_store import location
LOG = logging.getLogger(__name__)
MAX_REDIRECTS = 5
DEFAULT_STORE_IMAGE_DIR = '/openstack_glance'
DS_URL_PREFIX = '/folder'
STORE_SCHEME = 'vsphere'
# check that datacenter/datastore combination is valid
_datastore_info_valid = False
_VMWARE_OPTS = [
cfg.StrOpt('vmware_server_host',
help=_('ESX/ESXi or vCenter Server target system. '
'The server value can be an IP address or a DNS name.')),
cfg.StrOpt('vmware_server_username',
help=_('Username for authenticating with '
'VMware ESX/VC server.')),
cfg.StrOpt('vmware_server_password',
help=_('Password for authenticating with '
'VMware ESX/VC server.'),
secret=True),
cfg.StrOpt('vmware_datacenter_path',
default='ha-datacenter',
help=_('DEPRECATED. Inventory path to a datacenter. '
'If the vmware_server_host specified is an ESX/ESXi, '
'the vmware_datacenter_path is optional. If specified, '
'it should be "ha-datacenter". This option is '
'deprecated in favor of vmware_datastores and will be '
'removed in the Liberty release.'),
deprecated_for_removal=True),
cfg.StrOpt('vmware_datastore_name',
help=_('DEPRECATED. Datastore associated with the datacenter. '
'This option is deprecated in favor of '
'vmware_datastores and will be removed in the Liberty '
'release.'),
deprecated_for_removal=True),
cfg.IntOpt('vmware_api_retry_count',
default=10,
help=_('Number of times VMware ESX/VC server API must be '
'retried upon connection related issues.')),
cfg.IntOpt('vmware_task_poll_interval',
default=5,
help=_('The interval used for polling remote tasks '
'invoked on VMware ESX/VC server.')),
cfg.StrOpt('vmware_store_image_dir',
default=DEFAULT_STORE_IMAGE_DIR,
help=_('The name of the directory where the glance images '
'will be stored in the VMware datastore.')),
cfg.BoolOpt('vmware_api_insecure',
default=False,
                help=_('Allow performing insecure SSL requests to ESX/VC.')),
cfg.MultiStrOpt(
'vmware_datastores',
help=_(
'A list of datastores where the image can be stored. This option '
'may be specified multiple times for specifying multiple '
'datastores. Either one of vmware_datastore_name or '
'vmware_datastores is required. The datastore name should be '
        'specified after its datacenter path, separated by ":". An '
        'optional weight may be given after the datastore name, separated '
        'again by ":". Thus, the required format becomes '
'<datacenter_path>:<datastore_name>:<optional_weight>. When '
'adding an image, the datastore with highest weight will be '
'selected, unless there is not enough free space available in '
'cases where the image size is already known. If no weight is '
'given, it is assumed to be zero and the directory will be '
'considered for selection last. If multiple datastores have the '
'same weight, then the one with the most free space available is '
'selected.'))]
def http_response_iterator(conn, response, size):
"""Return an iterator for a file-like object.
:param conn: HTTP(S) Connection
:param response: http_client.HTTPResponse object
:param size: Chunk size to iterate with
"""
try:
chunk = response.read(size)
while chunk:
yield chunk
chunk = response.read(size)
finally:
conn.close()
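# Illustrative sketch (not part of the original module), assuming a reachable
# host; it shows how http_response_iterator() streams a response in fixed
# size chunks while guaranteeing the connection is closed once the iterator
# is exhausted. _demo_response_iterator is a hypothetical helper.
def _demo_response_iterator(host="example.com", path="/", chunk_size=8192):
    conn = http_client.HTTPConnection(host)
    conn.request("GET", path)
    response = conn.getresponse()
    total = 0
    for chunk in http_response_iterator(conn, response, chunk_size):
        total += len(chunk)
    return total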
class _Reader(object):
def __init__(self, data):
self._size = 0
self.data = data
self.checksum = hashlib.md5()
def read(self, size=None):
result = self.data.read(size)
self._size += len(result)
self.checksum.update(result)
return result
@property
def size(self):
return self._size
class _ChunkReader(_Reader):
def __init__(self, data, blocksize=8192):
self.blocksize = blocksize
self.current_chunk = b""
self.closed = False
super(_ChunkReader, self).__init__(data)
def read(self, size=None):
ret = b""
while size is None or size >= len(self.current_chunk):
ret += self.current_chunk
if size is not None:
size -= len(self.current_chunk)
if self.closed:
self.current_chunk = b""
break
self._get_chunk()
else:
ret += self.current_chunk[:size]
self.current_chunk = self.current_chunk[size:]
return ret
def _get_chunk(self):
if not self.closed:
chunk = self.data.read(self.blocksize)
chunk_len = len(chunk)
self._size += chunk_len
self.checksum.update(chunk)
if chunk:
if six.PY3:
size_header = ('%x\r\n' % chunk_len).encode('ascii')
self.current_chunk = b''.join((size_header, chunk,
b'\r\n'))
else:
self.current_chunk = b'%x\r\n%s\r\n' % (chunk_len, chunk)
else:
self.current_chunk = b'0\r\n\r\n'
self.closed = True
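# Illustrative sketch (not part of the original module): _ChunkReader wraps a
# file-like object and emits HTTP/1.1 chunked transfer-encoding framing (hex
# length, CRLF, payload, CRLF, then a terminating zero-length chunk).
def _demo_chunk_reader():
    import io
    reader = _ChunkReader(io.BytesIO(b"hello world"), blocksize=5)
    framed = reader.read()
    # framed == b'5\r\nhello\r\n5\r\n worl\r\n1\r\nd\r\n0\r\n\r\n'
    return framed, reader.size, reader.checksum.hexdigest()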
class StoreLocation(location.StoreLocation):
"""Class describing an VMware URI.
An VMware URI can look like any of the following:
vsphere://server_host/folder/file_path?dcPath=dc_path&dsName=ds_name
"""
def __init__(self, store_specs, conf):
super(StoreLocation, self).__init__(store_specs, conf)
self.datacenter_path = None
self.datastore_name = None
def process_specs(self):
self.scheme = self.specs.get('scheme', STORE_SCHEME)
self.server_host = self.specs.get('server_host')
self.path = os.path.join(DS_URL_PREFIX,
self.specs.get('image_dir').strip('/'),
self.specs.get('image_id'))
self.datacenter_path = self.specs.get('datacenter_path')
        self.datastore_name = self.specs.get('datastore_name')
        param_list = {'dsName': self.datastore_name}
if self.datacenter_path:
param_list['dcPath'] = self.datacenter_path
self.query = urllib.parse.urlencode(param_list)
def get_uri(self):
if netutils.is_valid_ipv6(self.server_host):
base_url = '%s://[%s]%s' % (self.scheme,
self.server_host, self.path)
else:
base_url = '%s://%s%s' % (self.scheme,
self.server_host, self.path)
return '%s?%s' % (base_url, self.query)
# NOTE(flaper87): Commenting out for now, it's probably better to do
# it during image add/get. This validation relies on a config param
# which doesn't make sense to have in the StoreLocation instance.
# def _is_valid_path(self, path):
# sdir = self.conf.glance_store.vmware_store_image_dir.strip('/')
# return path.startswith(os.path.join(DS_URL_PREFIX, sdir))
def parse_uri(self, uri):
if not uri.startswith('%s://' % STORE_SCHEME):
reason = (_("URI %(uri)s must start with %(scheme)s://") %
{'uri': uri, 'scheme': STORE_SCHEME})
LOG.info(reason)
raise exceptions.BadStoreUri(message=reason)
(self.scheme, self.server_host,
path, params, query, fragment) = urllib.parse.urlparse(uri)
if not query:
path, query = path.split('?')
self.path = path
self.query = query
# NOTE(flaper87): Read comment on `_is_valid_path`
# reason = 'Badly formed VMware datastore URI %(uri)s.' % {'uri': uri}
# LOG.debug(reason)
# raise exceptions.BadStoreUri(reason)
parts = urllib.parse.parse_qs(self.query)
dc_path = parts.get('dcPath')
if dc_path:
self.datacenter_path = dc_path[0]
ds_name = parts.get('dsName')
if ds_name:
self.datastore_name = ds_name[0]
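# Example (mirroring the class docstring): parsing
#     vsphere://server_host/folder/file_path?dcPath=dc_path&dsName=ds_name
# yields datacenter_path == 'dc_path' and datastore_name == 'ds_name'.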
class Store(glance_store.Store):
"""An implementation of the VMware datastore adapter."""
_CAPABILITIES = (capabilities.BitMasks.RW_ACCESS |
capabilities.BitMasks.DRIVER_REUSABLE)
OPTIONS = _VMWARE_OPTS
WRITE_CHUNKSIZE = units.Mi
def __init__(self, conf):
super(Store, self).__init__(conf)
self.datastores = {}
def reset_session(self):
self.session = api.VMwareAPISession(
self.server_host, self.server_username, self.server_password,
self.api_retry_count, self.tpoll_interval)
return self.session
def get_schemes(self):
return (STORE_SCHEME,)
def _sanity_check(self):
if self.conf.glance_store.vmware_api_retry_count <= 0:
msg = _('vmware_api_retry_count should be greater than zero')
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
if self.conf.glance_store.vmware_task_poll_interval <= 0:
msg = _('vmware_task_poll_interval should be greater than zero')
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
if not (self.conf.glance_store.vmware_datastore_name
or self.conf.glance_store.vmware_datastores):
msg = (_("Specify at least 'vmware_datastore_name' or "
"'vmware_datastores' option"))
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
if (self.conf.glance_store.vmware_datastore_name and
self.conf.glance_store.vmware_datastores):
msg = (_("Specify either 'vmware_datastore_name' or "
"'vmware_datastores' option"))
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
def configure(self, re_raise_bsc=False):
self._sanity_check()
self.scheme = STORE_SCHEME
self.server_host = self._option_get('vmware_server_host')
self.server_username = self._option_get('vmware_server_username')
self.server_password = self._option_get('vmware_server_password')
self.api_retry_count = self.conf.glance_store.vmware_api_retry_count
self.tpoll_interval = self.conf.glance_store.vmware_task_poll_interval
self.api_insecure = self.conf.glance_store.vmware_api_insecure
if api is None:
msg = _("Missing dependencies: oslo_vmware")
raise exceptions.BadStoreConfiguration(
store_name="vmware_datastore", reason=msg)
self.session = self.reset_session()
super(Store, self).configure(re_raise_bsc=re_raise_bsc)
def _get_datacenter(self, datacenter_path):
search_index_moref = self.session.vim.service_content.searchIndex
dc_moref = self.session.invoke_api(
self.session.vim,
'FindByInventoryPath',
search_index_moref,
inventoryPath=datacenter_path)
dc_name = datacenter_path.rsplit('/', 1)[-1]
# TODO(sabari): Add datacenter_path attribute in oslo.vmware
dc_obj = oslo_datacenter.Datacenter(ref=dc_moref, name=dc_name)
dc_obj.path = datacenter_path
return dc_obj
def _get_datastore(self, datacenter_path, datastore_name):
dc_obj = self._get_datacenter(datacenter_path)
datastore_ret = self.session.invoke_api(
vim_util, 'get_object_property', self.session.vim, dc_obj.ref,
'datastore')
if datastore_ret:
datastore_refs = datastore_ret.ManagedObjectReference
for ds_ref in datastore_refs:
ds_obj = oslo_datastore.get_datastore_by_ref(self.session,
ds_ref)
if ds_obj.name == datastore_name:
ds_obj.datacenter = dc_obj
return ds_obj
def _get_freespace(self, ds_obj):
# TODO(sabari): Move this function into oslo_vmware's datastore object.
return self.session.invoke_api(
vim_util, 'get_object_property', self.session.vim, ds_obj.ref,
'summary.freeSpace')
def _parse_datastore_info_and_weight(self, datastore):
weight = 0
parts = [part.strip() for part in datastore.rsplit(":", 2)]
if len(parts) < 2:
msg = _('vmware_datastores format must be '
'datacenter_path:datastore_name:weight or '
'datacenter_path:datastore_name')
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
if len(parts) == 3 and parts[2]:
weight = parts[2]
if not weight.isdigit():
msg = (_('Invalid weight value %(weight)s in '
'vmware_datastores configuration') %
{'weight': weight})
LOG.exception(msg)
raise exceptions.BadStoreConfiguration(
store_name="vmware_datastore", reason=msg)
datacenter_path, datastore_name = parts[0], parts[1]
if not datacenter_path or not datastore_name:
msg = _('Invalid datacenter_path or datastore_name specified '
'in vmware_datastores configuration')
LOG.exception(msg)
raise exceptions.BadStoreConfiguration(
store_name="vmware_datastore", reason=msg)
return datacenter_path, datastore_name, weight
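# Note on parsing: rsplit(":", 2) splits from the right, so the datacenter
# path itself may contain colons. For example, 'dc:sub:path:ds1:5' parses to
# datacenter_path 'dc:sub:path', datastore_name 'ds1' and weight '5'.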
def _build_datastore_weighted_map(self, datastores):
"""Build an ordered map where the key is a weight and the value is a
Datastore object.
:param datastores: a list of datastores in the format
datacenter_path:datastore_name:weight
:return: a map with key-value <weight>:<Datastore>
"""
ds_map = {}
for ds in datastores:
dc_path, name, weight = self._parse_datastore_info_and_weight(ds)
# Fetch the server side reference.
ds_obj = self._get_datastore(dc_path, name)
if not ds_obj:
msg = (_("Could not find datastore %(ds_name)s "
"in datacenter %(dc_path)s")
% {'ds_name': name,
'dc_path': dc_path})
LOG.error(msg)
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=msg)
ds_map.setdefault(int(weight), []).append(ds_obj)
return ds_map
def configure_add(self):
if self.conf.glance_store.vmware_datastores:
datastores = self.conf.glance_store.vmware_datastores
else:
# Backwards compatibility for vmware_datastore_name and
# vmware_datacenter_path.
datacenter_path = self.conf.glance_store.vmware_datacenter_path
datastore_name = self._option_get('vmware_datastore_name')
datastores = ['%s:%s:%s' % (datacenter_path, datastore_name, 0)]
self.datastores = self._build_datastore_weighted_map(datastores)
self.store_image_dir = self.conf.glance_store.vmware_store_image_dir
def select_datastore(self, image_size):
"""Select a datastore with free space larger than image size."""
for k, v in sorted(six.iteritems(self.datastores), reverse=True):
max_ds = None
max_fs = 0
for ds in v:
# Update with current freespace
ds.freespace = self._get_freespace(ds)
if ds.freespace > max_fs:
max_ds = ds
max_fs = ds.freespace
if max_ds and max_ds.freespace >= image_size:
return max_ds
msg = _LE("No datastore found with enough free space to contain an "
"image of size %d") % image_size
LOG.error(msg)
raise exceptions.StorageFull()
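# Selection order, as implemented above: weight groups are visited from
# highest to lowest, within a group the datastore with the most (nonzero)
# free space wins, and the first such candidate whose free space covers
# image_size is returned; otherwise StorageFull is raised.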
def _option_get(self, param):
result = getattr(self.conf.glance_store, param)
if not result:
reason = (_("Could not find %(param)s in configuration "
"options.") % {'param': param})
raise exceptions.BadStoreConfiguration(
store_name='vmware_datastore', reason=reason)
return result
def _build_vim_cookie_header(self, verify_session=False):
"""Build ESX host session cookie header."""
if verify_session and not self.session.is_current_session_active():
self.reset_session()
vim_cookies = self.session.vim.client.options.transport.cookiejar
if len(list(vim_cookies)) > 0:
cookie = list(vim_cookies)[0]
return cookie.name + '=' + cookie.value
@capabilities.check
def add(self, image_id, image_file, image_size, context=None):
"""Stores an image file with supplied identifier to the backend
storage system and returns a tuple containing information
about the stored image.
:param image_id: The opaque image identifier
:param image_file: The image data to write, as a file-like object
:param image_size: The size of the image data to write, in bytes
:retval tuple of URL in backing store, bytes written, checksum
and a dictionary with storage system specific information
:raises `glance.common.exceptions.Duplicate` if the image already
existed
`glance.common.exceptions.UnexpectedStatus` if the upload
request returned an unexpected status. The expected responses
are 201 Created and 200 OK.
"""
ds = self.select_datastore(image_size)
if image_size > 0:
headers = {'Content-Length': image_size}
image_file = _Reader(image_file)
else:
# NOTE (arnaud): use chunk encoding when the image is still being
# generated by the server (ex: stream optimized disks generated by
# Nova).
headers = {'Transfer-Encoding': 'chunked'}
image_file = _ChunkReader(image_file)
loc = StoreLocation({'scheme': self.scheme,
'server_host': self.server_host,
'image_dir': self.store_image_dir,
'datacenter_path': ds.datacenter.path,
'datastore_name': ds.name,
'image_id': image_id}, self.conf)
# NOTE(arnaud): use a decorator when the config is not tied to self
cookie = self._build_vim_cookie_header(True)
headers = dict(headers)
headers['Cookie'] = cookie
conn_class = self._get_http_conn_class()
conn = conn_class(loc.server_host)
url = urllib.parse.quote('%s?%s' % (loc.path, loc.query))
try:
conn.request('PUT', url, image_file, headers)
except IOError as e:
# When a session is not authenticated, the socket is closed by
# the server after sending the response. http_client has an open
# issue with https that raises Broken Pipe
# error instead of returning the response.
# See http://bugs.python.org/issue16062. Here, we log the error
# and continue to look into the response.
msg = _LE('Communication error sending http %(method)s request '
'to the url %(url)s.\n'
'Got IOError %(e)s') % {'method': 'PUT',
'url': url,
'e': e}
LOG.error(msg)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to upload content of image '
'%(image)s'), {'image': image_id})
res = conn.getresponse()
if res.status == http_client.CONFLICT:
raise exceptions.Duplicate(_("Image file %(image_id)s already "
"exists!") %
{'image_id': image_id})
if res.status not in (http_client.CREATED, http_client.OK):
msg = (_LE('Failed to upload content of image %(image)s. '
'The request returned an unexpected status: %(status)s.'
'\nThe response body:\n%(body)s') %
{'image': image_id,
'status': res.status,
'body': res.read()})
LOG.error(msg)
raise exceptions.BackendException(msg)
return (loc.get_uri(), image_file.size,
image_file.checksum.hexdigest(), {})
@capabilities.check
def get(self, location, offset=0, chunk_size=None, context=None):
"""Takes a `glance_store.location.Location` object that indicates
where to find the image file, and returns a tuple of generator
(for reading the image file) and image_size
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
"""
conn, resp, content_length = self._query(location, 'GET')
iterator = http_response_iterator(conn, resp, self.READ_CHUNKSIZE)
class ResponseIndexable(glance_store.Indexable):
def another(self):
try:
return next(self.wrapped)
except StopIteration:
return ''
return (ResponseIndexable(iterator, content_length), content_length)
def get_size(self, location, context=None):
"""Takes a `glance_store.location.Location` object that indicates
where to find the image file, and returns the size
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
"""
return self._query(location, 'HEAD')[2]
@capabilities.check
def delete(self, location, context=None):
"""Takes a `glance_store.location.Location` object that indicates
where to find the image file to delete
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:raises NotFound if image does not exist
"""
file_path = '[%s] %s' % (
location.store_location.datastore_name,
location.store_location.path[len(DS_URL_PREFIX):])
dc_obj = self._get_datacenter(location.store_location.datacenter_path)
delete_task = self.session.invoke_api(
self.session.vim,
'DeleteDatastoreFile_Task',
self.session.vim.service_content.fileManager,
name=file_path,
datacenter=dc_obj.ref)
try:
self.session.wait_for_task(delete_task)
except vexc.FileNotFoundException:
msg = _('Image file %s not found') % file_path
LOG.warn(msg)
raise exceptions.NotFound(message=msg)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to delete image %(image)s '
'content.') % {'image': location.image_id})
def _query(self, location, method, depth=0):
if depth > MAX_REDIRECTS:
msg = ("The HTTP URL exceeded %(max_redirects)s maximum "
"redirects.", {'max_redirects': MAX_REDIRECTS})
LOG.debug(msg)
raise exceptions.MaxRedirectsExceeded(redirects=MAX_REDIRECTS)
loc = location.store_location
# NOTE(arnaud): use a decorator when the config is not tied to self
for i in range(self.api_retry_count + 1):
cookie = self._build_vim_cookie_header()
headers = {'Cookie': cookie}
try:
conn = self._get_http_conn(method, loc, headers)
resp = conn.getresponse()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to access image %(image)s '
'content.') % {'image':
location.image_id})
if resp.status >= 400:
if resp.status == http_client.UNAUTHORIZED:
self.reset_session()
continue
if resp.status == http_client.NOT_FOUND:
reason = _('VMware datastore could not find image at URI.')
LOG.info(reason)
raise exceptions.NotFound(message=reason)
msg = ('HTTP request returned a %(status)s status code.'
% {'status': resp.status})
LOG.debug(msg)
raise exceptions.BadStoreUri(msg)
break
location_header = resp.getheader('location')
if location_header:
if resp.status not in (301, 302):
reason = (_("The HTTP URL %(path)s attempted to redirect "
"with an invalid %(status)s status code.")
% {'path': loc.path, 'status': resp.status})
LOG.info(reason)
raise exceptions.BadStoreUri(message=reason)
location_class = glance_store.location.Location
new_loc = location_class(location.store_name,
location.store_location.__class__,
uri=location_header,
image_id=location.image_id,
store_specs=location.store_specs)
return self._query(new_loc, method, depth + 1)
content_length = int(resp.getheader('content-length', 0))
return (conn, resp, content_length)
def _get_http_conn(self, method, loc, headers, content=None):
conn_class = self._get_http_conn_class()
conn = conn_class(loc.server_host)
url = urllib.parse.quote('%s?%s' % (loc.path, loc.query))
conn.request(method, url, content, headers)
return conn
def _get_http_conn_class(self):
if self.api_insecure:
return http_client.HTTPConnection
return http_client.HTTPSConnection
|
apache-2.0
|
tempbottle/kbengine
|
kbe/res/scripts/common/Lib/test/test_smtpd.py
|
118
|
22585
|
import unittest
from test import support, mock_socket
import socket
import io
import smtpd
import asyncore
class DummyServer(smtpd.SMTPServer):
def __init__(self, localaddr, remoteaddr):
smtpd.SMTPServer.__init__(self, localaddr, remoteaddr)
self.messages = []
def process_message(self, peer, mailfrom, rcpttos, data):
self.messages.append((peer, mailfrom, rcpttos, data))
if data == 'return status':
return '250 Okish'
class DummyDispatcherBroken(Exception):
pass
class BrokenDummyServer(DummyServer):
def listen(self, num):
raise DummyDispatcherBroken()
class SMTPDServerTest(unittest.TestCase):
def setUp(self):
smtpd.socket = asyncore.socket = mock_socket
def test_process_message_unimplemented(self):
server = smtpd.SMTPServer('a', 'b')
conn, addr = server.accept()
channel = smtpd.SMTPChannel(server, conn, addr)
def write_line(line):
channel.socket.queue_recv(line)
channel.handle_read()
write_line(b'HELO example')
write_line(b'MAIL From:eggs@example')
write_line(b'RCPT To:spam@example')
write_line(b'DATA')
self.assertRaises(NotImplementedError, write_line, b'spam\r\n.\r\n')
def tearDown(self):
asyncore.close_all()
asyncore.socket = smtpd.socket = socket
class SMTPDChannelTest(unittest.TestCase):
def setUp(self):
smtpd.socket = asyncore.socket = mock_socket
self.old_debugstream = smtpd.DEBUGSTREAM
self.debug = smtpd.DEBUGSTREAM = io.StringIO()
self.server = DummyServer('a', 'b')
conn, addr = self.server.accept()
self.channel = smtpd.SMTPChannel(self.server, conn, addr)
def tearDown(self):
asyncore.close_all()
asyncore.socket = smtpd.socket = socket
smtpd.DEBUGSTREAM = self.old_debugstream
def write_line(self, line):
self.channel.socket.queue_recv(line)
self.channel.handle_read()
def test_broken_connect(self):
self.assertRaises(DummyDispatcherBroken, BrokenDummyServer, 'a', 'b')
def test_server_accept(self):
self.server.handle_accept()
def test_missing_data(self):
self.write_line(b'')
self.assertEqual(self.channel.socket.last,
b'500 Error: bad syntax\r\n')
def test_EHLO(self):
self.write_line(b'EHLO example')
self.assertEqual(self.channel.socket.last, b'250 HELP\r\n')
def test_EHLO_bad_syntax(self):
self.write_line(b'EHLO')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: EHLO hostname\r\n')
def test_EHLO_duplicate(self):
self.write_line(b'EHLO example')
self.write_line(b'EHLO example')
self.assertEqual(self.channel.socket.last,
b'503 Duplicate HELO/EHLO\r\n')
def test_EHLO_HELO_duplicate(self):
self.write_line(b'EHLO example')
self.write_line(b'HELO example')
self.assertEqual(self.channel.socket.last,
b'503 Duplicate HELO/EHLO\r\n')
def test_HELO(self):
name = smtpd.socket.getfqdn()
self.write_line(b'HELO example')
self.assertEqual(self.channel.socket.last,
'250 {}\r\n'.format(name).encode('ascii'))
def test_HELO_EHLO_duplicate(self):
self.write_line(b'HELO example')
self.write_line(b'EHLO example')
self.assertEqual(self.channel.socket.last,
b'503 Duplicate HELO/EHLO\r\n')
def test_HELP(self):
self.write_line(b'HELP')
self.assertEqual(self.channel.socket.last,
b'250 Supported commands: EHLO HELO MAIL RCPT ' + \
b'DATA RSET NOOP QUIT VRFY\r\n')
def test_HELP_command(self):
self.write_line(b'HELP MAIL')
self.assertEqual(self.channel.socket.last,
b'250 Syntax: MAIL FROM: <address>\r\n')
def test_HELP_command_unknown(self):
self.write_line(b'HELP SPAM')
self.assertEqual(self.channel.socket.last,
b'501 Supported commands: EHLO HELO MAIL RCPT ' + \
b'DATA RSET NOOP QUIT VRFY\r\n')
def test_HELO_bad_syntax(self):
self.write_line(b'HELO')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: HELO hostname\r\n')
def test_HELO_duplicate(self):
self.write_line(b'HELO example')
self.write_line(b'HELO example')
self.assertEqual(self.channel.socket.last,
b'503 Duplicate HELO/EHLO\r\n')
def test_HELO_parameter_rejected_when_extensions_not_enabled(self):
self.extended_smtp = False
self.write_line(b'HELO example')
self.write_line(b'MAIL from:<[email protected]> SIZE=1234')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: MAIL FROM: <address>\r\n')
def test_MAIL_allows_space_after_colon(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from: <[email protected]>')
self.assertEqual(self.channel.socket.last,
b'250 OK\r\n')
def test_extended_MAIL_allows_space_after_colon(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from: <[email protected]> size=20')
self.assertEqual(self.channel.socket.last,
b'250 OK\r\n')
def test_NOOP(self):
self.write_line(b'NOOP')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_HELO_NOOP(self):
self.write_line(b'HELO example')
self.write_line(b'NOOP')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_NOOP_bad_syntax(self):
self.write_line(b'NOOP hi')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: NOOP\r\n')
def test_QUIT(self):
self.write_line(b'QUIT')
self.assertEqual(self.channel.socket.last, b'221 Bye\r\n')
def test_HELO_QUIT(self):
self.write_line(b'HELO example')
self.write_line(b'QUIT')
self.assertEqual(self.channel.socket.last, b'221 Bye\r\n')
def test_QUIT_arg_ignored(self):
self.write_line(b'QUIT bye bye')
self.assertEqual(self.channel.socket.last, b'221 Bye\r\n')
def test_bad_state(self):
self.channel.smtp_state = 'BAD STATE'
self.write_line(b'HELO example')
self.assertEqual(self.channel.socket.last,
b'451 Internal confusion\r\n')
def test_command_too_long(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from: ' +
b'a' * self.channel.command_size_limit +
b'@example')
self.assertEqual(self.channel.socket.last,
b'500 Error: line too long\r\n')
def test_MAIL_command_limit_extended_with_SIZE(self):
self.write_line(b'EHLO example')
fill_len = self.channel.command_size_limit - len('MAIL from:<@example>')
self.write_line(b'MAIL from:<' +
b'a' * fill_len +
b'@example> SIZE=1234')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'MAIL from:<' +
b'a' * (fill_len + 26) +
b'@example> SIZE=1234')
self.assertEqual(self.channel.socket.last,
b'500 Error: line too long\r\n')
def test_data_longer_than_default_data_size_limit(self):
# Hack the default so we don't have to generate so much data.
self.channel.data_size_limit = 1048
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'DATA')
self.write_line(b'A' * self.channel.data_size_limit +
b'A\r\n.')
self.assertEqual(self.channel.socket.last,
b'552 Error: Too much mail data\r\n')
def test_MAIL_size_parameter(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL FROM:<eggs@example> SIZE=512')
self.assertEqual(self.channel.socket.last,
b'250 OK\r\n')
def test_MAIL_invalid_size_parameter(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL FROM:<eggs@example> SIZE=invalid')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: MAIL FROM: <address> [SP <mail-parameters>]\r\n')
def test_MAIL_RCPT_unknown_parameters(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL FROM:<eggs@example> ham=green')
self.assertEqual(self.channel.socket.last,
b'555 MAIL FROM parameters not recognized or not implemented\r\n')
self.write_line(b'MAIL FROM:<eggs@example>')
self.write_line(b'RCPT TO:<eggs@example> ham=green')
self.assertEqual(self.channel.socket.last,
b'555 RCPT TO parameters not recognized or not implemented\r\n')
def test_MAIL_size_parameter_larger_than_default_data_size_limit(self):
self.channel.data_size_limit = 1048
self.write_line(b'EHLO example')
self.write_line(b'MAIL FROM:<eggs@example> SIZE=2096')
self.assertEqual(self.channel.socket.last,
b'552 Error: message size exceeds fixed maximum message size\r\n')
def test_need_MAIL(self):
self.write_line(b'HELO example')
self.write_line(b'RCPT to:spam@example')
self.assertEqual(self.channel.socket.last,
b'503 Error: need MAIL command\r\n')
def test_MAIL_syntax_HELO(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from eggs@example')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: MAIL FROM: <address>\r\n')
def test_MAIL_syntax_EHLO(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from eggs@example')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: MAIL FROM: <address> [SP <mail-parameters>]\r\n')
def test_MAIL_missing_address(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from:')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: MAIL FROM: <address>\r\n')
def test_MAIL_chevrons(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from:<eggs@example>')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_MAIL_empty_chevrons(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from:<>')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_MAIL_quoted_localpart(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from: <"Fred Blogs"@example.com>')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.channel.mailfrom, '"Fred Blogs"@example.com')
def test_MAIL_quoted_localpart_no_angles(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from: "Fred Blogs"@example.com')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.channel.mailfrom, '"Fred Blogs"@example.com')
def test_MAIL_quoted_localpart_with_size(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from: <"Fred Blogs"@example.com> SIZE=1000')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.channel.mailfrom, '"Fred Blogs"@example.com')
def test_MAIL_quoted_localpart_with_size_no_angles(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL from: "Fred Blogs"@example.com SIZE=1000')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.channel.mailfrom, '"Fred Blogs"@example.com')
def test_nested_MAIL(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL from:eggs@example')
self.write_line(b'MAIL from:spam@example')
self.assertEqual(self.channel.socket.last,
b'503 Error: nested MAIL command\r\n')
def test_VRFY(self):
self.write_line(b'VRFY eggs@example')
self.assertEqual(self.channel.socket.last,
b'252 Cannot VRFY user, but will accept message and attempt ' + \
b'delivery\r\n')
def test_VRFY_syntax(self):
self.write_line(b'VRFY')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: VRFY <address>\r\n')
def test_EXPN_not_implemented(self):
self.write_line(b'EXPN')
self.assertEqual(self.channel.socket.last,
b'502 EXPN not implemented\r\n')
def test_no_HELO_MAIL(self):
self.write_line(b'MAIL from:<[email protected]>')
self.assertEqual(self.channel.socket.last,
b'503 Error: send HELO first\r\n')
def test_need_RCPT(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'DATA')
self.assertEqual(self.channel.socket.last,
b'503 Error: need RCPT command\r\n')
def test_RCPT_syntax_HELO(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From: eggs@example')
self.write_line(b'RCPT to eggs@example')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: RCPT TO: <address>\r\n')
def test_RCPT_syntax_EHLO(self):
self.write_line(b'EHLO example')
self.write_line(b'MAIL From: eggs@example')
self.write_line(b'RCPT to eggs@example')
self.assertEqual(self.channel.socket.last,
b'501 Syntax: RCPT TO: <address> [SP <mail-parameters>]\r\n')
def test_RCPT_lowercase_to_OK(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From: eggs@example')
self.write_line(b'RCPT to: <eggs@example>')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_no_HELO_RCPT(self):
self.write_line(b'RCPT to eggs@example')
self.assertEqual(self.channel.socket.last,
b'503 Error: send HELO first\r\n')
def test_data_dialog(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'RCPT To:spam@example')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'DATA')
self.assertEqual(self.channel.socket.last,
b'354 End data with <CR><LF>.<CR><LF>\r\n')
self.write_line(b'data\r\nmore\r\n.')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.server.messages,
[('peer', 'eggs@example', ['spam@example'], 'data\nmore')])
def test_DATA_syntax(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'DATA spam')
self.assertEqual(self.channel.socket.last, b'501 Syntax: DATA\r\n')
def test_no_HELO_DATA(self):
self.write_line(b'DATA spam')
self.assertEqual(self.channel.socket.last,
b'503 Error: send HELO first\r\n')
def test_data_transparency_section_4_5_2(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'DATA')
self.write_line(b'..\r\n.\r\n')
self.assertEqual(self.channel.received_data, '.')
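# The test above exercises the transparency rule of RFC 5321 section 4.5.2
# ("dot-stuffing"): a data line starting with '.' is transmitted with an
# extra leading dot, and the receiver strips it, so the wire bytes
# b'..\r\n.\r\n' decode to a message body consisting of a single '.'.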
def test_multiple_RCPT(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'RCPT To:ham@example')
self.write_line(b'DATA')
self.write_line(b'data\r\n.')
self.assertEqual(self.server.messages,
[('peer', 'eggs@example', ['spam@example','ham@example'], 'data')])
def test_manual_status(self):
# checks that the Channel is able to return a custom status message
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'DATA')
self.write_line(b'return status\r\n.')
self.assertEqual(self.channel.socket.last, b'250 Okish\r\n')
def test_RSET(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'RSET')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'MAIL From:foo@example')
self.write_line(b'RCPT To:eggs@example')
self.write_line(b'DATA')
self.write_line(b'data\r\n.')
self.assertEqual(self.server.messages,
[('peer', 'foo@example', ['eggs@example'], 'data')])
def test_HELO_RSET(self):
self.write_line(b'HELO example')
self.write_line(b'RSET')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
def test_RSET_syntax(self):
self.write_line(b'RSET hi')
self.assertEqual(self.channel.socket.last, b'501 Syntax: RSET\r\n')
def test_unknown_command(self):
self.write_line(b'UNKNOWN_CMD')
self.assertEqual(self.channel.socket.last,
b'500 Error: command "UNKNOWN_CMD" not ' + \
b'recognized\r\n')
def test_attribute_deprecations(self):
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__server
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__server = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__line
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__line = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__state
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__state = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__greeting
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__greeting = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__mailfrom
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__mailfrom = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__rcpttos
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__rcpttos = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__data
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__data = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__fqdn
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__fqdn = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__peer
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__peer = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__conn
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__conn = 'spam'
with support.check_warnings(('', DeprecationWarning)):
spam = self.channel._SMTPChannel__addr
with support.check_warnings(('', DeprecationWarning)):
self.channel._SMTPChannel__addr = 'spam'
class SMTPDChannelWithDataSizeLimitTest(unittest.TestCase):
def setUp(self):
smtpd.socket = asyncore.socket = mock_socket
self.old_debugstream = smtpd.DEBUGSTREAM
self.debug = smtpd.DEBUGSTREAM = io.StringIO()
self.server = DummyServer('a', 'b')
conn, addr = self.server.accept()
# Set DATA size limit to 32 bytes for easy testing
self.channel = smtpd.SMTPChannel(self.server, conn, addr, 32)
def tearDown(self):
asyncore.close_all()
asyncore.socket = smtpd.socket = socket
smtpd.DEBUGSTREAM = self.old_debugstream
def write_line(self, line):
self.channel.socket.queue_recv(line)
self.channel.handle_read()
def test_data_limit_dialog(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'RCPT To:spam@example')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'DATA')
self.assertEqual(self.channel.socket.last,
b'354 End data with <CR><LF>.<CR><LF>\r\n')
self.write_line(b'data\r\nmore\r\n.')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.assertEqual(self.server.messages,
[('peer', 'eggs@example', ['spam@example'], 'data\nmore')])
def test_data_limit_dialog_too_much_data(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'RCPT To:spam@example')
self.assertEqual(self.channel.socket.last, b'250 OK\r\n')
self.write_line(b'DATA')
self.assertEqual(self.channel.socket.last,
b'354 End data with <CR><LF>.<CR><LF>\r\n')
self.write_line(b'This message is longer than 32 bytes\r\n.')
self.assertEqual(self.channel.socket.last,
b'552 Error: Too much mail data\r\n')
if __name__ == "__main__":
unittest.main()
|
lgpl-3.0
|
aringh/odl
|
odl/tomo/geometry/spect.py
|
3
|
8247
|
# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Single-photon emission computed tomography (SPECT) geometry."""
from __future__ import print_function, division, absolute_import
import numpy as np
from odl.tomo.geometry.parallel import Parallel3dAxisGeometry
from odl.tomo.util.utility import transform_system
from odl.util import signature_string, indent, array_str
__all__ = ('ParallelHoleCollimatorGeometry', )
class ParallelHoleCollimatorGeometry(Parallel3dAxisGeometry):
"""Geometry for SPECT Parallel hole collimator.
For details, check `the online docs
<https://odlgroup.github.io/odl/guide/geometry_guide.html>`_.
"""
def __init__(self, apart, dpart, det_radius, axis=(0, 0, 1), **kwargs):
"""Initialize a new instance.
Parameters
----------
apart : 1-dim. `RectPartition`
Partition of the angle interval.
dpart : 2-dim. `RectPartition`
Partition of the detector parameter rectangle.
det_radius : positive float
Radius of the circular detector orbit.
axis : `array-like`, shape ``(3,)``, optional
Vector defining the fixed rotation axis of this geometry.
Other Parameters
----------------
orig_to_det_init : `array-like`, shape ``(3,)``, optional
Vector pointing towards the initial position of the detector
reference point. The default depends on ``axis``, see Notes.
The zero vector is not allowed.
det_axes_init : 2-tuple of `array-like`'s (shape ``(3,)``), optional
Initial axes defining the detector orientation. The default
depends on ``axis``, see Notes.
translation : `array-like`, shape ``(3,)``, optional
Global translation of the geometry. This is added last in any
method that computes an absolute vector, e.g., `det_refpoint`,
and also shifts the axis of rotation.
Default: ``(0, 0, 0)``
check_bounds : bool, optional
If ``True``, methods perform sanity checks on provided input
parameters.
Default: ``True``
Notes
-----
In the default configuration, the rotation axis is ``(0, 0, 1)``,
the vector towards the initial detector reference point is
``(0, 1, 0)``, and the default detector axes are
``[(1, 0, 0), (0, 0, 1)]``.
If a different ``axis`` is provided, the new default initial
position and the new default axes are computed by rotating
the original ones by a matrix that transforms ``(0, 0, 1)`` to the
new (normalized) ``axis``. This matrix is calculated with the
`rotation_matrix_from_to` function. Expressed in code, we have ::
init_rot = rotation_matrix_from_to((0, 0, 1), axis)
orig_to_det_init = init_rot.dot((0, 1, 0))
det_axes_init[0] = init_rot.dot((1, 0, 0))
det_axes_init[1] = init_rot.dot((0, 0, 1))
"""
self.__det_radius = float(det_radius)
if self.det_radius <= 0:
raise ValueError('`det_radius` must be positive, got {}'
''.format(det_radius))
orig_to_det_init = kwargs.pop('orig_to_det_init', None)
if orig_to_det_init is not None:
orig_to_det_init = np.asarray(orig_to_det_init, dtype=float)
orig_to_det_norm = np.linalg.norm(orig_to_det_init)
if orig_to_det_norm == 0:
raise ValueError('`orig_to_det_init` cannot be zero')
else:
det_pos_init = (orig_to_det_init / orig_to_det_norm *
self.det_radius)
kwargs['det_pos_init'] = det_pos_init
self._orig_to_det_init_arg = orig_to_det_init
super(ParallelHoleCollimatorGeometry, self).__init__(
apart, dpart, axis, **kwargs)
@classmethod
def frommatrix(cls, apart, dpart, det_radius, init_matrix, **kwargs):
"""Create a `ParallelHoleCollimatorGeometry` using a matrix.
This alternative constructor uses a matrix to rotate and
translate the default configuration. It is most useful when
the transformation to be applied is already given as a matrix.
Parameters
----------
apart : 1-dim. `RectPartition`
Partition of the parameter interval.
dpart : 2-dim. `RectPartition`
Partition of the detector parameter set.
det_radius : positive float
Radius of the circular detector orbit.
init_matrix : `array_like`, shape ``(3, 3)`` or ``(3, 4)``, optional
Transformation matrix whose left ``(3, 3)`` block is multiplied
with the default ``det_pos_init`` and ``det_axes_init`` to
determine the new vectors. If present, the fourth column acts
as a translation after the initial transformation.
The resulting ``det_axes_init`` will be normalized.
kwargs :
Further keyword arguments passed to the class constructor.
Returns
-------
geometry : `ParallelHoleCollimatorGeometry`
The resulting geometry.
"""
# Get transformation and translation parts from `init_matrix`
init_matrix = np.asarray(init_matrix, dtype=float)
if init_matrix.shape not in ((3, 3), (3, 4)):
raise ValueError('`matrix` must have shape (3, 3) or (3, 4), '
'got array with shape {}'
''.format(init_matrix.shape))
trafo_matrix = init_matrix[:, :3]
translation = init_matrix[:, 3:].squeeze()
# Transform the default vectors
default_axis = cls._default_config['axis']
# Normalized version, just in case
default_orig_to_det_init = (
np.array(cls._default_config['det_pos_init'], dtype=float) /
np.linalg.norm(cls._default_config['det_pos_init']))
default_det_axes_init = cls._default_config['det_axes_init']
vecs_to_transform = ((default_orig_to_det_init,) +
default_det_axes_init)
transformed_vecs = transform_system(
default_axis, None, vecs_to_transform, matrix=trafo_matrix)
# Use the standard constructor with these vectors
axis, orig_to_det, det_axis_0, det_axis_1 = transformed_vecs
if translation.size != 0:
kwargs['translation'] = translation
return cls(apart, dpart, det_radius, axis,
orig_to_det_init=orig_to_det,
det_axes_init=[det_axis_0, det_axis_1],
**kwargs)
@property
def det_radius(self):
"""Radius of the detector orbit."""
return self.__det_radius
@property
def orig_to_det_init(self):
"""Unit vector from rotation center to initial detector position."""
return self.det_pos_init / np.linalg.norm(self.det_pos_init)
def __repr__(self):
"""Return ``repr(self)``."""
posargs = [self.motion_partition, self.det_partition]
optargs = [('det_radius', self.det_radius, -1)]
if not np.allclose(self.axis, self._default_config['axis']):
optargs.append(['axis', array_str(self.axis), ''])
if self._orig_to_det_init_arg is not None:
optargs.append(['orig_to_det_init',
array_str(self._orig_to_det_init_arg),
''])
if self._det_axes_init_arg is not None:
optargs.append(
['det_axes_init',
tuple(array_str(a) for a in self._det_axes_init_arg),
None])
if not np.array_equal(self.translation, (0, 0, 0)):
optargs.append(['translation', array_str(self.translation), ''])
sig_str = signature_string(posargs, optargs, sep=',\n')
return '{}(\n{}\n)'.format(self.__class__.__name__, indent(sig_str))
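# Minimal usage sketch (illustrative only; the uniform_partition helpers are
# assumed from the top-level odl package):
#     import odl
#     apart = odl.uniform_partition(0, 2 * np.pi, 180)
#     dpart = odl.uniform_partition([-20, -20], [20, 20], (128, 128))
#     geom = ParallelHoleCollimatorGeometry(apart, dpart, det_radius=30)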
|
mpl-2.0
|
kstaniek/has
|
examples/hc2_demo.py
|
1
|
10440
|
#!/usr/bin/env python3
# Copyright (c) Klaudisz Staniek.
# See LICENSE for details.
"""
This is an example application demonstrating the event-driven driver capabilities and API usage.
"""
from has.manager.manager import Manager
from has.utils.notification import Notification
from threading import Lock, RLock, Condition
from datetime import datetime
import configparser
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from queue import Queue, Empty
import threading
class NodeInfo:
"""
This is a class containing the minimal information required to keep track of the nodes
"""
def __init__(self):
self.network_id = None
self.node_id = None
self.value_ids = []
nodes = []
initFailed = True
criticalSection = Lock()
def get_value_obj(value_id):
for node in nodes:
for value in node.value_ids:
if value.id == value_id:
return value
def get_node_info( notification ):
network_id = notification.network_id
node_id = notification.node_id
for node in nodes:
if node.network_id == network_id and node.node_id == node_id:
return node
return None
def OnNotification( notification, context ):
global initFailed
global criticalSection
global initCondition
with criticalSection:
notification_type = notification.type
if notification_type == Notification.Type_DriverReady:
context.on_message("StatusUpdate","Driver Ready")
initFailed = False
elif notification_type == Notification.Type_DriverFailed:
context.on_message("StatusUpdate","Driver Failed")
elif notification_type == Notification.Type_DriverReset:
context.on_message("StatusUpdate","Driver Reset")
elif notification_type == Notification.Type_AllNodesQueried:
context.on_message("StatusUpdate","All Nodes Queried")
elif notification_type == Notification.Type_NodeAdded:
node_info = NodeInfo()
node_info.network_id = notification.network_id
node_info.node_id = notification.node_id
nodes.append(node_info)
context.on_message('NodeAdded', notification)
elif notification_type == Notification.Type_NodeRemoved:
network_id = notification.network_id
node_id = notification.node_id
for node in nodes[:]:
if node_id == node.node_id and network_id == node.network_id:
nodes.remove(node)
del node
context.on_message('NodeRemoved', notification)
break
elif notification_type == Notification.Type_NodeChanged:
context.on_message('NodeChanged', notification)
elif notification_type == Notification.Type_ValueAdded:
#print("Manager: Value Added %s" % (notification.node_id ) )
node_info = get_node_info( notification )
if node_info is not None:
node_info.value_ids.append( notification.value_id )
context.on_message('ValueAdded', notification)
elif notification_type == Notification.Type_ValueChanged:
node_info = get_node_info( notification )
network_id = node_info.network_id
node_id = node_info.node_id
value_id = notification.value_id
value_type = Manager.get_value_type( value_id )
value_id = Manager.get_value_id( value_id )
value = Manager.get_value_as_string( value_id )
units = Manager.get_value_units( value_id )
node_name = Manager.get_node_name( network_id, node_id )
node_location_name = Manager.get_node_location_name( network_id, node_id )
text = "{0} Node {1}: {2} @ {3} changed {4} to {5}".format( str(datetime.today()), node_id, node_name, node_location_name, value_type, value )
context.on_message('ValueChanged', notification)
context.on_message("StatusUpdate", text)
elif notification_type == Notification.Type_NodeQueriesComplete:
node_name = Manager.get_node_name( notification.network_id, notification.node_id )
context.on_message('NodeQueriesComplete', notification)
class HASApp():
def __init__(self, root):
self.queue = Queue()
self.root = root
#self.root.protocol("WM_DELETE_WINDOW", self.callback)
self.tree = ttk.Treeview() #columns=('Node ID', 'type', 'size'), displaycolumns='size')
self.tree.tag_configure('updated', foreground='red')
self.tree.tag_configure('normal', foreground='black')
self.tree.pack(side=TOP, fill=BOTH, expand=Y)
self.status = StringVar()
Label(root, textvariable=self.status).pack()
root.bind('<<open-config-dialog>>', self.config_dialog)
root.createcommand('::tk::mac::ShowPreferences', self.config_dialog)
root.bind('<<close-all-windows>>', self.callback)
root.createcommand('exit', self.callback)
def status_update(self, notification):
self.status.set(str(notification))
def callback(self):
if messagebox.askokcancel("Quit", "Do you really wish to quit?"):
self.running = 0
#self.root.quit()
def run(self):
self.running = 1
Manager.add_watcher( OnNotification, self)
Manager.read_config("manager.ini")
#self.thread1 = threading.Thread(target=self.worker_thread)
#self.thread1.start()
self.queue_check()
def config_dialog(self, event=None):
if messagebox.askokcancel("Quit", "Do you really wish to quit?"):
print("config")
def add_node(self, notification):
item_id = "{0}:{1}".format(notification.network_id, notification.node_id)
if not self.tree.exists(item_id):
text = "Node {0}:".format(notification.node_id)
self.tree.insert("", "end", item_id, text=text)
def node_queries_complete(self, notification):
item_id = "{0}:{1}".format(notification.network_id, notification.node_id)
node_name = Manager.get_node_name( notification.network_id, notification.node_id )
node_location_name = Manager.get_node_location_name( notification.network_id, notification.node_id )
node_type = Manager.get_node_type( notification.network_id, notification.node_id )
#print(node_location_name)
if not self.tree.exists(node_location_name):
self.tree.insert("", "end", node_location_name, text=node_location_name)
text = "{1} (Node:{0}:{2})".format(notification.node_id, node_name, node_type)
self.tree.item(item_id, text=text)
self.tree.move(item_id, node_location_name, "end")
def remove_node(self, notification):
item_id = "{0}:{1}".format(notification.network_id, notification.node_id)
if self.tree.exists(item_id):
self.tree.delete(item_id)
def update_node(self, notification):
item_id = "{0}:{1}".format(notification.network_id, notification.node_id)
self.tree.item(item_id, tags=('updated'))
self.root.after(10000, self.reset_foreground, item_id)
def add_value(self, notification):
item_id = "{0}:{1}".format(notification.network_id, notification.node_id)
obj_value_id = notification.value_id
value_type = Manager.get_value_type( obj_value_id )
value = Manager.get_value_as_string( obj_value_id )
last_changed = Manager.get_value_last_changed(obj_value_id)
text="{0}={1} ({2})".format(value_type,value,last_changed)
self.tree.insert(item_id,"end", obj_value_id.id, text=text)
def change_value(self, notification):
obj_value_id = notification.value_id
if self.tree.exists(obj_value_id.id):
value_type = Manager.get_value_type( obj_value_id )
value = Manager.get_value_as_string( obj_value_id )
last_changed = Manager.get_value_last_changed(obj_value_id)
text="{0}={1} ({2})".format(value_type,value,last_changed)
self.tree.item(obj_value_id.id, text=text, tags=('updated'))
self.root.after(10000, self.reset_foreground, obj_value_id.id)
def reset_foreground(self, item):
self.tree.item(item, tags=('normal'))
def on_message(self, message, notification):
self.queue.put_nowait((message,notification))
def queue_check(self):
while self.queue.qsize():
try:
message, notification = self.queue.get_nowait()
if message == 'NodeAdded':
self.add_node(notification)
elif message == 'ValueAdded':
self.add_value(notification)
elif message == 'ValueChanged':
self.change_value(notification)
elif message == 'NodeRemoved':
self.remove_node(notification)
elif message == 'NodeChanged':
self.update_node(notification)
elif message == 'NodeQueriesComplete':
self.node_queries_complete(notification)
elif message == 'StatusUpdate':
self.status_update(notification)
except Empty:
pass
if not self.running:
Manager.close()
print("Done")
self.root.destroy()
else:
self.root.after(1000, self.queue_check)
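# Design note: driver notifications arrive on a background thread, so
# OnNotification never touches Tk widgets directly. It funnels every event
# through self.queue via on_message, and queue_check drains that queue on
# the Tk main loop (re-armed with root.after), keeping all GUI updates on a
# single thread.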
def main():
root = Tk(className="Home Automation System")
title=root.title("Home Automation System")
width=root.winfo_screenwidth()
height=root.winfo_screenheight()
root.geometry("{0}x{1}".format( width,height ) )
app = HASApp(root)
app.run()
root.mainloop()
if __name__ == "__main__":
main()
exit()
|
mit
|
mquandalle/rethinkdb
|
external/v8_3.30.33.16/testing/gmock/gtest/scripts/pump.py
|
2471
|
23673
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
a variable number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
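# Illustrative example (following the grammar in the module docstring): in
# the line '$range i 1..n', the '$range' pattern above matches first; the
# identifier 'i' and the two range expressions are then consumed by the
# parsing helpers further below.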
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
# We found a match for this regex in the line.
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# We failed to find any token in the remaining lines.
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
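# Example (illustrative): tokenizing the single line
#
#   $var n = 3
#
# yields a '$var' token, an 'id' token for 'n', an '=' token, and an
# 'exp' token for '3'; any plain text between directives is emitted as
# 'code' tokens.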
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
    print 'ERROR: unknown node type encountered:'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirective(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
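# Example (illustrative): WrapPlainCode prefers to break after ',' or ';'
# and indents continuation lines by four extra spaces, so an over-long
#
#   DoSomething(extremely_long_argument_one, extremely_long_argument_two, arg_three);
#
# becomes
#
#   DoSomething(extremely_long_argument_one, extremely_long_argument_two,
#       arg_three);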
def IsMultiLineIWYUPragma(line):
return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line) or
# Don't break IWYU pragmas, either; that causes iwyu.py problems.
re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsSingleLineComment(line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapPreprocessorDirective(line, output)
elif IsMultiLineIWYUPragma(line):
output.append(line)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
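# Example (illustrative): feeding ConvertFromPumpSource() the Pump program
#
#   $range i 1..3
#   $for i [[
#   Field f$i;
#   ]]
#
# expands the loop body once per value of i and yields
#
#   Field f1;
#   Field f2;
#   Field f3;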
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
|
agpl-3.0
|
open-synergy/account-payment
|
account_payment_return_import/wizard/payment_return_import.py
|
2
|
11987
|
# -*- coding: utf-8 -*-
# © 2016 Carlos Dauden <[email protected]>
# © 2016 Pedro M. Baeza <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import base64
from StringIO import StringIO
from zipfile import ZipFile, BadZipfile # BadZipFile in Python >= 3.2
from openerp import api, models, fields
from openerp.tools.translate import _
from openerp.exceptions import Warning as UserError, RedirectWarning
from openerp.addons.base_iban.base_iban import _pretty_iban
class PaymentReturnImport(models.TransientModel):
_name = 'payment.return.import'
_description = 'Import Payment Return'
@api.model
def _get_hide_journal_field(self):
""" Return False if the journal_id can't be provided by the parsed
file and must be provided by the wizard."""
return True
journal_id = fields.Many2one(
'account.journal', string='Journal',
help='Accounting journal related to the bank payment return you\'re '
        'importing. It has to be manually chosen for payment return formats '
        'which don\'t allow automatic journal detection.')
hide_journal_field = fields.Boolean(
string='Hide the journal field in the view',
compute='_get_hide_journal_field')
data_file = fields.Binary(
'Payment Return File', required=True,
        help='Get your bank payment returns in electronic format from your '
'bank and select them here.')
match_after_import = fields.Boolean(default=True)
@api.multi
def import_file(self):
"""Process the file chosen in the wizard, create bank payment return(s)
and go to reconciliation."""
self.ensure_one()
data_file = base64.b64decode(self.data_file)
payment_returns, notifications = self.with_context(
active_id=self.id
)._import_file(data_file)
result = self.env.ref(
'account_payment_return.payment_return_action').read()[0]
if self.match_after_import:
payment_returns.button_match()
if len(payment_returns) != 1:
result['domain'] = "[('id', 'in', %s)]" % payment_returns.ids
else:
form_view = self.env.ref(
'account_payment_return.payment_return_form_view')
result.update({
'views': [(form_view.id, 'form')],
'res_id': payment_returns.id,
'context': {
'notifications': notifications,
},
})
return result
@api.model
def _parse_all_files(self, data_file):
"""Parse one or multiple files from zip-file.
:param data_file: Decoded raw content of the file
:return: List of payment returns dictionaries for further processing.
"""
payment_return_raw_list = []
files = [data_file]
try:
with ZipFile(StringIO(data_file), 'r') as archive:
files = [
archive.read(filename) for filename in archive.namelist()
if not filename.endswith('/')
]
except BadZipfile:
pass
# Parse the file(s)
for import_file in files:
# The appropriate implementation module(s) returns the payment
# returns. We support a list of dictionaries or a simple
# dictionary.
            # Actually we don't care whether all the files have the
            # same format.
vals = self._parse_file(import_file)
if isinstance(vals, list):
payment_return_raw_list += vals
else:
payment_return_raw_list.append(vals)
return payment_return_raw_list
@api.model
def _import_file(self, data_file):
""" Create bank payment return(s) from file."""
# The appropriate implementation module returns the required data
payment_returns = self.env['payment.return']
notifications = []
payment_return_raw_list = self._parse_all_files(data_file)
# Check raw data:
self._check_parsed_data(payment_return_raw_list)
# Import all payment returns:
for payret_vals in payment_return_raw_list:
payret_vals = self._complete_payment_return(payret_vals)
payment_return, new_notifications = self._create_payment_return(
payret_vals)
if payment_return:
payment_returns += payment_return
notifications.extend(new_notifications)
if not payment_returns:
raise UserError(_('You have already imported this file.'))
return payment_returns, notifications
@api.model
def _parse_file(self, data_file):
""" Each module adding a file support must extends this method. It
processes the file if it can, returns super otherwise, resulting in a
chain of responsability.
This method parses the given file and returns the data required by
the bank payment return import process, as specified below.
- bank payment returns data: list of dict containing (optional
items marked by o) :
            -o 'account_number': string (e.g: 'BE1234567890')
The number of the bank account which the payment return
belongs to
- 'name': string (e.g: '000000123')
- 'date': date (e.g: 2013-06-26)
- 'transactions': list of dict containing :
- 'amount': float
- 'unique_import_id': string
-o 'concept': string
-o 'reason_code': string
-o 'reason': string
-o 'partner_name': string
-o 'reference': string
"""
raise UserError(_(
'Could not make sense of the given file.\n'
'Did you install the module to support this type of file?'
))
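    # Example (illustrative): a format-specific module extending this
    # wizard would typically return a dict such as
    #     {'account_number': 'BE1234567890',
    #      'name': '000000123',
    #      'date': '2013-06-26',
    #      'transactions': [{'amount': 100.0,
    #                        'unique_import_id': 'REF0001',
    #                        'reason_code': 'MS02'}]}
    # or a list of such dicts if the file contains several returns.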
@api.model
def _check_parsed_data(self, payment_returns):
""" Basic and structural verifications """
if not payment_returns:
raise UserError(_(
'This file doesn\'t contain any payment return.'))
for payret_vals in payment_returns:
if payret_vals.get('transactions'):
return
# If we get here, no transaction was found:
raise UserError(_('This file doesn\'t contain any transaction.'))
@api.model
def _find_bank_account_id(self, account_number):
""" Get res.partner.bank ID """
bank_account_id = None
if account_number and len(account_number) > 4:
iban_number = _pretty_iban(account_number)
bank_account = self.env['res.partner.bank'].search(
[('acc_number', '=', iban_number)], limit=1)
if bank_account:
bank_account_id = bank_account.id
return bank_account_id
@api.model
def _get_journal(self, bank_account_id):
""" Find the journal """
bank_model = self.env['res.partner.bank']
# Find the journal from context, wizard or bank account
journal_id = self.env.context.get('journal_id') or self.journal_id.id
if bank_account_id:
bank_account = bank_model.browse(bank_account_id)
if journal_id:
if (bank_account.journal_id.id and
bank_account.journal_id.id != journal_id):
raise UserError(
_('The account of this payment return is linked to '
'another journal.'))
if not bank_account.journal_id.id:
                    bank_account.write({'journal_id': journal_id})
else:
if bank_account.journal_id.id:
journal_id = bank_account.journal_id.id
return journal_id
@api.model
def _complete_payment_return(self, payret_vals):
"""Complete payment return from information passed."""
account_number = payret_vals.pop('account_number')
if not payret_vals.get('journal_id'):
bank_account_id = self._find_bank_account_id(account_number)
if not bank_account_id and account_number:
raise UserError(
                    _('Cannot find the account number %s.') % account_number)
payret_vals['journal_id'] = self._get_journal(bank_account_id)
# By now journal and account_number must be known
if not payret_vals['journal_id']:
            raise UserError(_('Cannot determine journal for import.'))
for line_vals in payret_vals['transactions']:
unique_import_id = line_vals.get('unique_import_id', False)
if unique_import_id:
line_vals['unique_import_id'] = (
(account_number and (account_number + '-') or '') +
unique_import_id
)
if not line_vals.get('reason'):
reason = self.env['payment.return.reason'].name_search(
line_vals.pop('reason_code'))
if reason:
line_vals['reason'] = reason[0][0]
if 'date' in payret_vals and 'period_id' not in payret_vals:
# if the parser found a date but didn't set a period for this date,
# do this now
try:
payret_vals['period_id'] = (
self.env['account.period'].find(dt=payret_vals['date']).id)
except RedirectWarning:
# if there's no period for the date, ignore resulting exception
pass
return payret_vals
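    # Example (illustrative): with account_number 'BE1234567890' and a raw
    # unique_import_id 'REF0001', the stored identifier becomes
    # 'BE1234567890-REF0001'; _create_payment_return() later uses it to
    # skip transactions that were already imported.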
@api.model
def _create_payment_return(self, payret_vals):
""" Create bank payment return from imported values, filtering out
already imported transactions, and return data used by the
reconciliation widget
"""
pr_model = self.env['payment.return']
prl_model = self.env['payment.return.line']
# Filter out already imported transactions and create payment return
ignored_line_ids = []
filtered_st_lines = []
for line_vals in payret_vals['transactions']:
unique_id = (
'unique_import_id' in line_vals and
line_vals['unique_import_id']
)
if not unique_id or not bool(prl_model.sudo().search(
[('unique_import_id', '=', unique_id)], limit=1)):
filtered_st_lines.append(line_vals)
else:
ignored_line_ids.append(unique_id)
payment_return = pr_model.browse()
if len(filtered_st_lines) > 0:
# Remove values that won't be used to create records
payret_vals.pop('transactions', None)
for line_vals in filtered_st_lines:
line_vals.pop('account_number', None)
# Create the payment return
payret_vals['line_ids'] = [
[0, False, line] for line in filtered_st_lines]
payment_return = pr_model.create(payret_vals)
# Prepare import feedback
notifications = []
num_ignored = len(ignored_line_ids)
if num_ignored > 0:
notifications += [{
'type': 'warning',
'message':
_("%d transactions had already been imported and "
"were ignored.") % num_ignored
if num_ignored > 1
else _("1 transaction had already been imported and "
"was ignored."),
'details': {
'name': _('Already imported items'),
'model': 'payment.return.line',
'ids': prl_model.search(
[('unique_import_id', 'in', ignored_line_ids)]).ids}
}]
return payment_return, notifications
|
agpl-3.0
|
WhireCrow/openwrt-mt7620
|
staging_dir/host/lib/python2.7/bsddb/test/test_dbtables.py
|
74
|
15366
|
#!/usr/bin/env python
#
#-----------------------------------------------------------------------
# A test suite for the table interface built on bsddb.db
#-----------------------------------------------------------------------
#
# Copyright (C) 2000, 2001 by Autonomous Zone Industries
# Copyright (C) 2002 Gregory P. Smith
#
# March 20, 2000
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
# -- Gregory P. Smith <[email protected]>
#
# $Id$
import os, re, sys
if sys.version_info[0] < 3 :
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
else :
import pickle
import unittest
from test_all import db, dbtables, test_support, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class TableDBTestCase(unittest.TestCase):
db_name = 'test-table.db'
def setUp(self):
import sys
if sys.version_info[0] >= 3 :
from test_all import do_proxy_db_py3k
self._flag_proxy_db_py3k = do_proxy_db_py3k(False)
self.testHomeDir = get_new_environment_path()
self.tdb = dbtables.bsdTableDB(
filename='tabletest.db', dbhome=self.testHomeDir, create=1)
def tearDown(self):
self.tdb.close()
import sys
if sys.version_info[0] >= 3 :
from test_all import do_proxy_db_py3k
do_proxy_db_py3k(self._flag_proxy_db_py3k)
test_support.rmtree(self.testHomeDir)
def test01(self):
tabname = "test01"
colname = 'cool numbers'
try:
self.tdb.Drop(tabname)
except dbtables.TableDBError:
pass
self.tdb.CreateTable(tabname, [colname])
import sys
if sys.version_info[0] < 3 :
self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159, 1)})
else :
self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159,
1).decode("iso8859-1")}) # 8 bits
if verbose:
self.tdb._db_print()
values = self.tdb.Select(
tabname, [colname], conditions={colname: None})
import sys
if sys.version_info[0] < 3 :
colval = pickle.loads(values[0][colname])
else :
colval = pickle.loads(bytes(values[0][colname], "iso8859-1"))
self.assertTrue(colval > 3.141)
self.assertTrue(colval < 3.142)
def test02(self):
tabname = "test02"
col0 = 'coolness factor'
col1 = 'but can it fly?'
col2 = 'Species'
import sys
if sys.version_info[0] < 3 :
testinfo = [
{col0: pickle.dumps(8, 1), col1: 'no', col2: 'Penguin'},
{col0: pickle.dumps(-1, 1), col1: 'no', col2: 'Turkey'},
{col0: pickle.dumps(9, 1), col1: 'yes', col2: 'SR-71A Blackbird'}
]
else :
testinfo = [
{col0: pickle.dumps(8, 1).decode("iso8859-1"),
col1: 'no', col2: 'Penguin'},
{col0: pickle.dumps(-1, 1).decode("iso8859-1"),
col1: 'no', col2: 'Turkey'},
{col0: pickle.dumps(9, 1).decode("iso8859-1"),
col1: 'yes', col2: 'SR-71A Blackbird'}
]
try:
self.tdb.Drop(tabname)
except dbtables.TableDBError:
pass
self.tdb.CreateTable(tabname, [col0, col1, col2])
for row in testinfo :
self.tdb.Insert(tabname, row)
import sys
if sys.version_info[0] < 3 :
values = self.tdb.Select(tabname, [col2],
conditions={col0: lambda x: pickle.loads(x) >= 8})
else :
values = self.tdb.Select(tabname, [col2],
conditions={col0: lambda x:
pickle.loads(bytes(x, "iso8859-1")) >= 8})
self.assertEqual(len(values), 2)
if values[0]['Species'] == 'Penguin' :
self.assertEqual(values[1]['Species'], 'SR-71A Blackbird')
elif values[0]['Species'] == 'SR-71A Blackbird' :
self.assertEqual(values[1]['Species'], 'Penguin')
else :
if verbose:
print "values= %r" % (values,)
raise RuntimeError("Wrong values returned!")
def test03(self):
tabname = "test03"
try:
self.tdb.Drop(tabname)
except dbtables.TableDBError:
pass
if verbose:
print '...before CreateTable...'
self.tdb._db_print()
self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
if verbose:
print '...after CreateTable...'
self.tdb._db_print()
self.tdb.Drop(tabname)
if verbose:
print '...after Drop...'
self.tdb._db_print()
self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
try:
self.tdb.Insert(tabname,
{'a': "",
'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
'f': "Zero"})
self.fail('Expected an exception')
except dbtables.TableDBError:
pass
try:
self.tdb.Select(tabname, [], conditions={'foo': '123'})
self.fail('Expected an exception')
except dbtables.TableDBError:
pass
self.tdb.Insert(tabname,
{'a': '42',
'b': "bad",
'c': "meep",
'e': 'Fuzzy wuzzy was a bear'})
self.tdb.Insert(tabname,
{'a': '581750',
'b': "good",
'd': "bla",
'c': "black",
'e': 'fuzzy was here'})
self.tdb.Insert(tabname,
{'a': '800000',
'b': "good",
'd': "bla",
'c': "black",
'e': 'Fuzzy wuzzy is a bear'})
if verbose:
self.tdb._db_print()
# this should return two rows
values = self.tdb.Select(tabname, ['b', 'a', 'd'],
conditions={'e': re.compile('wuzzy').search,
'a': re.compile('^[0-9]+$').match})
self.assertEqual(len(values), 2)
# now lets delete one of them and try again
self.tdb.Delete(tabname, conditions={'b': dbtables.ExactCond('good')})
values = self.tdb.Select(
tabname, ['a', 'd', 'b'],
conditions={'e': dbtables.PrefixCond('Fuzzy')})
self.assertEqual(len(values), 1)
self.assertEqual(values[0]['d'], None)
values = self.tdb.Select(tabname, ['b'],
conditions={'c': lambda c: c == 'meep'})
self.assertEqual(len(values), 1)
self.assertEqual(values[0]['b'], "bad")
def test04_MultiCondSelect(self):
tabname = "test04_MultiCondSelect"
try:
self.tdb.Drop(tabname)
except dbtables.TableDBError:
pass
self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
try:
self.tdb.Insert(tabname,
{'a': "",
'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
'f': "Zero"})
self.fail('Expected an exception')
except dbtables.TableDBError:
pass
self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D",
'e': "E"})
self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D",
'e': "-E"})
self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-",
'e': "E-"})
if verbose:
self.tdb._db_print()
# This select should return 0 rows. it is designed to test
# the bug identified and fixed in sourceforge bug # 590449
# (Big Thanks to "Rob Tillotson (n9mtb)" for tracking this down
# and supplying a fix!! This one caused many headaches to say
# the least...)
values = self.tdb.Select(tabname, ['b', 'a', 'd'],
conditions={'e': dbtables.ExactCond('E'),
'a': dbtables.ExactCond('A'),
'd': dbtables.PrefixCond('-')
} )
self.assertEqual(len(values), 0, values)
def test_CreateOrExtend(self):
tabname = "test_CreateOrExtend"
self.tdb.CreateOrExtendTable(
tabname, ['name', 'taste', 'filling', 'alcohol content', 'price'])
try:
self.tdb.Insert(tabname,
{'taste': 'crap',
'filling': 'no',
'is it Guinness?': 'no'})
self.fail("Insert should've failed due to bad column name")
        except dbtables.TableDBError:
            # Note: a bare except here would also swallow self.fail().
pass
self.tdb.CreateOrExtendTable(tabname,
['name', 'taste', 'is it Guinness?'])
# these should both succeed as the table should contain the union of both sets of columns.
self.tdb.Insert(tabname, {'taste': 'crap', 'filling': 'no',
'is it Guinness?': 'no'})
self.tdb.Insert(tabname, {'taste': 'great', 'filling': 'yes',
'is it Guinness?': 'yes',
'name': 'Guinness'})
def test_CondObjs(self):
tabname = "test_CondObjs"
self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e', 'p'])
self.tdb.Insert(tabname, {'a': "the letter A",
'b': "the letter B",
'c': "is for cookie"})
self.tdb.Insert(tabname, {'a': "is for aardvark",
'e': "the letter E",
'c': "is for cookie",
'd': "is for dog"})
self.tdb.Insert(tabname, {'a': "the letter A",
'e': "the letter E",
'c': "is for cookie",
'p': "is for Python"})
values = self.tdb.Select(
tabname, ['p', 'e'],
conditions={'e': dbtables.PrefixCond('the l')})
self.assertEqual(len(values), 2, values)
self.assertEqual(values[0]['e'], values[1]['e'], values)
self.assertNotEqual(values[0]['p'], values[1]['p'], values)
values = self.tdb.Select(
tabname, ['d', 'a'],
conditions={'a': dbtables.LikeCond('%aardvark%')})
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['d'], "is for dog", values)
self.assertEqual(values[0]['a'], "is for aardvark", values)
values = self.tdb.Select(tabname, None,
{'b': dbtables.Cond(),
'e':dbtables.LikeCond('%letter%'),
'a':dbtables.PrefixCond('is'),
'd':dbtables.ExactCond('is for dog'),
'c':dbtables.PrefixCond('is for'),
'p':lambda s: not s})
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['d'], "is for dog", values)
self.assertEqual(values[0]['a'], "is for aardvark", values)
def test_Delete(self):
tabname = "test_Delete"
self.tdb.CreateTable(tabname, ['x', 'y', 'z'])
# prior to 2001-05-09 there was a bug where Delete() would
# fail if it encountered any rows that did not have values in
# every column.
# Hunted and Squashed by <Donwulff> (Jukka Santala - [email protected])
self.tdb.Insert(tabname, {'x': 'X1', 'y':'Y1'})
self.tdb.Insert(tabname, {'x': 'X2', 'y':'Y2', 'z': 'Z2'})
self.tdb.Delete(tabname, conditions={'x': dbtables.PrefixCond('X')})
values = self.tdb.Select(tabname, ['y'],
conditions={'x': dbtables.PrefixCond('X')})
self.assertEqual(len(values), 0)
def test_Modify(self):
tabname = "test_Modify"
self.tdb.CreateTable(tabname, ['Name', 'Type', 'Access'])
self.tdb.Insert(tabname, {'Name': 'Index to MP3 files.doc',
'Type': 'Word', 'Access': '8'})
self.tdb.Insert(tabname, {'Name': 'Nifty.MP3', 'Access': '1'})
self.tdb.Insert(tabname, {'Type': 'Unknown', 'Access': '0'})
def set_type(type):
if type is None:
return 'MP3'
return type
def increment_access(count):
return str(int(count)+1)
def remove_value(value):
return None
self.tdb.Modify(tabname,
conditions={'Access': dbtables.ExactCond('0')},
mappings={'Access': remove_value})
self.tdb.Modify(tabname,
conditions={'Name': dbtables.LikeCond('%MP3%')},
mappings={'Type': set_type})
self.tdb.Modify(tabname,
conditions={'Name': dbtables.LikeCond('%')},
mappings={'Access': increment_access})
try:
self.tdb.Modify(tabname,
conditions={'Name': dbtables.LikeCond('%')},
mappings={'Access': 'What is your quest?'})
except TypeError:
# success, the string value in mappings isn't callable
pass
else:
raise RuntimeError, "why was TypeError not raised for bad callable?"
# Delete key in select conditions
values = self.tdb.Select(
tabname, None,
conditions={'Type': dbtables.ExactCond('Unknown')})
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['Name'], None, values)
self.assertEqual(values[0]['Access'], None, values)
# Modify value by select conditions
values = self.tdb.Select(
tabname, None,
conditions={'Name': dbtables.ExactCond('Nifty.MP3')})
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['Type'], "MP3", values)
self.assertEqual(values[0]['Access'], "2", values)
# Make sure change applied only to select conditions
values = self.tdb.Select(
tabname, None, conditions={'Name': dbtables.LikeCond('%doc%')})
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['Type'], "Word", values)
self.assertEqual(values[0]['Access'], "9", values)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TableDBTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
gpl-2.0
|
eBay/cronus-agent
|
agent/agent/lib/monitors/system_monitor.py
|
1
|
2306
|
#pylint: disable=E1101,W0105
'''
Copyright 2014 eBay Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Created on Nov 16, 2010
@author: ppa
'''
import os
import platform
import subprocess
import logging
LOG = logging.getLogger(__name__)
class SystemMonitor():
''' monitor that gets system status '''
def __init__(self):
''' constructor '''
LOG.debug("creating %s" %(__name__))
def getOSinfo(self):
''' get value os info '''
return platform.uname()[0] + " " + platform.uname()[2] + " " + platform.uname()[3] + " " + platform.uname()[4]
def getNodeName(self):
''' get node name '''
return platform.uname()[1]
def getPid(self):
''' get process pid'''
return os.getpid()
def getFreeMemory(self):
''' get free real memory in KB '''
try:
            vmstat = subprocess.Popen("vmstat 1 2", shell=True, stdout=subprocess.PIPE)
output = vmstat.communicate()[0].split('\n')
fields = output[1].split()
values = output[3].split()
return int(values[fields.index('free')])
except BaseException:
return 0
def getCpuUsage(self):
''' get cpu usage '''
try:
            vmstat = subprocess.Popen("vmstat 1 2", shell=True, stdout=subprocess.PIPE)
output = vmstat.communicate()[0].split('\n')
fields = output[1].split()
values = output[3].split()
userTime = int(values[fields.index('us')])
systemTime = int(values[fields.index('sy')])
idleTime = int(values[fields.index('id')])
            return float(userTime + systemTime) / (userTime + systemTime + idleTime) * 100
except BaseException:
return 0
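# Example (illustrative): "vmstat 1 2" prints a header, a line of field
# names and two sample lines; the methods above read the second sample
# (the current interval), e.g.
#
#   procs -----------memory---------- ... -----cpu------
#    r  b   swpd   free   buff  cache ... us sy id wa st
#    1  0      0 123456   7890  45678 ...  7  2 90  1  0
#    0  0      0 123400   7890  45678 ...  5  3 92  0  0
#
# getFreeMemory() returns 123400 and getCpuUsage() returns
# (5 + 3) / (5 + 3 + 92) * 100 = 8.0.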
|
apache-2.0
|
FlintHill/SUAS-Competition
|
env/lib/python3.7/site-packages/setuptools/command/alias.py
|
455
|
2426
|
from distutils.errors import DistutilsOptionError
from setuptools.extern.six.moves import map
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
"""Quote an argument for later parsing by shlex.split()"""
for c in '"', "'", "\\", "#":
if c in arg:
return repr(arg)
if arg.split() != [arg]:
return repr(arg)
return arg
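# Example (illustrative): values that contain quoting characters or
# whitespace are wrapped via repr() so that shlex.split() can undo it:
#
#   shquote('plain') == 'plain'
#   shquote('two words') == "'two words'"
#   shquote('say "hi"') == '\'say "hi"\''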
class alias(option_base):
"""Define a shortcut that invokes one or more commands"""
description = "define a shortcut to invoke one or more commands"
command_consumes_arguments = True
user_options = [
('remove', 'r', 'remove (unset) the alias'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.args = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.remove and len(self.args) != 1:
raise DistutilsOptionError(
"Must specify exactly one argument (the alias name) when "
"using --remove"
)
def run(self):
aliases = self.distribution.get_option_dict('aliases')
if not self.args:
print("Command Aliases")
print("---------------")
for alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
elif len(self.args) == 1:
alias, = self.args
if self.remove:
command = None
elif alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
else:
print("No alias definition found for %r" % alias)
return
else:
alias = self.args[0]
command = ' '.join(map(shquote, self.args[1:]))
edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
def format_alias(name, aliases):
source, command = aliases[name]
if source == config_file('global'):
source = '--global-config '
elif source == config_file('user'):
source = '--user-config '
elif source == config_file('local'):
source = ''
else:
source = '--filename=%r' % source
return source + name + ' ' + command
|
mit
|
xZise/pywikibot-core
|
pywikibot/i18n.py
|
1
|
21406
|
# -*- coding: utf-8 -*-
"""
Various i18n functions.
Helper functions for both the internal translation system
and for TranslateWiki-based translations.
By default messages are assumed to reside in a package called
'scripts.i18n'. In pywikibot 2.0, that package is not packaged
with pywikibot, and pywikibot 2.0 does not have a hard dependency
on any i18n messages. However, there are three user input questions
in pagegenerators which will use i18n messages if they can be loaded.
The default message location may be changed by calling
L{set_message_package} with a package name. The package must contain
an __init__.py, and a message bundle called 'pywikibot' containing
messages. See L{twntranslate} for more information on the messages.
"""
#
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import sys
import re
import locale
import json
import os
import pkgutil
from collections import defaultdict
from pywikibot import Error
from .plural import plural_rules
import pywikibot
from . import config2 as config
if sys.version_info[0] > 2:
basestring = (str, )
PLURAL_PATTERN = r'{{PLURAL:(?:%\()?([^\)]*?)(?:\)d)?\|(.*?)}}'
# Package name for the translation messages. The message data must be loaded
# relative to that package name. At the top of this package there should be
# directories named after each script/message bundle, and each directory
# should contain JSON files called <lang>.json
_messages_package_name = 'scripts.i18n'
# Flag to indicate whether translation messages are available
_messages_available = None
# Cache of translated messages
_cache = defaultdict(dict)
def set_messages_package(package_name):
"""Set the package name where i18n messages are located."""
global _messages_package_name
global _messages_available
_messages_package_name = package_name
_messages_available = None
def messages_available():
"""
Return False if there are no i18n messages available.
To determine if messages are available, it looks for the package name
set using L{set_messages_package} for a message bundle called 'pywikibot'
containing messages.
@rtype: bool
"""
global _messages_available
if _messages_available is not None:
return _messages_available
try:
__import__(_messages_package_name)
except ImportError:
_messages_available = False
return False
_messages_available = True
return True
def _altlang(code):
"""Define fallback languages for particular languages.
If no translation is available to a specified language, translate() will
try each of the specified fallback languages, in order, until it finds
one with a translation, with 'en' and '_default' as a last resort.
For example, if for language 'xx', you want the preference of languages
to be: xx > fr > ru > en, you let this method return ['fr', 'ru'].
This code is used by other translating methods below.
@param code: The language code
@type code: string
@return: language codes
@rtype: list of str
"""
# Akan
if code in ['ak', 'tw']:
return ['ak', 'tw']
# Amharic
if code in ['aa', 'ti']:
return ['am']
# Arab
if code in ['arc', 'arz', 'so']:
return ['ar']
if code == 'kab':
return ['ar', 'fr']
# Bulgarian
if code in ['cu', 'mk']:
return ['bg', 'sr', 'sh']
# Czech
if code in ['cs', 'sk']:
return ['cs', 'sk']
# German
if code in ['bar', 'frr', 'ksh', 'pdc', 'pfl']:
return ['de']
if code == 'lb':
return ['de', 'fr']
if code == 'als':
return ['gsw', 'de']
if code == 'nds':
return ['nds-nl', 'de']
if code in ['dsb', 'hsb']:
return ['hsb', 'dsb', 'de']
if code == 'sli':
return ['de', 'pl']
if code == 'rm':
return ['de', 'it']
if code == 'stq':
return ['nds', 'de']
# Greek
if code in ['grc', 'pnt']:
return ['el']
# Esperanto
if code in ['io', 'nov']:
return ['eo']
# Spanish
if code in ['an', 'arn', 'ast', 'ay', 'ca', 'ext', 'lad', 'nah', 'nv', 'qu',
'yua']:
return ['es']
if code in ['gl', 'gn']:
return ['es', 'pt']
if code == 'eu':
return ['es', 'fr']
if code == 'cbk-zam':
return ['es', 'tl']
# Estonian
if code == 'fiu-vro':
return ['et']
if code == 'liv':
return ['et', 'lv']
# Persian (Farsi)
if code == 'ps':
return ['fa']
if code in ['glk', 'mzn']:
return ['glk', 'mzn', 'fa', 'ar']
# Finnish
if code == 'vep':
return ['fi', 'ru']
if code == 'fit':
return ['fi', 'sv']
# French
if code in ['bm', 'br', 'ht', 'kg', 'ln', 'mg', 'nrm', 'pcd',
'rw', 'sg', 'ty', 'wa']:
return ['fr']
if code == 'oc':
return ['fr', 'ca', 'es']
if code in ['co', 'frp']:
return ['fr', 'it']
# Hindi
if code in ['sa']:
return ['hi']
if code in ['ne', 'new']:
return ['ne', 'new', 'hi']
# Indonesian and Malay
if code in ['ace', 'bug', 'bjn', 'id', 'jv', 'ms', 'su']:
return ['id', 'ms', 'jv']
if code == 'map-bms':
return ['jv', 'id', 'ms']
# Inuit languages
if code in ['ik', 'iu']:
return ['iu', 'kl']
if code == 'kl':
return ['da', 'iu', 'no']
# Italian
if code in ['eml', 'fur', 'lij', 'lmo', 'nap', 'pms', 'roa-tara', 'sc',
'scn', 'vec']:
return ['it']
# Lithuanian
if code in ['bat-smg']:
return ['lt']
# Latvian
if code == 'ltg':
return ['lv']
# Dutch
if code in ['af', 'fy', 'li', 'pap', 'srn', 'vls', 'zea']:
return ['nl']
    if code == 'nds-nl':
return ['nds', 'nl']
# Polish
if code in ['csb', 'szl']:
return ['pl']
# Portuguese
if code in ['fab', 'mwl', 'tet']:
return ['pt']
# Romanian
if code in ['mo', 'roa-rup']:
return ['ro']
# Russian and Belarusian
if code in ['ab', 'av', 'ba', 'bxr', 'ce', 'cv', 'inh', 'kk', 'koi', 'krc',
'kv', 'ky', 'lbe', 'lez', 'mdf', 'mhr', 'mn', 'mrj', 'myv',
'os', 'sah', 'tg', 'udm', 'uk', 'xal']:
return ['ru']
if code in ['kbd', 'ady']:
return ['kbd', 'ady', 'ru']
if code == 'tt':
return ['tt-cyrl', 'ru']
if code in ['be', 'be-x-old']:
return ['be', 'be-x-old', 'ru']
if code == 'kaa':
return ['uz', 'ru']
# Serbocroatian
if code in ['bs', 'hr', 'sh']:
return ['sh', 'hr', 'bs', 'sr', 'sr-el']
if code == 'sr':
return ['sr-el', 'sh', 'hr', 'bs']
# Tagalog
if code in ['bcl', 'ceb', 'ilo', 'pag', 'pam', 'war']:
return ['tl']
# Turkish and Kurdish
if code in ['diq', 'ku']:
return ['ku', 'ku-latn', 'tr']
if code == 'gag':
return ['tr']
if code == 'ckb':
return ['ku']
# Ukrainian
if code in ['crh', 'rue']:
return ['uk', 'ru']
# Chinese
if code in ['minnan', 'zh', 'zh-classical', 'zh-min-nan', 'zh-tw',
'zh-hans', 'zh-hant']:
return ['zh', 'zh-tw', 'zh-cn', 'zh-classical']
if code in ['cdo', 'gan', 'hak', 'ii', 'wuu', 'za', 'zh-cdo',
'zh-classical', 'zh-cn', 'zh-yue']:
return ['zh', 'zh-cn', 'zh-tw', 'zh-classical']
# Scandinavian languages
if code in ['da', 'sv']:
return ['da', 'no', 'nb', 'sv', 'nn']
if code in ['fo', 'is']:
return ['da', 'no', 'nb', 'nn', 'sv']
if code == 'nn':
return ['no', 'nb', 'sv', 'da']
if code in ['nb', 'no']:
return ['no', 'nb', 'da', 'nn', 'sv']
if code == 'se':
return ['sv', 'no', 'nb', 'nn', 'fi']
# Other languages
if code in ['bi', 'tpi']:
return ['bi', 'tpi']
if code == 'yi':
return ['he', 'de']
if code in ['ia', 'ie']:
return ['ia', 'la', 'it', 'fr', 'es']
if code == 'xmf':
return ['ka']
if code in ['nso', 'st']:
return ['st', 'nso']
if code in ['kj', 'ng']:
return ['kj', 'ng']
if code in ['meu', 'hmo']:
return ['meu', 'hmo']
    if code == 'as':
return ['bn']
# Default value
return []
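# Example (illustrative): _altlang('als') returns ['gsw', 'de'], so a
# message missing in Alemannic is looked up in Swiss German and then
# German before the callers fall back to 'en'/'_default'.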
class TranslationError(Error, ImportError):
"""Raised when no correct translation could be found."""
# Inherits from ImportError, as this exception is now used
# where previously an ImportError would have been raised,
# and may have been caught by scripts as such.
pass
def _get_translation(lang, twtitle):
"""
Return message of certain twtitle if exists.
For internal use, don't use it directly.
"""
if twtitle in _cache[lang]:
return _cache[lang][twtitle]
message_bundle = twtitle.split('-')[0]
trans_text = None
filename = '%s/%s.json' % (message_bundle, lang)
try:
trans_text = pkgutil.get_data(
_messages_package_name, filename).decode('utf-8')
except (OSError, IOError): # file open can cause several exceptions
_cache[lang][twtitle] = None
return
transdict = json.loads(trans_text)
_cache[lang].update(transdict)
try:
return transdict[twtitle]
except KeyError:
return
def _extract_plural(code, message, parameters):
"""Check for the plural variants in message and replace them.
@param message: the message to be replaced
@type message: unicode string
@param parameters: plural parameters passed from other methods
@type parameters: int, basestring, tuple, list, dict
"""
plural_items = re.findall(PLURAL_PATTERN, message)
if plural_items: # we found PLURAL patterns, process it
if len(plural_items) > 1 and isinstance(parameters, (tuple, list)) and \
len(plural_items) != len(parameters):
raise ValueError("Length of parameter does not match PLURAL "
"occurrences.")
i = 0
for selector, variants in plural_items:
if isinstance(parameters, dict):
num = int(parameters[selector])
elif isinstance(parameters, basestring):
num = int(parameters)
elif isinstance(parameters, (tuple, list)):
num = int(parameters[i])
i += 1
else:
num = parameters
# TODO: check against plural_rules[code]['nplurals']
try:
index = plural_rules[code]['plural'](num)
except KeyError:
index = plural_rules['_default']['plural'](num)
except TypeError:
# we got an int, not a function
index = plural_rules[code]['plural']
repl = variants.split('|')[index]
message = re.sub(PLURAL_PATTERN, repl, message, count=1)
return message
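# Example (illustrative):
#
#   _extract_plural('en', 'Changed {{PLURAL:%(num)d|one page|%(num)d pages}}.',
#                   {'num': 3})
#
# evaluates the English plural rule for 3, picks the second variant and
# returns 'Changed %(num)d pages.' for later %-substitution.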
DEFAULT_FALLBACK = ('_default', )
def translate(code, xdict, parameters=None, fallback=False):
"""Return the most appropriate translation from a translation dict.
    Given a language code and a dictionary, returns the dictionary's value for
    key 'code' if this key exists; otherwise tries to return a value for an
    alternative language that is most applicable to use on the wiki in
    language 'code', unless fallback is False.
    The language itself is always checked first, then languages that
    have been defined to be alternatives, and finally English. If none of
    the options gives a result, we just take one language from xdict, which
    may not always be the same one. When fallback is iterable it'll return
    None if no code applies (instead of returning an arbitrary one).
For PLURAL support have a look at the twntranslate method
@param code: The language code
@type code: string or Site object
@param xdict: dictionary with language codes as keys or extended dictionary
with family names as keys containing language dictionaries or
a single (unicode) string. May contain PLURAL tags as
described in twntranslate
@type xdict: dict, string, unicode
@param parameters: For passing (plural) parameters
@type parameters: dict, string, unicode, int
@param fallback: Try an alternate language code. If it's iterable it'll
also try those entries and choose the first match.
@type fallback: boolean or iterable
"""
family = pywikibot.config.family
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
family = code.family.name
code = code.code
# Check whether xdict has multiple projects
if isinstance(xdict, dict):
if family in xdict:
xdict = xdict[family]
elif 'wikipedia' in xdict:
xdict = xdict['wikipedia']
# Get the translated string
if not isinstance(xdict, dict):
trans = xdict
elif not xdict:
trans = None
else:
codes = [code]
if fallback is True:
codes += _altlang(code) + ['_default', 'en']
elif fallback is not False:
codes += list(fallback)
for code in codes:
if code in xdict:
trans = xdict[code]
break
else:
if fallback is not True:
# this shouldn't simply return "any one" code but when fallback
# was True before 65518573d2b0, it did just that. When False it
# did just return None. It's now also returning None in the new
# iterable mode.
return
code = list(xdict.keys())[0]
trans = xdict[code]
if trans is None:
return # return None if we have no translation found
if parameters is None:
return trans
# else we check for PLURAL variants
trans = _extract_plural(code, trans, parameters)
if parameters:
try:
return trans % parameters
except (KeyError, TypeError):
# parameter is for PLURAL variants only, don't change the string
pass
return trans
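# Example (illustrative):
#
#   translate('als', {'de': 'Hallo', 'en': 'Hello'}, fallback=True)
#
# returns 'Hallo': 'als' itself has no entry, so the _altlang() chain
# ['gsw', 'de'] is tried before '_default' and 'en'.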
def twtranslate(code, twtitle, parameters=None, fallback=True):
"""
Translate a message.
The translations are retrieved from json files in messages_package_name.
fallback parameter must be True for i18n and False for L10N or testing
purposes.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: For passing parameters.
@param fallback: Try an alternate language code
@type fallback: boolean
"""
if not messages_available():
raise TranslationError(
'Unable to load messages package %s for bundle %s'
'\nIt can happen due to lack of i18n submodule or files. '
'Read https://mediawiki.org/wiki/PWB/i18n'
% (_messages_package_name, twtitle))
code_needed = False
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
lang = code.code
# check whether we need the language code back
elif isinstance(code, list):
lang = code.pop()
code_needed = True
else:
lang = code
# There are two possible failure modes: the translation dict might not have
# the language altogether, or a specific key could be untranslated. Both
# modes are caught with the KeyError.
langs = [lang]
if fallback:
langs += _altlang(lang) + ['en']
for alt in langs:
trans = _get_translation(alt, twtitle)
if trans:
break
else:
raise TranslationError(
'No English translation has been defined for TranslateWiki key'
' %r\nIt can happen due to lack of i18n submodule or files. '
'Read https://mediawiki.org/wiki/PWB/i18n' % twtitle)
# send the language code back via the given list
if code_needed:
code.append(alt)
if parameters:
return trans % parameters
else:
return trans
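# Example (illustrative, with a hypothetical message key): a call like
#
#   twtranslate('de', 'example-changing')
#
# looks for 'de.json' in the 'example' bundle, then tries each
# _altlang() fallback and finally 'en', raising TranslationError if the
# key is translated nowhere.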
# Maybe this function should be merged with twtranslate
def twntranslate(code, twtitle, parameters=None):
r"""Translate a message with plural support.
    Support is implemented like in the MediaWiki extension. If the
    TranslateWiki message contains a plural tag which looks like::
        {{PLURAL:<number>|<variant1>|<variant2>[|<variantn>]}}
    it takes the variant calculated by the plural_rules depending on the
    number value. Multiple plurals are allowed.
    As an example, if we had several JSON dictionaries in a test folder like:
en.json:
{
"test-plural": "Bot: Changing %(num)s {{PLURAL:%(num)d|page|pages}}.",
}
fr.json:
{
"test-plural": "Robot: Changer %(descr)s {{PLURAL:num|une page|quelques pages}}.",
}
and so on.
>>> from pywikibot import i18n
>>> i18n.set_messages_package('tests.i18n')
>>> # use a number
>>> str(i18n.twntranslate('en', 'test-plural', 0) % {'num': 'no'})
'Bot: Changing no pages.'
>>> # use a string
>>> str(i18n.twntranslate('en', 'test-plural', '1') % {'num': 'one'})
'Bot: Changing one page.'
>>> # use a dictionary
>>> str(i18n.twntranslate('en', 'test-plural', {'num':2}))
'Bot: Changing 2 pages.'
>>> # use additional format strings
>>> str(i18n.twntranslate('fr', 'test-plural', {'num': 1, 'descr': 'seulement'}))
'Robot: Changer seulement une page.'
>>> # use format strings also outside
>>> str(i18n.twntranslate('fr', 'test-plural', 10) % {'descr': 'seulement'})
'Robot: Changer seulement quelques pages.'
    The translations are retrieved from i18n.<package>, based on the caller's
import table.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: For passing (plural) parameters.
"""
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
code = code.code
# we send the code via list and get the alternate code back
code = [code]
trans = twtranslate(code, twtitle)
# get the alternate language code modified by twtranslate
lang = code.pop()
# check for PLURAL variants
trans = _extract_plural(lang, trans, parameters)
# we always have a dict for replacement of translatewiki messages
if parameters and isinstance(parameters, dict):
try:
return trans % parameters
except KeyError:
# parameter is for PLURAL variants only, don't change the string
pass
return trans
def twhas_key(code, twtitle):
"""
Check if a message has a translation in the specified language code.
    The translations are retrieved from i18n.<package>, based on the caller's
import table.
No code fallback is made.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
"""
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
code = code.code
transdict = _get_translation(code, twtitle)
if transdict is None:
return False
return True
def twget_keys(twtitle):
"""
Return all language codes for a special message.
@param twtitle: The TranslateWiki string title, in <package>-<key> format
"""
# obtain the directory containing all the json files for this package
package = twtitle.split("-")[0]
mod = __import__(_messages_package_name, fromlist=[str('__file__')])
pathname = os.path.join(os.path.dirname(mod.__file__), package)
# build a list of languages in that directory
langs = [filename.partition('.')[0]
for filename in sorted(os.listdir(pathname))
if filename.endswith('.json')]
    # exclude languages that do not have this specific message in that
    # package, i.e. an incomplete set of translated messages.
return [lang for lang in langs
if lang != 'qqq' and
_get_translation(lang, twtitle)]
def input(twtitle, parameters=None, password=False, fallback_prompt=None):
"""
Ask the user a question, return the user's answer.
The prompt message is retrieved via L{twtranslate} and either uses the
config variable 'userinterface_lang' or the default locale as the language
code.
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: The values which will be applied to the translated text
@param password: Hides the user's input (for password entry)
@param fallback_prompt: The English prompt if i18n is not available.
@rtype: unicode string
"""
if not messages_available():
if not fallback_prompt:
raise TranslationError(
'Unable to load messages package %s for bundle %s'
% (_messages_package_name, twtitle))
else:
prompt = fallback_prompt
else:
code = config.userinterface_lang or \
locale.getdefaultlocale()[0].split('_')[0]
prompt = twtranslate(code, twtitle, parameters)
return pywikibot.input(prompt, password)
|
mit
|
auerj/flask-oauthlib
|
docs/conf.py
|
18
|
8262
|
# -*- coding: utf-8 -*-
#
# Flask-OAuthlib documentation build configuration file, created by
# sphinx-quickstart on Fri May 17 21:54:48 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath('_themes'))
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-OAuthlib'
import datetime
copyright = u'2013 - %i, <a href="http://lepture.com/">Hsiaoming Yang</a>' % datetime.datetime.utcnow().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import flask_oauthlib
version = flask_oauthlib.__version__
# The full version, including alpha/beta/rc tags.
release = flask_oauthlib.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'flask-oauthlib.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['brand.html', 'sidebarintro.html', 'searchbox.html'],
'**': ['brand.html', 'localtoc.html', 'relations.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-OAuthlibdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flask-OAuthlib.tex', u'Flask-OAuthlib Documentation',
u'Hsiaoming Yang', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-oauthlib', u'Flask-OAuthlib Documentation',
[u'Hsiaoming Yang'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Flask-OAuthlib', u'Flask-OAuthlib Documentation',
u'Hsiaoming Yang', 'Flask-OAuthlib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
bsd-3-clause
|
cockpituous/cockpit
|
test/selenium/selenium-base.py
|
1
|
8170
|
#!/usr/bin/python3
# we need to be able to find and import seleniumlib, so add this directory
import os
import sys
machine_test_dir = os.path.dirname(os.path.abspath(__file__))
if machine_test_dir not in sys.path:
    sys.path.insert(1, machine_test_dir)
from testlib_avocado.seleniumlib import SeleniumTest, user, clickable, passwd, visible
class BasicTestSuite(SeleniumTest):
"""
:avocado: enable
"""
def test10Base(self):
        # this is a minimal cockpit test that checks the login page
self.wait_id('server-name')
def test15BaseSSHKeyAdded(self):
        # calling self.login() ensures a public ssh key is added for the user,
        # so that machine.execute(...) can be called
self.login()
self.logout()
out = self.machine.execute("hostname")
server_element = self.wait_id('server-name')
self.assertIn(out.strip(), str(server_element.text))
def test20Login(self):
self.login()
user_element = self.wait_id("content-user-name")
self.assertEqual(user_element.text, user)
self.logout()
self.wait_id('server-name')
self.login("baduser", "badpasswd", wait_hostapp=False, add_ssh_key=False)
message_element = self.wait_id('login-error-message')
self.assertIn("Wrong", message_element.text)
self.login()
username_element = self.wait_id("content-user-name")
self.assertEqual(username_element.text, user)
def test30ChangeTabServices(self):
self.login()
self.click(self.wait_link('Services', cond=clickable))
self.wait_frame("services")
self.wait_id("services-list")
self.click(self.wait_text("Socket", cond=clickable))
self.wait_text("udev")
self.wait_id("services-list")
self.click(self.wait_text("Target", cond=clickable))
self.wait_id("services-list")
self.wait_text("reboot.target")
self.click(self.wait_text("System Services", cond=clickable))
self.wait_id("services-list")
self.wait_text("sshd")
self.mainframe()
def test50ChangeTabLogs(self):
self.login()
self.click(self.wait_link('Logs', cond=clickable))
self.wait_frame("logs")
self.wait_id("journal")
self.wait_id("journal-current-day-menu")
self.wait_id("journal-prio")
        # cycle through every priority filter: each step opens the menu showing
        # the current label, picks the next priority, and waits for the journal
        # to reload
        transitions = [
            ('*', "Everything"),
            ('0', "Only Emergency"),
            ('1', "Alert and above"),
            ('2', "Critical and above"),
            ('3', "Error and above"),
            ('4', "Warning and above"),
            ('5', "Notice and above"),
            ('6', "Info and above"),
            ('7', "Debug and above"),
        ]
        current = "Error and above"
        for prio, label in transitions:
            self.click(self.wait_xpath(
                "//span[@id='journal-prio' and contains(text(), '%s')]" % current))
            self.wait_id("prio-lists")
            self.click(self.wait_xpath(
                "//a[@data-prio='%s' and contains(text(), '%s')]" % (prio, label)))
            self.wait_id("journal")
            self.wait_id("journal-current-day-menu")
            current = label
        self.click(self.wait_xpath(
            "//span[@id='journal-prio' and contains(text(), '%s')]" % current))
        self.wait_id("prio-lists")
checkt = "ahojnotice"
self.machine.execute("systemd-cat -p debug echo '%s'" % checkt)
self.click(self.wait_text(checkt, cond=clickable))
self.wait_id('journal-entry')
self.mainframe()
def test70ChangeTabNetworking(self):
self.login()
out = self.machine.execute("/usr/sbin/ip r |grep default | head -1 | cut -d ' ' -f 5").strip()
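        # 'out' now holds the name of the interface that carries the default route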
self.click(self.wait_link('Network', cond=clickable))
self.wait_frame("network")
self.wait_id("networking-interfaces")
self.wait_id("networking-tx-graph")
self.click(self.wait_xpath("//tr[@data-interface='%s']" % out, cond=clickable))
self.wait_text("Carrier", element="td")
self.mainframe()
def test80Accounts(self):
self.login()
username = "selfcheckuser"
self.click(self.wait_link('Accounts', cond=clickable))
self.wait_frame("users")
self.click(self.wait_xpath(
"//*[@class='cockpit-account-user-name' and contains(text(), '%s')]" % user, cond=clickable))
self.wait_id('account')
self.wait_text("Full Name")
self.mainframe()
self.click(self.wait_link('Accounts', cond=clickable))
self.wait_frame('users')
self.wait_id("accounts", cond=visible)
self.click(self.wait_id("accounts-create", cond=clickable))
self.wait_id("accounts-create-dialog")
self.wait_id('accounts-create-create', cond=clickable)
self.send_keys(self.wait_id('accounts-create-real-name'), username)
self.send_keys(self.wait_id('accounts-create-pw1'), passwd)
self.send_keys(self.wait_id('accounts-create-pw2'), passwd)
self.wait_xpath("//span[@id='accounts-create-password-meter-message' and contains(text(), '%s')]" % "Excellent")
self.click(self.wait_id('accounts-create-create', cond=clickable))
self.wait_id("accounts", cond=visible)
self.click(self.wait_xpath(
"//*[@class='cockpit-account-user-name' and contains(text(), '%s')]" % username, cond=clickable))
self.click(self.wait_id('account-delete', cond=clickable))
self.wait_id('account-confirm-delete-dialog')
self.click(self.wait_id('account-confirm-delete-apply', cond=clickable))
self.wait_id("accounts", cond=visible)
self.wait_id("accounts-list", cond=visible)
self.mainframe()
|
lgpl-2.1
|
rendermotion/RMMel
|
creators/connect.py
|
2
|
11473
|
from RMPY.core import dataValidators
import pymel.core as pm
from RMPY.creators import creatorsBase
class Connect(creatorsBase.CreatorsBase):
def __init__(self, *args, **kwargs):
super(Connect, self).__init__(*args, **kwargs)
@staticmethod
def _validate_connection(attribute, input_attribute):
input_attribute = dataValidators.as_pymel_nodes(input_attribute)
if issubclass(attribute.__class__, pm.general.Attribute):
attribute >> input_attribute
else:
input_attribute.set(attribute)
def attributes(self, attribute_source, attribute_destination, operation=1):
"""
        :param attribute_source: The source attribute; it will be plugged into attribute_destination if no
            other connection is coming in on attribute_destination.
            If a connection already exists, a plusMinusAverage node is created,
            and the original attribute and attribute_source are plugged into the XD inputs of
            the plusMinusAverage.
        :param attribute_destination: The destination attribute that will receive the output of the plusMinusAverage node
        :param operation: the operation of the plusMinusAverage node that connects the
            attribute_source with the attribute_destination; by default it is 1 (addition).
        :return:
"""
value = pm.listConnections(attribute_destination, destination=False, plugs=True, skipConversionNodes=False)
value_nodes = pm.listConnections(attribute_destination, destination=False, skipConversionNodes=False)
plus_minus = None
if value:
if pm.objectType(value_nodes[0]) == 'plusMinusAverage':
plus_minus = value_nodes[0]
if pm.getAttr(attribute_source, type=True) in ['float', 'double', 'doubleLinear',
'doubleAngle'] or pm.getAttr(
attribute_source).__class__ in [float, int]:
next_index = len(pm.getAttr('%s.input1D' % plus_minus, multiIndices=True))
pm.connectAttr(attribute_source, '{}.input1D[{}]'.format(plus_minus, next_index))
# elif attribute_source.get(type=True) in [vector]:
# print 'connecting vector'
elif pm.getAttr(attribute_source, type=True) in ['double3'] or pm.getAttr(
attribute_source).__class__ in [list, tuple]:
next_index = len(pm.getAttr('%s.input3D' % plus_minus, multiIndices=True)) % 3
pm.connectAttr(attribute_source, '{}.input3D[{}]'.format(plus_minus, next_index))
else:
print 'could not add data type: %s, %s' % (pm.getAttr(attribute_source, type=True),
pm.getAttr(attribute_source).__class__)
else:
if pm.getAttr(attribute_source, type=True) in ['float', 'double', 'doubleLinear',
'doubleAngle'] or pm.getAttr(
attribute_source).__class__ in [float, int]:
plus_minus = pm.shadingNode("plusMinusAverage", asUtility=True)
plus_minus = self.name_convention.rename_name_in_format(plus_minus,
name='addition{}'.format(
attribute_destination.split('.')[1].title()))
pm.setAttr('{}.operation'.format(plus_minus), operation)
pm.disconnectAttr(value[0], attribute_destination)
pm.connectAttr(value[0], '{}.input1D[0]'.format(plus_minus))
pm.connectAttr(attribute_source, '{}.input1D[1]'.format(plus_minus))
pm.connectAttr('{}.output1D'.format(plus_minus), attribute_destination)
elif pm.getAttr(attribute_source, type=True) in ['double3'] or pm.getAttr(
attribute_source).__class__ in [list, tuple]:
plus_minus = pm.shadingNode("plusMinusAverage", asUtility=True)
plus_minus = self.name_convention.rename_name_in_format(plus_minus,
name='addition{}'.format(
attribute_destination.split('.')[1].title()))
pm.setAttr('{}.operation'.format(plus_minus), operation)
pm.disconnectAttr(value[0], attribute_destination)
pm.connectAttr(value[0], '{}.input3D[0]'.format(plus_minus))
pm.connectAttr(attribute_source, '{}.input3D[1]'.format(plus_minus))
pm.connectAttr('{}.output3D'.format(plus_minus), attribute_destination)
else:
print 'could not add data type: %s class: %s' % (pm.getAttr(attribute_source, type=True),
pm.getAttr(attribute_source).__class__)
return plus_minus
else:
pm.connectAttr(attribute_source, attribute_destination)
return None
    def with_limits(self, attribute_x, attribute_y, keys, operation=1, in_tangent_type='spline',
                    out_tangent_type='spline', post_infinity_type='linear', pre_infinity_type='linear'):
"""
Pre/post InfinityType values: 'constant, 'linear', 'cycle', 'cycleRelative', 'oscillate'
in/out TangentType values: 'global_', 'fixed', 'linear', 'flat', 'smooth', 'step', 'slow',
'fast', 'clamped', 'plateau', 'stepNext', 'auto'
:param attribute_x:
        :param attribute_y:
:param keys:
:param operation:
:param in_tangent_type:
:param out_tangent_type:
:param post_infinity_type:
:param pre_infinity_type:
:return:
"""
attribute_x = dataValidators.as_pymel_nodes(attribute_x)
        attribute_y = dataValidators.as_pymel_nodes(attribute_y)
        value = pm.listConnections(attribute_y, destination=False, plugs=True, skipConversionNodes=False)
plus_minus = None
if value:
if pm.objectType(value[0].node()) == 'plusMinusAverage':
plus_minus = value[0].node()
if attribute_x.get(type=True) in ['double', 'doubleLinear', 'doubleAngle', 'float']:
for eachKey in keys:
pm.setDrivenKeyframe('%s' % plus_minus.input1D[len(plus_minus.input1D.elements())],
currentDriver='%s' % attribute_x, inTangentType=in_tangent_type,
outTangentType=out_tangent_type, dv=eachKey[0], v=eachKey[1])
animation_curve_node = \
pm.listConnections('%s' % plus_minus.input1D[len(plus_minus.input1D.elements())])[0]
self.name_convention.rename_name_in_format(animation_curve_node)
elif attribute_x.get(type=True) in ['double3']:
for eachKey in keys:
pm.setDrivenKeyframe('%s' % plus_minus.input3D[len(plus_minus.input3D.elements()) % 3],
currentDriver='%s' % attribute_x,
inTangentType=in_tangent_type, outTangentType=out_tangent_type,
dv=eachKey[0], v=eachKey[1])
animation_curve_node = \
pm.listConnections('%s' % plus_minus.input3D[len(plus_minus.input3D.elements()) % 3])[0]
self.name_convention.rename_name_in_format(animation_curve_node)
else:
print 'could not add data type: %s' % attribute_x.get(type=True)
else:
if attribute_x.get(type=True) in ['double', 'doubleLinear', 'doubleAngle', 'float']:
plus_minus = pm.shadingNode("plusMinusAverage", asUtility=True, name="additiveConnection")
self.name_convention.rename_name_in_format(plus_minus)
plus_minus.operation.set(operation)
                value[0] // attribute_y
value[0] >> plus_minus.input1D[0]
                plus_minus.output1D >> attribute_y
for eachKey in keys:
pm.setDrivenKeyframe('%s' % plus_minus.input1D[1],
currentDriver='%s' % attribute_x, dv=eachKey[0], v=eachKey[1],
inTangentType=in_tangent_type, outTangentType=out_tangent_type)
animation_curve_node = pm.listConnections('%s' % plus_minus.input1D[1])[0]
self.name_convention.rename_name_in_format(animation_curve_node)
elif attribute_x.get(type=True) in ['double3']:
plus_minus = pm.shadingNode("plusMinusAverage", asUtility=True, name="additiveConnection")
self.name_convention.rename_name_in_format(plus_minus)
plus_minus.operation.set(operation)
                value[0] // attribute_y
value[0] >> plus_minus.input3D[0]
                plus_minus.output3D >> attribute_y
for eachKey in keys:
pm.setDrivenKeyframe('%s' % plus_minus.input3D[1],
currentDriver='%s' % attribute_x, dv=eachKey[0], v=eachKey[1],
inTangentType=in_tangent_type, outTangentType=out_tangent_type)
animation_curve_node = pm.listConnections('%s' % plus_minus.input3D[1])[0]
self.name_convention.rename_name_in_format(animation_curve_node)
else:
print 'could not add data type: %s' % attribute_x.get(type=True)
else:
for eachKey in keys:
                pm.setDrivenKeyframe('%s' % attribute_y, currentDriver='%s' % attribute_x, dv=eachKey[0], v=eachKey[1],
                                     inTangentType=in_tangent_type, outTangentType=out_tangent_type)
            animation_curve_node = pm.listConnections('%s' % attribute_y)[0]
self.name_convention.rename_name_in_format(animation_curve_node)
if issubclass(animation_curve_node.__class__, pm.nodetypes.AnimCurve):
animation_curve_node.setPostInfinityType(post_infinity_type)
animation_curve_node.setPreInfinityType(pre_infinity_type)
return plus_minus, animation_curve_node
def times_factor(self, attr_a, attr_b, factor=1, name='unitConversion'):
unit_conversion = pm.createNode("unitConversion", name=name)
self.name_convention.rename_name_in_format(unit_conversion, name=name)
pm.setAttr('{}.conversionFactor'.format(unit_conversion), factor)
pm.connectAttr(attr_a, '{}.input'.format(unit_conversion))
pm.connectAttr('{}.output'.format(unit_conversion), attr_b)
return unit_conversion
|
lgpl-3.0
|
RickMohr/nyc-trees
|
src/nyc_trees/apps/survey/models.py
|
1
|
8446
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.contrib.gis.db import models
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.utils.timezone import now
from apps.core.models import User, Group
from libs.mixins import NycModel
class Blockface(NycModel, models.Model):
geom = models.MultiLineStringField()
is_available = models.BooleanField(default=True)
expert_required = models.BooleanField(default=False)
objects = models.GeoManager()
def __unicode__(self):
return '%s (available: %s)' % (self.pk, self.is_available)
class Territory(NycModel, models.Model):
group = models.ForeignKey(Group)
blockface = models.OneToOneField(Blockface)
def __unicode__(self):
return '%s -> %s' % (self.group, self.blockface_id)
class Survey(NycModel, models.Model):
# We do not anticipate deleting a Blockface, but we definitely
# should not allow it to be deleted if there is a related Survey
blockface = models.ForeignKey(Blockface, on_delete=models.PROTECT)
# We do not want to lose survey data by allowing the deletion of
# a User object to automatically cascade and delete the Survey
user = models.ForeignKey(User, on_delete=models.PROTECT)
teammate = models.ForeignKey(User, null=True, on_delete=models.PROTECT,
related_name='surveys_as_teammate',
blank=True)
is_flagged = models.BooleanField(default=False)
has_trees = models.BooleanField()
is_mapped_in_blockface_polyline_direction = models.BooleanField()
is_left_side = models.BooleanField()
quit_reason = models.TextField(blank=True)
def __unicode__(self):
return 'block %s on %s by %s' % (self.blockface_id, self.created_at,
self.user)
def get_absolute_url(self):
return reverse('survey_detail', args=[self.pk])
class Species(NycModel, models.Model):
scientific_name = models.CharField(max_length=100)
common_name = models.CharField(max_length=100)
cultivar = models.CharField(max_length=100, blank=True)
forms_id = models.CharField(max_length=10)
species_code = models.CharField(max_length=10)
def __unicode__(self):
return '%s [%s]' % (self.common_name, self.scientific_name)
class Meta:
unique_together = ("scientific_name", "cultivar", "common_name")
CURB_CHOICES = (
('OnCurb', 'Along the curb'),
('OffsetFromCurb', 'Offset from the curb'),
)
STATUS_CHOICES = (
('Alive', 'Tree is alive'),
('Dead', 'Tree is dead'),
('Stump', 'Stump < 24"'),
)
CERTAINTY_CHOICES = (
('Yes', 'Yes'),
('No', 'No'),
('Maybe', 'Maybe'),
)
HEALTH_CHOICES = (
('Good', 'Good'),
('Fair', 'Fair'),
('Poor', 'Poor'),
)
STEWARDSHIP_CHOICES = (
('None', 'Zero'),
('1or2', '1-2'),
('3or4', '3-4'),
('4orMore', '4+'),
)
GUARD_CHOICES = (
('None', 'Not installed'),
('Helpful', 'Helpful'),
('Harmful', 'Harmful'),
('Unsure', 'Unsure'),
)
SIDEWALK_CHOICES = (
('NoDamage', 'No damage'),
('Damage', 'Cracks or raised'),
)
PROBLEMS_CHOICES = (
('None', 'No problems'),
('Root problems', (
('Stones', 'Sidewalk or stones'),
('MetalGrates', 'Metal grates'),
('RootOther', 'Other'))),
('Trunk problems', (
('WiresRope', 'Wires or rope'),
('TrunkLights', 'Lights'),
('TrunkOther', 'Other'))),
('Branch problems', (
('BranchLights', 'Lights or wires'),
('Sneakers', 'Sneakers'),
('BranchOther', 'Other')))
)
def flatten_categorized_choices(choices):
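    # e.g. flatten_categorized_choices(PROBLEMS_CHOICES) yields
    # (('None', 'No problems'), ('Stones', 'Sidewalk or stones'),
    #  ('MetalGrates', 'Metal grates'), ('RootOther', 'Other'), ...)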
flat = []
for choice in choices:
if isinstance(choice[1], tuple):
for group_choice in choice[1:]:
flat.extend(group_choice)
else:
flat.append(choice)
return tuple(flat)
class Tree(NycModel, models.Model):
survey = models.ForeignKey(Survey)
# We do not anticipate deleting a Species, but we definitely
# should not allow it to be deleted if there is a related Tree
species = models.ForeignKey(Species, null=True, blank=True,
on_delete=models.PROTECT)
distance_to_tree = models.FloatField()
distance_to_end = models.FloatField(null=True, blank=True)
circumference = models.PositiveIntegerField(null=True, blank=True)
stump_diameter = models.PositiveIntegerField(null=True, blank=True)
curb_location = models.CharField(max_length=25, choices=CURB_CHOICES)
status = models.CharField(max_length=15, choices=STATUS_CHOICES)
# The following fields are collected only when status == 'Alive',
# hence blank=True
species_certainty = models.CharField(
blank=True, max_length=15, choices=CERTAINTY_CHOICES)
health = models.CharField(
blank=True, max_length=15, choices=HEALTH_CHOICES)
stewardship = models.CharField(
blank=True, max_length=15, choices=STEWARDSHIP_CHOICES)
guards = models.CharField(
blank=True, max_length=15, choices=GUARD_CHOICES)
sidewalk_damage = models.CharField(
blank=True, max_length=15, choices=SIDEWALK_CHOICES)
problems = models.CharField(blank=True, max_length=130)
def __unicode__(self):
t = 'id: %s - survey: %s - dist: %s'
return t % (self.id, self.survey.id, self.distance_to_tree)
def clean(self):
if self.status != 'Alive':
self.species_certainty = ''
self.health = ''
self.stewardship = ''
self.guards = ''
self.sidewalk_damage = ''
self.problems = ''
if self.problems:
probs = self.problems.split(',')
if len(probs) != len(set(probs)):
raise ValidationError({'problems': ['Duplicate entry']})
codes = [pair[0]
for pair in flatten_categorized_choices(PROBLEMS_CHOICES)]
for code in probs:
if code not in codes:
raise ValidationError({'problems': [
'Invalid entry: %s' % code]})
if self.distance_to_tree is not None and self.distance_to_tree < 0:
raise ValidationError({'distance_to_tree': ['Cannot be negative']})
if self.distance_to_end is not None and self.distance_to_end < 0:
raise ValidationError({'distance_to_end': ['Cannot be negative']})
if self.circumference is not None and self.circumference <= 0:
raise ValidationError({'circumference': ['Must be positive']})
if self.stump_diameter is not None and self.stump_diameter <= 0:
raise ValidationError({'stump_diameter': ['Must be positive']})
if self.status in {'Alive', 'Dead'}:
if not self.circumference:
raise ValidationError({'circumference': ['Field is required']})
if self.stump_diameter:
raise ValidationError(
{'stump_diameter': ['Only valid for Stumps']})
elif self.status == 'Stump':
if not self.stump_diameter:
raise ValidationError(
{'stump_diameter': ['Field is required']})
if self.circumference:
raise ValidationError(
{'circumference': ['Not valid for Stumps']})
class ReservationsQuerySet(models.QuerySet):
def current(self):
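        # a reservation is current when it has not been canceled, has not
        # expired, and its blockface is still available for mapping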
return self \
.filter(canceled_at__isnull=True) \
.filter(expires_at__gt=now()) \
.filter(blockface__is_available=True)
class BlockfaceReservation(NycModel, models.Model):
user = models.ForeignKey(User)
# We do not plan on Blockface records being deleted, but we should
# make sure that a Blockface that has been reserved cannot be
# deleted out from under a User who had planned to map it.
blockface = models.ForeignKey(Blockface, on_delete=models.PROTECT)
is_mapping_with_paper = models.BooleanField(default=False)
expires_at = models.DateTimeField()
canceled_at = models.DateTimeField(null=True, blank=True)
reminder_sent_at = models.DateTimeField(null=True, blank=True)
objects = ReservationsQuerySet.as_manager()
def __unicode__(self):
return '%s -> %s' % (self.user, self.blockface_id)
|
apache-2.0
|
paolodedios/tensorflow
|
tensorflow/lite/experimental/mlir/testing/op_tests/tensor_list_length.py
|
6
|
2271
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tensor_list_length."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import list_ops
@register_make_test_function()
def make_tensor_list_length_tests(options):
"""Make a set of tests to do TensorListLength."""
test_parameters = [
{
"element_dtype": [tf.float32, tf.int32],
"num_elements": [4, 5, 6],
"element_shape": [[], [5], [3, 3]],
},
]
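  # make_zip_of_tests expands each dict above into the cross product of its
  # value lists, so this grid yields 2 * 3 * 3 = 18 test cases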
def build_graph(parameters):
"""Build the TensorListLength op testing graph."""
data = tf.placeholder(
dtype=parameters["element_dtype"],
shape=[parameters["num_elements"]] + parameters["element_shape"])
tensor_list = list_ops.tensor_list_from_tensor(data,
parameters["element_shape"])
out = list_ops.tensor_list_length(tensor_list)
return [data], [out]
def build_inputs(parameters, sess, inputs, outputs):
data = create_tensor_data(parameters["element_dtype"],
[parameters["num_elements"]] +
parameters["element_shape"])
return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
apache-2.0
|
pchauncey/ansible
|
lib/ansible/modules/system/interfaces_file.py
|
7
|
13752
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, Roman Belyakovsky <ihryamzik () gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: interfaces_file
short_description: Tweak settings in /etc/network/interfaces files
extends_documentation_fragment: files
description:
- Manage (add, remove, change) individual interface options in an interfaces-style file without having
    to manage the file as a whole with, say, M(template) or M(assemble). The interface has to be present in the file.
- Read information about interfaces from interfaces-styled files
version_added: "2.4"
options:
dest:
description:
- Path to the interfaces file
required: false
default: /etc/network/interfaces
iface:
description:
- Name of the interface, required for value changes or option remove
required: false
default: null
option:
description:
- Name of the option, required for value changes or option remove
required: false
default: null
value:
description:
      - If I(option) is not present for the I(interface) and I(state) is C(present), the option will be added.
        If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
        C(pre-up), C(up), C(post-up) and C(down) options cannot be updated; only adding new options, removing
        existing ones or cleaning the whole option set are supported.
required: false
default: null
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- If set to C(absent) the option or section will be removed if present instead of created.
required: false
default: "present"
choices: [ "present", "absent" ]
notes:
    - If an option is defined multiple times, the last one will be updated, but all occurrences will be deleted when I(state) is C(absent)
requirements: []
author: "Roman Belyakovsky (@hryamzik)"
'''
RETURN = '''
dest:
description: destination file/path
returned: success
type: string
sample: "/etc/network/interfaces"
ifaces:
description: interfaces dictionary
returned: success
type: complex
contains:
ifaces:
description: interface dictionary
returned: success
type: dictionary
contains:
eth0:
description: Name of the interface
returned: success
type: dictionary
contains:
address_family:
description: interface address family
returned: success
type: string
sample: "inet"
method:
description: interface method
returned: success
type: string
sample: "manual"
mtu:
description: other options, all values returned as strings
returned: success
type: string
sample: "1500"
pre-up:
description: list of C(pre-up) scripts
returned: success
type: list
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
up:
description: list of C(up) scripts
returned: success
type: list
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
post-up:
description: list of C(post-up) scripts
returned: success
type: list
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
down:
description: list of C(down) scripts
returned: success
type: list
sample:
- "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
...
'''
EXAMPLES = '''
# Set eth1 mtu configuration value to 8000
- interfaces_file:
dest: /etc/network/interfaces.d/eth1.cfg
iface: eth1
option: mtu
value: 8000
backup: yes
state: present
register: eth1_cfg
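# A further sketch (not from the original module docs): remove the mtu option
# again; with state=absent every matching option line for the interface is deleted
- interfaces_file:
    dest: /etc/network/interfaces.d/eth1.cfg
    iface: eth1
    option: mtu
    state: absent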
'''
import os
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
def lineDict(line):
return {'line': line, 'line_type': 'unknown'}
def optionDict(line, iface, option, value):
return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option'}
def getValueFromLine(s):
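    # return the value part of an option line: the text between the end of the
    # option token and the last run of whitespace (usually the trailing newline)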
    spaceRe = re.compile(r'\s+')
for m in spaceRe.finditer(s):
pass
valueEnd = m.start()
option = s.split()[0]
optionStart = s.find(option)
optionLen = len(option)
    valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
return s[valueStart:valueEnd]
def read_interfaces_file(module, filename):
f = open(filename, 'r')
return read_interfaces_lines(module, f)
def read_interfaces_lines(module, line_strings):
lines = []
ifaces = {}
currently_processing = None
i = 0
for line in line_strings:
i += 1
words = line.split()
if len(words) < 1:
lines.append(lineDict(line))
continue
if words[0][0] == "#":
lines.append(lineDict(line))
continue
if words[0] == "mapping":
# currmap = calloc(1, sizeof *currmap);
lines.append(lineDict(line))
currently_processing = "MAPPING"
elif words[0] == "source":
lines.append(lineDict(line))
currently_processing = "NONE"
elif words[0] == "source-dir":
lines.append(lineDict(line))
currently_processing = "NONE"
elif words[0] == "iface":
currif = {
"pre-up": [],
"up": [],
"down": [],
"post-up": []
}
            if len(words) != 4:
                module.fail_json(msg="Incorrect number of parameters (%d) in line %d, must be exactly 3" % (len(words), i))
                # TODO: put line and count parameters
                return None, None
            iface_name, address_family_name, method_name = words[1:4]
currif['address_family'] = address_family_name
currif['method'] = method_name
ifaces[iface_name] = currif
lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif})
currently_processing = "IFACE"
elif words[0] == "auto":
lines.append(lineDict(line))
currently_processing = "NONE"
elif words[0] == "allow-":
lines.append(lineDict(line))
currently_processing = "NONE"
elif words[0] == "no-auto-down":
lines.append(lineDict(line))
currently_processing = "NONE"
elif words[0] == "no-scripts":
lines.append(lineDict(line))
currently_processing = "NONE"
else:
if currently_processing == "IFACE":
option_name = words[0]
# TODO: if option_name in currif.options
value = getValueFromLine(line)
lines.append(optionDict(line, iface_name, option_name, value))
if option_name in ["pre-up", "up", "down", "post-up"]:
currif[option_name].append(value)
else:
currif[option_name] = value
elif currently_processing == "MAPPING":
lines.append(lineDict(line))
elif currently_processing == "NONE":
lines.append(lineDict(line))
else:
module.fail_json(msg="misplaced option %s in line %d" % (line, i))
return None, None
return lines, ifaces
def setInterfaceOption(module, lines, iface, option, raw_value, state):
value = str(raw_value)
changed = False
iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface]
if len(iface_lines) < 1:
# interface not found
module.fail_json(msg="Error: interface %s not found" % iface)
return changed
iface_options = list(filter(lambda i: i['line_type'] == 'option', iface_lines))
target_options = list(filter(lambda i: i['option'] == option, iface_options))
if state == "present":
if len(target_options) < 1:
changed = True
# add new option
last_line_dict = iface_lines[-1]
lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options)
else:
if option in ["pre-up", "up", "down", "post-up"]:
if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
changed = True
lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options)
else:
# if more than one option found edit the last one
if target_options[-1]['value'] != value:
changed = True
target_option = target_options[-1]
old_line = target_option['line']
old_value = target_option['value']
prefix_start = old_line.find(option)
optionLen = len(option)
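                    # find the old value inside the original line (tolerating
                    # internal whitespace differences) and splice the new value
                    # in, keeping the surrounding formatting intact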
                    old_value_position = re.search(r"\s+".join(old_value.split()), old_line[prefix_start + optionLen:])
start = old_value_position.start() + prefix_start + optionLen
end = old_value_position.end() + prefix_start + optionLen
line = old_line[:start] + value + old_line[end:]
index = len(lines) - lines[::-1].index(target_option) - 1
lines[index] = optionDict(line, iface, option, value)
elif state == "absent":
if len(target_options) >= 1:
if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
for target_option in filter(lambda i: i['value'] == value, target_options):
changed = True
lines = list(filter(lambda l: l != target_option, lines))
else:
changed = True
for target_option in target_options:
lines = list(filter(lambda l: l != target_option, lines))
else:
module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
return changed, lines
def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options):
last_line = last_line_dict['line']
prefix_start = last_line.find(last_line.split()[0])
suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
prefix = last_line[:prefix_start]
if len(iface_options) < 1:
        # interface has no options, indent the new one
prefix += " "
line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
option_dict = optionDict(line, iface, option, value)
index = len(lines) - lines[::-1].index(last_line_dict)
lines.insert(index, option_dict)
return lines
def write_changes(module, lines, dest):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd, 'wb')
f.writelines(lines)
f.close()
module.atomic_move(tmpfile, os.path.realpath(dest))
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(default='/etc/network/interfaces', required=False, type='path'),
iface=dict(required=False),
option=dict(required=False),
value=dict(required=False),
backup=dict(default='no', type='bool'),
state=dict(default='present', choices=['present', 'absent']),
),
add_file_common_args=True,
supports_check_mode=True
)
dest = module.params['dest']
iface = module.params['iface']
option = module.params['option']
value = module.params['value']
backup = module.params['backup']
state = module.params['state']
if option is not None and iface is None:
module.fail_json(msg="Inteface must be set if option is defined")
if option is not None and state == "present" and value is None:
module.fail_json(msg="Value must be set if option is defined and state is 'present'")
lines, ifaces = read_interfaces_file(module, dest)
changed = False
if option is not None:
changed, lines = setInterfaceOption(module, lines, iface, option, value, state)
if changed:
_, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])
if changed and not module.check_mode:
if backup:
module.backup_local(dest)
write_changes(module, [d['line'] for d in lines if 'line' in d], dest)
module.exit_json(dest=dest, changed=changed, ifaces=ifaces)
if __name__ == '__main__':
main()
|
gpl-3.0
|
benjaoming/kolibri
|
kolibri/plugins/learn/templatetags/learn_tags.py
|
12
|
1182
|
"""
Learn template tags
===================
Tags for including learn app javascript assets in a template. To use:
.. code-block:: html
{% load learn_tags %}
<!-- Render inclusion tag for frontend JS elements -->
{% learn_assets %}
"""
from __future__ import absolute_import, print_function, unicode_literals
from django import template
from kolibri.core.webpack.utils import webpack_asset_render
from .. import hooks
register = template.Library()
@register.simple_tag()
def learn_assets():
"""
    Using this tag in a template will inject script tags that include the javascript
    assets defined by any concrete hook that subclasses LearnSyncHook.
    :return: HTML of script tags to insert into the template
"""
return webpack_asset_render(hooks.LearnSyncHook, async=False)
@register.simple_tag()
def learn_async_assets():
"""
    Using this tag in a template will inject script tags that include the javascript
    assets defined by any concrete hook that subclasses LearnAsyncHook.
    :return: HTML of script tags to insert into the template
"""
return webpack_asset_render(hooks.LearnAsyncHook, async=True)
|
mit
|
CoderBotOrg/coderbotsrv
|
server/lib/wtforms/ext/dateutil/fields.py
|
20
|
2605
|
"""
A DateTimeField and DateField that use the `dateutil` package for parsing.
"""
from dateutil import parser
from wtforms.fields import Field
from wtforms.validators import ValidationError
from wtforms.widgets import TextInput
__all__ = (
'DateTimeField', 'DateField',
)
class DateTimeField(Field):
"""
DateTimeField represented by a text input, accepts all input text formats
that `dateutil.parser.parse` will.
:param parse_kwargs:
A dictionary of keyword args to pass to the dateutil parse() function.
See dateutil docs for available keywords.
:param display_format:
A format string to pass to strftime() to format dates for display.
"""
widget = TextInput()
def __init__(self, label=None, validators=None, parse_kwargs=None,
display_format='%Y-%m-%d %H:%M', **kwargs):
super(DateTimeField, self).__init__(label, validators, **kwargs)
if parse_kwargs is None:
parse_kwargs = {}
self.parse_kwargs = parse_kwargs
self.display_format = display_format
def _value(self):
if self.raw_data:
return u' '.join(self.raw_data)
else:
return self.data and self.data.strftime(self.display_format) or u''
def process_formdata(self, valuelist):
if valuelist:
date_str = u' '.join(valuelist)
if not date_str:
self.data = None
raise ValidationError(self.gettext(u'Please input a date/time value'))
parse_kwargs = self.parse_kwargs.copy()
if 'default' not in parse_kwargs:
try:
parse_kwargs['default'] = self.default()
except TypeError:
parse_kwargs['default'] = self.default
try:
self.data = parser.parse(date_str, **parse_kwargs)
except ValueError:
self.data = None
raise ValidationError(self.gettext(u'Invalid date/time input'))
class DateField(DateTimeField):
"""
Same as the DateTimeField, but stores only the date portion.
"""
def __init__(self, label=None, validators=None, parse_kwargs=None,
display_format='%Y-%m-%d', **kwargs):
super(DateField, self).__init__(label, validators, parse_kwargs=parse_kwargs, display_format=display_format, **kwargs)
def process_formdata(self, valuelist):
super(DateField, self).process_formdata(valuelist)
if self.data is not None and hasattr(self.data, 'date'):
self.data = self.data.date()
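# A minimal usage sketch (the Form subclass and field names below are
# hypothetical, not part of this module):
#
#     from wtforms.form import Form
#
#     class EventForm(Form):
#         starts_at = DateTimeField('Starts at', display_format='%Y-%m-%d %H:%M')
#         on_date = DateField('On date', parse_kwargs={'dayfirst': True})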
|
gpl-3.0
|
studioml/studio
|
examples/keras/train_mnist_keras_mutligpu.py
|
1
|
1623
|
import sys
from keras.layers import Input, Dense
from keras.models import Model
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint, TensorBoard
from studio import fs_tracker
from studio import multi_gpu
# this placeholder will contain our input digits, as flat vectors
img = Input((784,))
# fully-connected layer with 128 units and ReLU activation
x = Dense(128, activation='relu')(img)
x = Dense(128, activation='relu')(x)
# output layer with 10 units and a softmax activation
preds = Dense(10, activation='softmax')(x)
no_gpus = 2
batch_size = 128
model = Model(img, preds)
model = multi_gpu.make_parallel(model, no_gpus)
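# make_parallel replicates the model across no_gpus devices; each training
# step then splits the incoming batch between the GPUs, which is why the
# per-step batch in model.fit below is batch_size * no_gpus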
model.compile(loss='categorical_crossentropy', optimizer='adam')
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
checkpointer = ModelCheckpoint(
fs_tracker.get_model_directory() +
'/checkpoint.{epoch:02d}-{val_loss:.2f}.hdf')
tbcallback = TensorBoard(log_dir=fs_tracker.get_tensorboard_dir(),
histogram_freq=0,
write_graph=True,
write_images=False)
model.fit(
x_train,
y_train,
validation_data=(x_test, y_test),
epochs=int(sys.argv[1]),
batch_size=batch_size * no_gpus,
callbacks=[checkpointer, tbcallback])
|
apache-2.0
|
lovexiaov/SandwichApp
|
venv/lib/python2.7/site.py
|
784
|
27543
|
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
ModuleType = type(os)
def makepath(*paths):
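    # returns both the absolute path and its normcase() form; the latter is
    # what known_paths stores for duplicate detection on case-insensitive
    # file systems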
dir = os.path.join(*paths)
if _is_jython and (dir == '__classpath__' or
dir.startswith('__pyclasspath__')):
return dir, dir
dir = os.path.abspath(dir)
return dir, os.path.normcase(dir)
def abs__file__():
"""Set all module' __file__ attribute to an absolute path"""
for m in sys.modules.values():
if ((_is_jython and not isinstance(m, ModuleType)) or
hasattr(m, '__loader__')):
# only modules need the abspath in Jython. and don't mess
# with a PEP 302-supplied __file__
continue
f = getattr(m, '__file__', None)
if f is None:
continue
m.__file__ = os.path.abspath(f)
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path[-1]), s)
sys.path.append(s)
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Add a new path to known_paths by combining sitedir and 'name' or execute
sitedir if it starts with 'import'"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
try:
for line in f:
if line.startswith("#"):
continue
if line.startswith("import"):
exec(line)
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
finally:
f.close()
if reset:
known_paths = None
return known_paths
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
names.sort()
for name in names:
if name.endswith(os.extsep + "pth"):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
"""Add site-packages (and possibly site-python) to sys.path"""
prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
if exec_prefix != sys_prefix:
prefixes.append(os.path.join(exec_prefix, "local"))
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys_prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version[0] == '2':
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
else:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return None
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if hasattr(sys, 'flags') and getattr(sys.flags, 'no_user_site', False):
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
USER_BASE is the root directory for all Python versions
USER_SITE is the user specific site-packages directory
USER_SITE/.. can be used for data.
"""
global USER_BASE, USER_SITE, ENABLE_USER_SITE
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
#if sys.platform in ('os2emx', 'riscos'):
# # Don't know what to put here
# USER_BASE = ''
# USER_SITE = ''
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser(base, "Python")
USER_SITE = os.path.join(USER_BASE,
"Python" + sys.version[0] + sys.version[2],
"site-packages")
else:
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser("~", ".local")
USER_SITE = os.path.join(USER_BASE, "lib",
"python" + sys.version[:3],
"site-packages")
if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
addsitedir(USER_SITE, known_paths)
if ENABLE_USER_SITE:
for dist_libdir in ("lib", "local/lib"):
user_site = os.path.join(USER_BASE, dist_libdir,
"python" + sys.version[:3],
"dist-packages")
if os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch SystemExit, but they do notice when their
# stdin wrapper is closed, so close it before raising.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
builtins.quit = Quitter('quit')
builtins.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = open(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print(self.__lines[i])
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
try:
key = raw_input(prompt)
except NameError:
key = input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
"""Set 'copyright' and 'credits' in __builtin__"""
builtins.copyright = _Printer("copyright", sys.copyright)
if _is_jython:
builtins.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
elif _is_pypy:
builtins.credits = _Printer(
"credits",
"PyPy is maintained by the PyPy developers: http://pypy.org/")
else:
builtins.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
builtins.license = _Printer(
"license", "See http://www.python.org/%.3s/license.html" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
builtins.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
def virtual_install_main_packages():
f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
sys.real_prefix = f.read().strip()
f.close()
pos = 2
hardcoded_relative_dirs = []
if sys.path[0] == '':
pos += 1
if _is_jython:
paths = [os.path.join(sys.real_prefix, 'Lib')]
elif _is_pypy:
if sys.version_info > (3, 2):
cpyver = '%d' % sys.version_info[0]
elif sys.pypy_version_info >= (1, 5):
cpyver = '%d.%d' % sys.version_info[:2]
else:
cpyver = '%d.%d.%d' % sys.version_info[:3]
paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
os.path.join(sys.real_prefix, 'lib-python', cpyver)]
if sys.pypy_version_info < (1, 9):
paths.insert(1, os.path.join(sys.real_prefix,
'lib-python', 'modified-%s' % cpyver))
hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
#
# This is hardcoded in the Python executable, but relative to sys.prefix:
for path in paths[:]:
plat_path = os.path.join(path, 'plat-%s' % sys.platform)
if os.path.exists(plat_path):
paths.append(plat_path)
elif sys.platform == 'win32':
paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
else:
paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
if os.path.exists(lib64_path):
if _is_64bit:
paths.insert(0, lib64_path)
else:
paths.append(lib64_path)
# This is hardcoded in the Python executable, but relative to
# sys.prefix. Debian change: we need to add the multiarch triplet
# here, which is where the real stuff lives. As per PEP 421, in
# Python 3.3+, this lives in sys.implementation, while in Python 2.7
# it lives in sys.
try:
arch = getattr(sys, 'implementation', sys)._multiarch
except AttributeError:
# This is a non-multiarch aware Python. Fallback to the old way.
arch = sys.platform
plat_path = os.path.join(sys.real_prefix, 'lib',
'python'+sys.version[:3],
'plat-%s' % arch)
if os.path.exists(plat_path):
paths.append(plat_path)
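# For example, on a Debian amd64 multiarch build the triplet is
# 'x86_64-linux-gnu', yielding an extra entry like
# <real_prefix>/lib/python2.7/plat-x86_64-linux-gnu (version illustrative).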
# This is hardcoded in the Python executable, but
# relative to sys.prefix, so we have to fix up:
for path in list(paths):
tk_dir = os.path.join(path, 'lib-tk')
if os.path.exists(tk_dir):
paths.append(tk_dir)
# These are hardcoded in the Apple's Python executable,
# but relative to sys.prefix, so we have to fix them up:
if sys.platform == 'darwin':
hardcoded_paths = [os.path.join(relative_dir, module)
for relative_dir in hardcoded_relative_dirs
for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]
for path in hardcoded_paths:
if os.path.exists(path):
paths.append(path)
sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
"""
Force easy_installed eggs in the global environment to get placed
in sys.path after all packages inside the virtualenv. This
maintains the "least surprise" result that packages in the
virtualenv always mask global packages, never the other way
around.
"""
egginsert = getattr(sys, '__egginsert', 0)
for i, path in enumerate(sys.path):
if i > egginsert and path.startswith(sys.prefix):
egginsert = i
sys.__egginsert = egginsert + 1
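# Worked example (sys.path entries are illustrative): for
# ['', '/venv/lib/python2.7/site-packages', '/usr/lib/python2.7/dist-packages']
# with sys.prefix == '/venv', the last prefix-owned entry is index 1, so
# sys.__egginsert becomes 2 and easy_install places global eggs after every
# virtualenv-owned entry instead of at the front of sys.path.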
def virtual_addsitepackages(known_paths):
force_global_eggs_after_local_site_packages()
return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
"""Adjust the special classpath sys.path entries for Jython. These
entries should follow the base virtualenv lib directories.
"""
paths = []
classpaths = []
for path in sys.path:
if path == '__classpath__' or path.startswith('__pyclasspath__'):
classpaths.append(path)
else:
paths.append(path)
sys.path = paths
sys.path.extend(classpaths)
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
def main():
global ENABLE_USER_SITE
virtual_install_main_packages()
abs__file__()
paths_in_sys = removeduppaths()
if (os.name == "posix" and sys.path and
os.path.basename(sys.path[-1]) == "Modules"):
addbuilddir()
if _is_jython:
fixclasspath()
GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
if not GLOBAL_SITE_PACKAGES:
ENABLE_USER_SITE = False
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
paths_in_sys = addsitepackages(paths_in_sys)
paths_in_sys = addusersitepackages(paths_in_sys)
if GLOBAL_SITE_PACKAGES:
paths_in_sys = virtual_addsitepackages(paths_in_sys)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
 2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print("sys.path = [")
for dir in sys.path:
print(" %r," % (dir,))
print("]")
def exists(path):
if os.path.isdir(path):
return "exists"
else:
return "doesn't exist"
print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_BASE)))
print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print(os.pathsep.join(buffer))
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
sys.exit(10)
if __name__ == '__main__':
_script()
|
apache-2.0
|
rob356/SickRage
|
tornado/test/websocket_test.py
|
19
|
14504
|
from __future__ import absolute_import, division, print_function, with_statement
import traceback
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest
from tornado.web import Application, RequestHandler
from tornado.util import u
try:
import tornado.websocket
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
try:
from tornado import speedups
except ImportError:
speedups = None
class TestWebSocketHandler(WebSocketHandler):
"""Base class for testing handlers that exposes the on_close event.
This allows for deterministic cleanup of the associated socket.
"""
def initialize(self, close_future, compression_options=None):
self.close_future = close_future
self.compression_options = compression_options
def get_compression_options(self):
return self.compression_options
def on_close(self):
self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
def on_message(self, message):
self.write_message(message, isinstance(message, bytes))
class ErrorInOnMessageHandler(TestWebSocketHandler):
def on_message(self, message):
1/0
class HeaderHandler(TestWebSocketHandler):
def open(self):
try:
# In a websocket context, many RequestHandler methods
# raise RuntimeErrors.
self.set_status(503)
raise Exception("did not get expected exception")
except RuntimeError:
pass
self.write_message(self.request.headers.get('X-Test', ''))
class NonWebSocketHandler(RequestHandler):
def get(self):
self.write('ok')
class CloseReasonHandler(TestWebSocketHandler):
def open(self):
self.on_close_called = False
self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
@gen.coroutine
def prepare(self):
yield gen.moment
def on_message(self, message):
self.write_message(message)
class WebSocketBaseTestCase(AsyncHTTPTestCase):
@gen.coroutine
def ws_connect(self, path, compression_options=None):
ws = yield websocket_connect(
'ws://127.0.0.1:%d%s' % (self.get_http_port(), path),
compression_options=compression_options)
raise gen.Return(ws)
@gen.coroutine
def close(self, ws):
"""Close a websocket connection and wait for the server side.
If we don't wait here, there are sometimes leak warnings in the
tests.
"""
ws.close()
yield self.close_future
class WebSocketTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(close_future=self.close_future)),
('/non_ws', NonWebSocketHandler),
('/header', HeaderHandler, dict(close_future=self.close_future)),
('/close_reason', CloseReasonHandler,
dict(close_future=self.close_future)),
('/error_in_on_message', ErrorInOnMessageHandler,
dict(close_future=self.close_future)),
('/async_prepare', AsyncPrepareHandler,
dict(close_future=self.close_future)),
])
def test_http_request(self):
# WS server, HTTP client.
response = self.fetch('/echo')
self.assertEqual(response.code, 400)
@gen_test
def test_websocket_gen(self):
ws = yield self.ws_connect('/echo')
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
def test_websocket_callbacks(self):
websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port(),
io_loop=self.io_loop, callback=self.stop)
ws = self.wait().result()
ws.write_message('hello')
ws.read_message(self.stop)
response = self.wait().result()
self.assertEqual(response, 'hello')
self.close_future.add_done_callback(lambda f: self.stop())
ws.close()
self.wait()
@gen_test
def test_binary_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(b'hello \xe9', binary=True)
response = yield ws.read_message()
self.assertEqual(response, b'hello \xe9')
yield self.close(ws)
@gen_test
def test_unicode_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(u('hello \u00e9'))
response = yield ws.read_message()
self.assertEqual(response, u('hello \u00e9'))
yield self.close(ws)
@gen_test
def test_error_in_on_message(self):
ws = yield self.ws_connect('/error_in_on_message')
ws.write_message('hello')
with ExpectLog(app_log, "Uncaught exception"):
response = yield ws.read_message()
self.assertIs(response, None)
yield self.close(ws)
@gen_test
def test_websocket_http_fail(self):
with self.assertRaises(HTTPError) as cm:
yield self.ws_connect('/notfound')
self.assertEqual(cm.exception.code, 404)
@gen_test
def test_websocket_http_success(self):
with self.assertRaises(WebSocketError):
yield self.ws_connect('/non_ws')
@gen_test
def test_websocket_network_fail(self):
sock, port = bind_unused_port()
sock.close()
with self.assertRaises(IOError):
with ExpectLog(gen_log, ".*"):
yield websocket_connect(
'ws://127.0.0.1:%d/' % port,
io_loop=self.io_loop,
connect_timeout=3600)
@gen_test
def test_websocket_close_buffered_data(self):
ws = yield websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port())
ws.write_message('hello')
ws.write_message('world')
# Close the underlying stream.
ws.stream.close()
yield self.close_future
@gen_test
def test_websocket_headers(self):
# Ensure that arbitrary headers can be passed through websocket_connect.
ws = yield websocket_connect(
HTTPRequest('ws://127.0.0.1:%d/header' % self.get_http_port(),
headers={'X-Test': 'hello'}))
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_server_close_reason(self):
ws = yield self.ws_connect('/close_reason')
msg = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(msg, None)
self.assertEqual(ws.close_code, 1001)
self.assertEqual(ws.close_reason, "goodbye")
# The on_close callback is called no matter which side closed.
yield self.close_future
@gen_test
def test_client_close_reason(self):
ws = yield self.ws_connect('/echo')
ws.close(1001, 'goodbye')
code, reason = yield self.close_future
self.assertEqual(code, 1001)
self.assertEqual(reason, 'goodbye')
@gen_test
def test_async_prepare(self):
# Previously, an async prepare method triggered a bug that would
# result in a timeout on test shutdown (and a memory leak).
ws = yield self.ws_connect('/async_prepare')
ws.write_message('hello')
res = yield ws.read_message()
self.assertEqual(res, 'hello')
@gen_test
def test_check_origin_valid_no_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_valid_with_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d/something' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_invalid_partial_url(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': '127.0.0.1:%d' % port}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
# Host is 127.0.0.1, which should not be accessible from some other
# domain
headers = {'Origin': 'http://somewhereelse.com'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid_subdomains(self):
port = self.get_http_port()
url = 'ws://localhost:%d/echo' % port
# Subdomains should be disallowed by default. If we could pass a
# resolver to websocket_connect we could test sibling domains as well.
headers = {'Origin': 'http://subtenant.localhost'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
class CompressionTestMixin(object):
MESSAGE = 'Hello world. Testing 123 123'
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(
close_future=self.close_future,
compression_options=self.get_server_compression_options())),
])
def get_server_compression_options(self):
return None
def get_client_compression_options(self):
return None
@gen_test
def test_message_sizes(self):
ws = yield self.ws_connect(
'/echo',
compression_options=self.get_client_compression_options())
# Send the same message three times so we can measure the
# effect of the context_takeover options.
for i in range(3):
ws.write_message(self.MESSAGE)
response = yield ws.read_message()
self.assertEqual(response, self.MESSAGE)
self.assertEqual(ws.protocol._message_bytes_out, len(self.MESSAGE) * 3)
self.assertEqual(ws.protocol._message_bytes_in, len(self.MESSAGE) * 3)
self.verify_wire_bytes(ws.protocol._wire_bytes_in,
ws.protocol._wire_bytes_out)
yield self.close(ws)
class UncompressedTestMixin(CompressionTestMixin):
"""Specialization of CompressionTestMixin when we expect no compression."""
def verify_wire_bytes(self, bytes_in, bytes_out):
# Bytes out includes the 4-byte mask key per message.
self.assertEqual(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertEqual(bytes_in, 3 * (len(self.MESSAGE) + 2))
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_client_compression_options(self):
return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
def get_client_compression_options(self):
return {}
def verify_wire_bytes(self, bytes_in, bytes_out):
self.assertLess(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertLess(bytes_in, 3 * (len(self.MESSAGE) + 2))
# Bytes out includes the 4-byte mask key per message (3 messages -> +12).
self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
# Subclasses should define self.mask(mask, data)
def test_mask(self):
self.assertEqual(self.mask(b'abcd', b''), b'')
self.assertEqual(self.mask(b'abcd', b'b'), b'\x03')
self.assertEqual(self.mask(b'abcd', b'54321'), b'TVPVP')
self.assertEqual(self.mask(b'ZXCV', b'98765432'), b'c`t`olpd')
# Include test cases with \x00 bytes (to ensure that the C
# extension isn't depending on null-terminated strings) and
# bytes with the high bit set (to smoke out signedness issues).
self.assertEqual(self.mask(b'\x00\x01\x02\x03',
b'\xff\xfb\xfd\xfc\xfe\xfa'),
b'\xff\xfa\xff\xff\xfe\xfb')
self.assertEqual(self.mask(b'\xff\xfb\xfd\xfc',
b'\x00\x01\x02\x03\x04\x05'),
b'\xff\xfa\xff\xff\xfb\xfe')
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return speedups.websocket_mask(mask, data)
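# For reference, these tests exercise RFC 6455 frame masking: each payload
# byte is XORed with the 4-byte mask key, cycling through the key. A
# minimal pure-Python sketch (not Tornado's actual implementation):
def _reference_websocket_mask(mask, data):
    # Repeat the key to cover the payload, then XOR byte-for-byte.
    repeated = bytearray(mask * (len(data) // 4 + 1))
    return bytes(bytearray(b ^ k for b, k in zip(bytearray(data), repeated)))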
|
gpl-3.0
|
Godiyos/python-for-android
|
python3-alpha/python3-src/Lib/importlib/test/util.py
|
46
|
3919
|
from contextlib import contextmanager
import imp
import os.path
from test import support
import unittest
import sys
CASE_INSENSITIVE_FS = True
# Windows is the only OS that is *always* case-insensitive
# (OS X *can* be case-sensitive).
if sys.platform not in ('win32', 'cygwin'):
changed_name = __file__.upper()
if changed_name == __file__:
changed_name = __file__.lower()
if not os.path.exists(changed_name):
CASE_INSENSITIVE_FS = False
def case_insensitive_tests(test):
"""Class decorator that nullifies tests requiring a case-insensitive
file system."""
return unittest.skipIf(not CASE_INSENSITIVE_FS,
"requires a case-insensitive filesystem")(test)
@contextmanager
def uncache(*names):
"""Uncache a module from sys.modules.
A basic sanity check is performed to prevent uncaching modules that
cannot or should not be uncached.
"""
for name in names:
if name in ('sys', 'marshal', 'imp'):
raise ValueError(
"cannot uncache {0} as it will break _importlib".format(name))
try:
del sys.modules[name]
except KeyError:
pass
try:
yield
finally:
for name in names:
try:
del sys.modules[name]
except KeyError:
pass
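# Minimal usage sketch (module name is hypothetical): inside the block the
# module is absent from sys.modules, so an import goes through the full
# import machinery again; on exit it is removed once more, even if the
# body raised.
#
#     with uncache('example_mod'):
#         import example_mod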
@contextmanager
def import_state(**kwargs):
"""Context manager to manage the various importers and stored state in the
sys module.
The 'modules' attribute is not supported as the interpreter state stores a
pointer to the dict that the interpreter uses internally;
reassigning to sys.modules does not have the desired effect.
"""
originals = {}
try:
for attr, default in (('meta_path', []), ('path', []),
('path_hooks', []),
('path_importer_cache', {})):
originals[attr] = getattr(sys, attr)
if attr in kwargs:
new_value = kwargs[attr]
del kwargs[attr]
else:
new_value = default
setattr(sys, attr, new_value)
if len(kwargs):
raise ValueError(
'unrecognized arguments: {0}'.format(kwargs.keys()))
yield
finally:
for attr, value in originals.items():
setattr(sys, attr, value)
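# Usage sketch: run a block with a single custom finder on sys.meta_path
# and empty path hooks, restoring the real import state afterwards. Here
# `finder` stands for any object implementing find_module(), and the
# module name is hypothetical.
def _import_state_example(finder):
    with import_state(meta_path=[finder], path_hooks=[]):
        return __import__('some_name_only_finder_knows')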
class mock_modules:
"""A mock importer/loader."""
def __init__(self, *names):
self.modules = {}
for name in names:
if not name.endswith('.__init__'):
import_name = name
else:
import_name = name[:-len('.__init__')]
if '.' not in name:
package = None
elif import_name == name:
package = name.rsplit('.', 1)[0]
else:
package = import_name
module = imp.new_module(import_name)
module.__loader__ = self
module.__file__ = '<mock __file__>'
module.__package__ = package
module.attr = name
if import_name != name:
module.__path__ = ['<mock __path__>']
self.modules[import_name] = module
def __getitem__(self, name):
return self.modules[name]
def find_module(self, fullname, path=None):
if fullname not in self.modules:
return None
else:
return self
def load_module(self, fullname):
if fullname not in self.modules:
raise ImportError
else:
sys.modules[fullname] = self.modules[fullname]
return self.modules[fullname]
def __enter__(self):
self._uncache = uncache(*self.modules.keys())
self._uncache.__enter__()
return self
def __exit__(self, *exc_info):
self._uncache.__exit__(None, None, None)
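# Combined example (package/module names are illustrative): install a mock
# package and import through it under a clean import state.
def _mock_modules_example():
    with mock_modules('pkg.__init__', 'pkg.mod') as importer:
        with import_state(meta_path=[importer]):
            return __import__('pkg.mod')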
|
apache-2.0
|
looopTools/sw9-source
|
.waf-1.9.8-6657823688b736c1d1a4e2c4e8e198b4/waflib/extras/wurf/configuration.py
|
1
|
1593
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os
from.error import Error
class Configuration(object):
RESOLVE='resolve'
LOAD='load'
HELP='help'
RESOLVE_AND_LOCK='resolve_and_lock'
RESOLVE_FROM_LOCK='resolve_from_lock'
LOCK_FILE='lock_resolve.json'
def __init__(self,project_path,args,options,waf_lock_file):
self.project_path=project_path
self.args=args
self.options=options
self.waf_lock_file=waf_lock_file
def resolver_chain(self):
if self.choose_help():
return Configuration.HELP
elif self.choose_resolve_and_lock():
return Configuration.RESOLVE_AND_LOCK
elif self.choose_resolve_from_lock():
return Configuration.RESOLVE_FROM_LOCK
elif self.choose_resolve():
return Configuration.RESOLVE
else:
return Configuration.LOAD
def choose_help(self):
if'-h'in self.args or'--help'in self.args:
return True
if'configure'in self.args:
return False
waf_lock_path=os.path.join(self.project_path,self.waf_lock_file)
if not os.path.isfile(waf_lock_path):
return True
return False
def choose_resolve_from_lock(self):
if not'configure'in self.args:
return False
lock_file=os.path.join(self.project_path,'lock_resolve.json')
if not os.path.isfile(lock_file):
return False
return True
def choose_resolve_and_lock(self):
if not'configure'in self.args:
return False
if self.options.lock_paths()or self.options.lock_versions():
return True
return False
def choose_resolve(self):
if'configure'in self.args:
return True
return False
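# Illustrative decision order (command lines are made up): '-h'/'--help',
# or any non-configure run before the waf lock file exists, selects HELP;
# 'waf configure --lock_paths' (or --lock_versions) selects
# RESOLVE_AND_LOCK and writes lock_resolve.json; a later 'waf configure'
# with that file present selects RESOLVE_FROM_LOCK; a plain
# 'waf configure' selects RESOLVE; everything else selects LOAD.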
|
mit
|
keeprocking/pygelf
|
tests/helper.py
|
1
|
1566
|
import uuid
import time
import logging
import pytest
import requests
@pytest.yield_fixture
def logger(handler):
logger = logging.getLogger('test')
logger.addHandler(handler)
yield logger
logger.removeHandler(handler)
def log_warning(logger, message, args=None, fields=None):
args = args if args else []
fields = fields if fields else []
logger.warning(message, *args)
api_response = _get_api_response(message % args, fields)
return _parse_api_response(api_response)
def log_exception(logger, message, exception, fields=None):
fields = fields if fields else []
logger.exception(exception)
api_response = _get_api_response(message, fields)
return _parse_api_response(api_response)
def get_unique_message():
return str(uuid.uuid4())
DEFAULT_FIELDS = [
'message', 'full_message', 'source', 'level',
'func', 'file', 'line', 'module', 'logger_name',
]
BASE_API_URL = 'http://127.0.0.1:9000/api/search/universal/relative?query={0}&range=5&fields='
def _build_api_string(message, fields):
return BASE_API_URL.format(message) + '%2C'.join(set(DEFAULT_FIELDS + fields))
def _get_api_response(message, fields):
time.sleep(3)
url = _build_api_string(message, fields)
api_response = requests.get(url, auth=('admin', 'admin'), headers={'accept': 'application/json'})
return api_response
def _parse_api_response(api_response):
assert api_response.status_code == 200
messages = api_response.json()['messages']
assert len(messages) == 1
return messages[0]['message']
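# Example of the query URL assembled above (message id and extra field are
# illustrative): _build_api_string('deadbeef', ['custom']) yields
# 'http://127.0.0.1:9000/api/search/universal/relative?query=deadbeef&range=5&fields='
# followed by the '%2C'-joined set of DEFAULT_FIELDS plus 'custom'.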
|
mit
|
GoogleCloudPlatform/professional-services-data-validator
|
third_party/ibis/ibis_DB2/tests/test_functions.py
|
1
|
26547
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from datetime import datetime
import ibm_db_sa
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from pytest import param
import ibis
import ibis.config as config
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis import literal as L
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('ibm_db_sa')
pytestmark = pytest.mark.DB2
@pytest.mark.parametrize(
('left_func', 'right_func'),
[
param(
lambda t: t.double_col.cast('int8'),
lambda at: sa.cast(at.c.double_col, sa.SMALLINT),
id='double_to_int8',
),
param(
lambda t: t.double_col.cast('int16'),
lambda at: sa.cast(at.c.double_col, sa.SMALLINT),
id='double_to_int16',
),
param(
lambda t: t.string_col.cast('double'),
lambda at: sa.cast(at.c.string_col, ibm_db_sa.DOUBLE),
id='string_to_double',
),
param(
lambda t: t.string_col.cast('float'),
lambda at: sa.cast(at.c.string_col, ibm_db_sa.REAL),
id='string_to_float',
),
param(
lambda t: t.string_col.cast('decimal'),
lambda at: sa.cast(at.c.string_col, sa.NUMERIC(9, 0)),
id='string_to_decimal_no_params',
),
param(
lambda t: t.string_col.cast('decimal(9, 3)'),
lambda at: sa.cast(at.c.string_col, sa.NUMERIC(9, 3)),
id='string_to_decimal_params',
),
],
)
def test_cast(alltypes, at, translate, left_func, right_func):
left = left_func(alltypes)
right = right_func(at)
assert str(translate(left).compile()) == str(right.compile())
def test_date_cast(alltypes, at, translate):
result = alltypes.date_string_col.cast('date')
expected = sa.cast(at.c.date_string_col, sa.DATE)
assert str(translate(result)) == str(expected)
@pytest.mark.parametrize(
'column',
[
'index',
'unnamed: 0',
'id',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'date_string_col',
'string_col',
'timestamp_col',
'YEAR',
'MONTH',
],
)
def test_noop_cast(alltypes, at, translate, column):
col = alltypes[column]
result = col.cast(col.type())
expected = at.c[column]
assert result.equals(col)
assert str(translate(result)) == str(expected)
def test_timestamp_cast_noop(alltypes, at, translate):
result1 = alltypes.timestamp_col.cast('timestamp')
result2 = alltypes.int_col.cast('timestamp')
assert isinstance(result1, ir.TimestampColumn)
assert isinstance(result2, ir.TimestampColumn)
expected1 = at.c.timestamp_col
# expected2 = sa.func.timezone('UTC', sa.func.to_timestamp(at.c.int_col))
assert str(translate(result1)) == str(expected1)
# assert str(translate(result2)) == str(expected2)
@pytest.mark.parametrize(
('func', 'expected'),
[
param(operator.methodcaller('year'), 2015, id='year'),
param(operator.methodcaller('month'), 9, id='month'),
param(operator.methodcaller('day'), 1, id='day'),
param(operator.methodcaller('hour'), 14, id='hour'),
param(operator.methodcaller('minute'), 48, id='minute'),
param(operator.methodcaller('second'), 5, id='second'),
param(lambda x: x.day_of_week.index(), 3, id='day_of_week_index'),
param(
lambda x: x.day_of_week.full_name(),
'Tuesday',
id='day_of_week_full_name',
),
],
)
def test_simple_datetime_operations(con, func, expected, translate):
value = L('2015-09-01 14:48:05.359').cast(dt.string)
final_value = value.cast(dt.timestamp)
assert con.execute(func(final_value)) == expected
@pytest.mark.parametrize(
('func', 'expected'),
[param(operator.methodcaller('millisecond'), 359, id='millisecond')],
)
def test_simple_datetime_operation_millisecond(con, func, expected, translate):
value = L('2015-09-01 14:48:05.359').cast(dt.string)
final_value = value.cast(dt.timestamp)
assert int(float(con.execute(func(final_value)))) == expected
@pytest.mark.parametrize(
('func', 'left', 'right', 'expected'),
[
param(operator.truediv, L(12), L(4), 3, id='truediv_no_remainder'),
param(operator.pow, L(12), L(2), 144, id='pow'),
param(operator.truediv, L(7), L(2), 3.5, id='truediv_remainder'),
param(operator.floordiv, L(7), L(2), 3, id='floordiv'),
param(
lambda x, y: x.floordiv(y), L(7), 2, 3, id='floordiv_no_literal'
),
param(
lambda x, y: x.rfloordiv(y), L(2), 7, 3, id='rfloordiv_no_literal'
),
],
)
def test_binary_arithmetic_1(con, func, left, right, expected):
expr = func(left, right)
result = con.execute(expr)
assert result == expected
@pytest.mark.parametrize(
('func', 'left', 'right', 'expected'),
[
param(
operator.add, L(3).cast(dt.int16), L(4).cast(dt.int16), 7, id='add'
),
param(operator.sub, L(3), L(4), -1, id='sub'),
param(operator.mul, L(3), L(4), 12, id='mul'),
param(operator.mod, L(12), L(5), 2, id='mod'),
],
)
def test_binary_arithmetic_2(con, func, left, right, expected):
expr = func(left, right)
result = round(float(con.execute(expr)))
assert result == expected
def test_nullifzero(alltypes, df):
expr = alltypes.limit(100).int_col.nullifzero()
result = expr.execute()
expected = df.int_col.replace(to_replace=[0], value=[np.nan])
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
def test_string_length(alltypes, df):
expr = alltypes.limit(100).string_col.length()
result = expr.execute()
expected = df.string_col.str.len().astype('int32')
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('op', 'expected'),
[
param(operator.methodcaller('left', 3), 'foo', id='left'),
param(operator.methodcaller('substr', 4), 'bar', id='right'),
param(operator.methodcaller('substr', 0, 3), 'foo', id='substr_0_3'),
param(operator.methodcaller('substr', 4, 3), 'bar', id='substr_4_3'),
param(operator.methodcaller('substr', 1), 'oo_bar', id='substr_1'),
],
)
def test_string_substring(con, op, expected):
value = L('foo_bar')
assert con.execute(op(value)) == expected
@pytest.mark.parametrize(
('opname', 'expected'),
[('lstrip', 'foo '), ('rstrip', ' foo'), ('strip', 'foo')],
)
def test_string_strip(con, opname, expected):
op = operator.methodcaller(opname)
value = L(' foo ')
assert con.execute(op(value)) == expected
@pytest.mark.parametrize(
('opname', 'count', 'char', 'expected'),
[('lpad', 6, ' ', ' foo'), ('rpad', 6, ' ', 'foo ')],
)
def test_string_pad(con, opname, count, char, expected):
op = operator.methodcaller(opname, count, char)
value = L('foo')
assert con.execute(op(value)) == expected
# def test_string_reverse(con):
#     assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
assert con.execute(L('FOO').lower()) == 'foo'
@pytest.mark.parametrize(
('value', 'expected'),
[('foo bar foo', 'Foo Bar Foo'), ('foobar Foo', 'Foobar Foo')],
)
def test_capitalize(con, value, expected):
assert con.execute(L(value).capitalize()) == expected
def test_repeat(con):
expr = L('bar ').repeat(3)
assert con.execute(expr) == 'bar bar bar '
def test_re_replace(con):
expr = L('HI HOW ARE YOU').re_replace('( ){2,}', ' ')
assert con.execute(expr) == 'HI HOW ARE YOU'
def test_translate(con):
expr = L('faab').translate('b', 'a')
assert con.execute(expr) == 'fbbb'
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(L('foobar').find('bar'), 3, id='find_pos'),
param(L('foobar').find('baz'), -1, id='find_neg'),
param(L('foobar').like('%bar'), True, id='like_left_pattern'),
param(L('foobar').like('foo%'), True, id='like_right_pattern'),
param(L('foobar').like('%baz%'), False, id='like_both_sides_pattern'),
param(L('foobar').like(['%bar']), True, id='like_list_left_side'),
param(L('foobar').like(['foo%']), True, id='like_list_right_side'),
param(L('foobar').like(['%baz%']), False, id='like_list_both_sides'),
param(
L('foobar').like(['%bar', 'foo%']), True, id='like_list_multiple'
),
param(L('foobarfoo').replace('foo', 'H'), 'HbarH', id='replace'),
param(L('a').ascii_str(), ord('a'), id='ascii_str'),
],
)
def test_string_functions(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(
L('abcd').re_extract('([a-z]+)', 0), 'abcd', id='re_extract_whole'
),
# valid group number but no match => NULL
param(
L('abcd').re_extract(r'(\d)', 0), None, id='re_extract_no_match'
),
# match but not a valid group number => NULL
param(L('abcd').re_extract('abcd', 3), None, id='re_extract_match'),
],
)
def test_regexp_extract(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
param(L('abcd').re_search('[a-z]'), True, id='re_search_match'),
param(L('abcd').re_search(r'[\d]+'), False, id='re_search_no_match'),
param(
L('1222').re_search(r'[\d]+'), True, id='re_search_match_number'
),
],
)
def test_regexp(con, expr, expected):
assert con.execute(expr) == expected
def test_fillna(pointer):
expr = pointer.limit(100).marks.fillna(5)
result = (expr.execute()).tolist()
expected = [500, 460, 480, 390, 410, 410, 500, 460, 480, 390, 5, 5]
assert result == expected
def test_numeric_builtins_work(alltypes, df):
expr = alltypes.double_col.fillna(0)
result = expr.execute()
expected = df.double_col.fillna(0).astype(object)
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
def test_group_concat(alltypes, df):
expr = alltypes.string_col.group_concat()
result = expr.execute()
expected = ','.join(df.string_col.dropna())
assert result == expected
def test_distinct_aggregates(alltypes, df):
expr = alltypes.limit(100).double_col.nunique()
result = expr.execute()
assert result == df.head(100).double_col.nunique()
@pytest.mark.parametrize(
('distinct1', 'distinct2', 'expected1', 'expected2'),
[
(True, True, 'UNION', 'UNION'),
(True, False, 'UNION', 'UNION ALL'),
(False, True, 'UNION ALL', 'UNION'),
(False, False, 'UNION ALL', 'UNION ALL'),
],
)
def test_union_cte(alltypes, distinct1, distinct2, expected1, expected2):
t = alltypes
expr1 = t.group_by(t.string_col).aggregate(metric=t.double_col.sum())
expr2 = expr1.view()
expr3 = expr1.view()
expr = expr1.union(expr2, distinct=distinct1).union(
expr3, distinct=distinct2
)
result = '\n'.join(
map(
lambda line: line.rstrip(), # strip trailing whitespace
str(
expr.compile().compile(compile_kwargs=dict(literal_binds=True))
).splitlines(),
)
)
expected = """\
WITH anon_1 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM "FUNCTIONAL_ALLTYPES" AS t0 GROUP BY t0.string_col),
anon_2 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM "FUNCTIONAL_ALLTYPES" AS t0 GROUP BY t0.string_col),
anon_3 AS
(SELECT t0.string_col AS string_col, sum(t0.double_col) AS metric
FROM "FUNCTIONAL_ALLTYPES" AS t0 GROUP BY t0.string_col)
(SELECT anon_1.string_col, anon_1.metric
FROM anon_1 {} SELECT anon_2.string_col, anon_2.metric
FROM anon_2) {} SELECT anon_3.string_col, anon_3.metric
FROM anon_3""".format(
expected1, expected2
)
assert str(result) == expected
def test_interactive_repr_shows_error(alltypes):
expr = alltypes.double_col.approx_median()
with config.option_context('interactive', True):
result = repr(expr)
assert 'no translation rule' in result.lower()
def test_window_with_arithmetic(alltypes, df):
t = alltypes
w = ibis.window(order_by=t.timestamp_col)
expr = t.mutate(new_col=ibis.row_number().over(w) / 2)
df = (
df[['timestamp_col']]
.sort_values('timestamp_col')
.reset_index(drop=True)
)
expected = df.assign(new_col=[float(int(x / 2.0)) for x in range(len(df))])
result = expr['timestamp_col', 'new_col'].execute()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'min', 'max'])
def test_cumulative_ordered_window(alltypes, func, df):
t = alltypes
df = df.sort_values('timestamp_col').reset_index(drop=True)
window = ibis.cumulative_window(order_by=t.timestamp_col)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
result = result.astype('int64')
expected = df.double_col - getattr(df.double_col, 'cum%s' % func)()
expected = expected.astype('int64')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['sum', 'min', 'max'])
def test_cumulative_partitioned_ordered_window(alltypes, func, df):
t = alltypes
df = df.sort_values(['string_col', 'timestamp_col']).reset_index(drop=True)
window = ibis.cumulative_window(
order_by=t.timestamp_col, group_by=t.string_col
)
f = getattr(t.double_col, func)
expr = t.projection([(t.double_col - f().over(window)).name('double_col')])
result = expr.execute().double_col
method = operator.methodcaller('cum{}'.format(func))
expected = df.groupby(df.string_col).double_col.transform(
lambda c: c - method(c)
)
tm.assert_series_equal(result, expected)
def test_anonymous_aggregate(alltypes, df):
t = alltypes
expr = t[t.double_col > t.double_col.mean()]
result = expr.execute()
expected = df[df.double_col > df.double_col.mean()].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_head(con):
t = con.table('functional_alltypes')
result = t.head().execute()
expected = t.limit(5).execute()
tm.assert_frame_equal(result, expected)
def test_rank(con):
t = con.table('functional_alltypes')
expr = t.double_col.rank()
sqla_expr = expr.compile()
result = str(sqla_expr.compile(compile_kwargs=dict(literal_binds=True)))
expected = (
"SELECT rank() OVER (ORDER BY t0.double_col) - 1 AS tmp \n"
"FROM functional_alltypes AS t0"
)
assert result == expected
def test_percent_rank(con):
t = con.table('functional_alltypes')
expr = t.double_col.percent_rank()
sqla_expr = expr.compile()
result = str(sqla_expr.compile(compile_kwargs=dict(literal_binds=True)))
expected = (
"SELECT percent_rank() OVER (ORDER BY t0.double_col) AS "
"tmp \nFROM functional_alltypes AS t0"
)
assert result == expected
def test_ntile(con):
t = con.table('functional_alltypes')
expr = t.double_col.ntile(7)
sqla_expr = expr.compile()
result = str(sqla_expr.compile(compile_kwargs=dict(literal_binds=False)))
expected = (
"SELECT ntile(?) OVER (ORDER BY t0.double_col) - ? AS tmp \n"
"FROM functional_alltypes AS t0"
)
assert result == expected
def test_null_column(alltypes):
t = alltypes
nrows = t.execute().shape[0]
expr = t.mutate(na_column=ibis.NA).na_column
result = expr.execute()
tm.assert_series_equal(result, pd.Series([None] * nrows, name='na_column'))
@pytest.mark.parametrize('opname', ['neg'])
def test_not_and_negate_bool(con, opname, df):
op = getattr(operator, opname)
t = con.table('functional_alltypes').limit(10)
expr = t.projection([op(t.MONTH).name('MONTH')])
result = expr.execute().MONTH
expected = op(df.head(10).MONTH)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['mean', 'sum', 'min', 'max'])
def test_simple_window(alltypes, func, df):
t = alltypes
f = getattr(t.double_col, func)
df_f = getattr(df.double_col, func)
result = (
t.projection([(t.double_col - f()).name('double_col')])
.execute()
.double_col
)
expected = df.double_col - df_f()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'field',
[
'id',
'double_col',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'MONTH',
'YEAR',
],
)
def test_negate_non_boolean(con, field, df):
t = con.table('functional_alltypes').limit(10)
expr = t.projection([(-t[field]).name(field)])
result = expr.execute()[field]
expected = -df.head(10)[field]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('opname', ['sum', 'mean', 'min', 'max', 'std'])
def test_boolean_reduction(alltypes, opname, df):
op = operator.methodcaller(opname)
expr = op(alltypes.id)
result = expr.execute()
assert result == op(df.id)
@pytest.mark.parametrize(
('opname', 'expected'),
[
('year', [2011, 2012, 2013, 2014, 2015, 2016]),
('month', [3, 4, 5, 6, 7, 8]),
('day', [11, 12, 13, 14, 15, 16]),
],
)
def test_date_extract_field(db, opname, expected):
op = operator.methodcaller(opname)
t = db.functional_alltypes
expr = op(t.timestamp_col.cast('date')).distinct()
result = expr.execute().astype(int).to_list()
assert result == expected
def test_scalar_parameter(con):
start = ibis.param(dt.date)
end = ibis.param(dt.date)
t = con.table('functional_alltypes')
col = t.date_string_col.cast('date')
expr = col.between(start, end)
start_string, end_string = '2010-03-01', '2012-12-31'
result = expr.execute(params={start: start_string, end: end_string})
expected = col.between(start_string, end_string).execute()
tm.assert_series_equal(result, expected)
def test_equals_function(alltypes, df):
expr = alltypes.tinyint_col.__eq__(114)
result = expr.execute()
expected = df.tinyint_col.eq(114)
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
def test_not_equals_function(alltypes, df):
expr = alltypes.tinyint_col.__ne__(114)
result = expr.execute()
expected = df.tinyint_col.ne(114)
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
def test_GreaterEqual_function(alltypes, df):
expr = alltypes.date_string_col.__ge__('2012-05-05')
result = expr.execute()
expected = df.date_string_col.ge('2012-05-05')
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
def test_Greater_function(alltypes, df):
expr = alltypes.date_string_col.__gt__('2012-05-05')
result = expr.execute()
expected = df.date_string_col.gt('2012-05-05')
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
def test_LessEqual_function(alltypes, df):
expr = alltypes.timestamp_col.__le__('2013-05-13 16:12:14.280')
result = expr.execute()
expected = df.timestamp_col.le('2013-05-13 16:12:14.280')
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
def test_Less_function(alltypes, df):
expr = alltypes.timestamp_col.__lt__('2013-05-13 16:12:14.280')
result = expr.execute()
expected = df.timestamp_col.lt('2013-05-13 16:12:14.280')
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('haystack', 'needle', 'expected'),
[
('foobar', 'bar', True),
('foobar', 'foo', True),
('foobar', 'baz', False),
('100%', '%', True),
('a_b_c', '_', True),
],
)
def test_string_contains(con, haystack, needle, expected):
value = L(haystack)
expr = value.contains(needle)
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('func', 'pandas_func'),
[
param(
lambda t, cond: t.bool_col.count(),
lambda df, cond: df.bool_col.count(),
id='count',
),
param(
lambda t, cond: t.double_col.sum(),
lambda df, cond: df.double_col.sum(),
id='sum',
),
param(
lambda t, cond: t.double_col.mean(),
lambda df, cond: df.double_col.mean(),
id='mean',
),
param(
lambda t, cond: t.double_col.min(),
lambda df, cond: df.double_col.min(),
id='min',
),
param(
lambda t, cond: t.double_col.max(),
lambda df, cond: df.double_col.max(),
id='max',
),
param(
lambda t, cond: t.double_col.var(),
lambda df, cond: df.double_col.var(),
id='var',
),
param(
lambda t, cond: t.double_col.std(),
lambda df, cond: df.double_col.std(),
id='std',
),
param(
lambda t, cond: t.double_col.var(how='sample'),
lambda df, cond: df.double_col.var(ddof=1),
id='samp_var',
),
param(
lambda t, cond: t.double_col.std(how='pop'),
lambda df, cond: df.double_col.std(ddof=0),
id='pop_std',
),
param(
lambda t, cond: t.bool_col.count(where=cond),
lambda df, cond: df.bool_col[cond].count(),
id='count_where',
),
param(
lambda t, cond: t.double_col.sum(where=cond),
lambda df, cond: df.double_col[cond].sum(),
id='sum_where',
),
param(
lambda t, cond: t.double_col.mean(where=cond),
lambda df, cond: df.double_col[cond].mean(),
id='mean_where',
),
param(
lambda t, cond: t.double_col.min(where=cond),
lambda df, cond: df.double_col[cond].min(),
id='min_where',
),
param(
lambda t, cond: t.double_col.max(where=cond),
lambda df, cond: df.double_col[cond].max(),
id='max_where',
),
param(
lambda t, cond: t.double_col.var(where=cond),
lambda df, cond: df.double_col[cond].var(),
id='var_where',
),
param(
lambda t, cond: t.double_col.std(where=cond),
lambda df, cond: df.double_col[cond].std(),
id='std_where',
),
param(
lambda t, cond: t.double_col.var(where=cond, how='sample'),
lambda df, cond: df.double_col[cond].var(),
id='samp_var_where',
),
param(
lambda t, cond: t.double_col.std(where=cond, how='pop'),
lambda df, cond: df.double_col[cond].std(ddof=0),
id='pop_std_where',
),
],
)
def test_aggregations(alltypes, df, func, pandas_func):
table = alltypes.limit(100)
df = df.head(table.execute().shape[0])
cond = table.string_col.isin(['abc', 'lmn'])
expr = func(table, cond)
result = expr.execute()
expected = pandas_func(df, cond.execute())
np.testing.assert_allclose(result, expected)
def test_not_contains(alltypes, df):
n = 100
table = alltypes.limit(n)
expr = table.string_col.notin(['abc', 'lmn'])
result = expr.execute()
expected = ~df.head(n).string_col.isin(['abc', 'lmn'])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize(
('func', 'expected_index'), [('first', 1), ('last', 0)]
)
def test_first_last_value(alltypes, df, func, expected_index):
col = alltypes.sort_by(ibis.desc(alltypes.string_col)).double_col
method = getattr(col, func)
expr = method()
result = expr.execute().rename('double_col')
expected = pd.Series(
df.double_col.iloc[expected_index],
index=pd.RangeIndex(len(df)),
name='double_col',
).astype('object')
tm.assert_series_equal(result, expected)
def test_identical_to(con, df):
# TODO: abstract this testing logic out into parameterized fixtures
t = con.table('functional_alltypes')
dt = df[['tinyint_col', 'double_col']]
expr = t.tinyint_col.identical_to(t.double_col)
result = expr.execute()
expected = (dt.tinyint_col.isnull() & dt.double_col.isnull()) | (
dt.tinyint_col == dt.double_col
)
expected.name = result.name
tm.assert_series_equal(result, expected)
def test_Between_function(alltypes, df):
expr = alltypes.date_string_col.between('2011-10-24', '2014-03-09')
result = expr.execute()
expected = df.date_string_col.between('2011-10-24', '2014-03-09')
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'pattern', ['%Y%m%d', ' %X', ' %d ', ' %I ', ' %H%S ', ' %p ']
)
def test_strftime(con, pattern):
value = L('2015-09-01 14:48:05.359').cast(dt.string)
final_value = value.cast(dt.timestamp)
raw_value = datetime(
year=2015,
month=9,
day=1,
hour=14,
minute=48,
second=5,
microsecond=359000,
)
assert con.execute(final_value.strftime(pattern)) == raw_value.strftime(
pattern
)
def test_coalesce(pointer, pdf):
expr = pointer.marks.coalesce(None, 0)
result = expr.execute()
expected = pdf.marks.fillna(0).astype(np.int64)
expected.name = 'tmp'
tm.assert_series_equal(result, expected)
|
apache-2.0
|
mozvip/Sick-Beard
|
sickbeard/notifiers/__init__.py
|
1
|
2463
|
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
import xbmc
import plex
import nmj
import nmjv2
import synoindex
import pytivo
import growl
import prowl
import notifo
from . import libnotify
import pushover
import boxcar
import nma
import tweet
import trakt
from sickbeard.common import *
# home theater
xbmc_notifier = xbmc.XBMCNotifier()
plex_notifier = plex.PLEXNotifier()
nmj_notifier = nmj.NMJNotifier()
synoindex_notifier = synoindex.synoIndexNotifier()
nmjv2_notifier = nmjv2.NMJv2Notifier()
pytivo_notifier = pytivo.pyTivoNotifier()
# devices
growl_notifier = growl.GrowlNotifier()
prowl_notifier = prowl.ProwlNotifier()
notifo_notifier = notifo.NotifoNotifier()
libnotify_notifier = libnotify.LibnotifyNotifier()
pushover_notifier = pushover.PushoverNotifier()
boxcar_notifier = boxcar.BoxcarNotifier()
nma_notifier = nma.NMA_Notifier()
# online
twitter_notifier = tweet.TwitterNotifier()
trakt_notifier = trakt.TraktNotifier()
notifiers = [
libnotify_notifier, # Libnotify notifier goes first because it doesn't involve blocking on network activity.
xbmc_notifier,
plex_notifier,
nmj_notifier,
nmjv2_notifier,
synoindex_notifier,
pytivo_notifier,
growl_notifier,
prowl_notifier,
notifo_notifier,
pushover_notifier,
boxcar_notifier,
nma_notifier,
twitter_notifier,
trakt_notifier,
]
def notify_download(ep_name):
for n in notifiers:
n.notify_download(ep_name)
def notify_subtitle_download(ep_name, lang):
for n in notifiers:
n.notify_subtitle_download(ep_name, lang)
def notify_snatch(ep_name):
for n in notifiers:
n.notify_snatch(ep_name)
|
gpl-3.0
|
FelixMDenis/doxygen
|
src/version.py
|
6
|
1733
|
#
# script to read the version information from `../configure`
# relevant lines are starting with:
# `doxygen_version_major`
# `doxygen_version_minor`
# `doxygen_version_revision`
# `doxygen_version_mmn`
# the collected information is written to: `../VERSION` and `../src/version.cpp`
#
import sys
import os
#
# set 'default' values
#
major = 0
minor = 0
revision = 0
mnt = 'NO'
#
# open input file
# read file and get relevant information
# close
#
f = open('../configure', 'r')
for line in f:
# check if line can match (saves 3 comparisons)
if (line.startswith('doxygen_version')):
if (line.startswith('doxygen_version_major')):
major = line.replace('doxygen_version_major=','')
elif (line.startswith('doxygen_version_minor')):
minor = line.replace('doxygen_version_minor=','')
elif (line.startswith('doxygen_version_revision')):
revision = line.replace('doxygen_version_revision=','')
elif (line.startswith('doxygen_version_mmn')):
mnt = line.replace('doxygen_version_mmn=','')
f.close()
# strip superfluous '\n'
major = major.replace('\n','')
minor = minor.replace('\n','')
revision = revision.replace('\n','')
mnt = mnt.replace('\n','')
#
# open output files
# write relevant information
# close files
#
f1 = open('../VERSION','w')
f2 = open(os.path.join(sys.argv[1],'version.cpp'),'w')
if (mnt == 'NO'):
f1.write(major + '.' + minor + '.' + revision)
f2.write('char versionString[]="' + major + '.' + minor + '.' + revision + '";')
else:
f1.write(major + '.' + minor + '.' + revision + '-' + mnt)
f2.write('char versionString[]="' + major + '.' + minor + '.' + revision + '-' + mnt + '";')
f1.close()
f2.close()
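#
# Worked example (illustrative): given these lines in ../configure
#
#   doxygen_version_major=1
#   doxygen_version_minor=8
#   doxygen_version_revision=9
#   doxygen_version_mmn=NO
#
# the script writes '1.8.9' to ../VERSION and
#   char versionString[]="1.8.9";
# to <argv[1]>/version.cpp. With doxygen_version_mmn=20150124 it would emit
# '1.8.9-20150124' in both outputs instead.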
|
gpl-2.0
|
INNUENDOCON/INNUca
|
src/SPAdes-3.11.0-Linux/share/spades/pyyaml2/loader.py
|
671
|
1132
|
__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
from reader import *
from scanner import *
from parser import *
from composer import *
from constructor import *
from resolver import *
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
def __init__(self, stream):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
BaseConstructor.__init__(self)
BaseResolver.__init__(self)
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
def __init__(self, stream):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
SafeConstructor.__init__(self)
Resolver.__init__(self)
class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
def __init__(self, stream):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
Constructor.__init__(self)
Resolver.__init__(self)
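# Minimal usage sketch (assumes the standard PyYAML reader/constructor API;
# the filename is a placeholder):
#
#     stream = open('example.yml')
#     loader = SafeLoader(stream)
#     try:
#         data = loader.get_single_data()  # compose + construct one document
#     finally:
#         loader.dispose()
#         stream.close()
#
# SafeLoader is the sensible default here: unlike Loader, its constructor
# only builds plain Python objects and will not instantiate arbitrary types.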
|
gpl-3.0
|
CubicERP/geraldo
|
site/newsite/django_1_0/django/db/models/related.py
|
49
|
6008
|
class BoundRelatedObject(object):
def __init__(self, related_object, field_mapping, original):
self.relation = related_object
self.field_mappings = field_mapping[related_object.name]
def template_name(self):
raise NotImplementedError
def __repr__(self):
return repr(self.__dict__)
class RelatedObject(object):
def __init__(self, parent_model, model, field):
self.parent_model = parent_model
self.model = model
self.opts = model._meta
self.field = field
self.edit_inline = field.rel.edit_inline
self.name = '%s:%s' % (self.opts.app_label, self.opts.module_name)
self.var_name = self.opts.object_name.lower()
def flatten_data(self, follow, obj=None):
new_data = {}
rel_instances = self.get_list(obj)
for i, rel_instance in enumerate(rel_instances):
instance_data = {}
for f in self.opts.fields + self.opts.many_to_many:
# TODO: Fix for recursive manipulators.
fol = follow.get(f.name, None)
if fol:
field_data = f.flatten_data(fol, rel_instance)
for name, value in field_data.items():
instance_data['%s.%d.%s' % (self.var_name, i, name)] = value
new_data.update(instance_data)
return new_data
def extract_data(self, data):
"""
Pull out the data meant for inline objects of this class,
i.e. anything starting with our module name.
"""
return data # TODO
def get_list(self, parent_instance=None):
"Get the list of this type of object from an instance of the parent class."
if parent_instance is not None:
attr = getattr(parent_instance, self.get_accessor_name())
if self.field.rel.multiple:
# For many-to-many relationships, return a list of objects
# corresponding to the xxx_num_in_admin options of the field
objects = list(attr.all())
count = len(objects) + self.field.rel.num_extra_on_change
if self.field.rel.min_num_in_admin:
count = max(count, self.field.rel.min_num_in_admin)
if self.field.rel.max_num_in_admin:
count = min(count, self.field.rel.max_num_in_admin)
change = count - len(objects)
if change > 0:
return objects + [None] * change
if change < 0:
return objects[:change]
else: # Just right
return objects
else:
# A one-to-one relationship, so just return the single related
# object
return [attr]
else:
if self.field.rel.min_num_in_admin:
return [None] * max(self.field.rel.num_in_admin, self.field.rel.min_num_in_admin)
else:
return [None] * self.field.rel.num_in_admin
def get_db_prep_lookup(self, lookup_type, value):
# Defer to the actual field definition for db prep
return self.field.get_db_prep_lookup(lookup_type, value)
def editable_fields(self):
"Get the fields in this class that should be edited inline."
return [f for f in self.opts.fields + self.opts.many_to_many if f.editable and f != self.field]
def get_follow(self, override=None):
if isinstance(override, bool):
if override:
over = {}
else:
return None
else:
if override:
over = override.copy()
elif self.edit_inline:
over = {}
else:
return None
over[self.field.name] = False
return self.opts.get_follow(over)
def get_manipulator_fields(self, opts, manipulator, change, follow):
if self.field.rel.multiple:
if change:
attr = getattr(manipulator.original_object, self.get_accessor_name())
count = attr.count()
count += self.field.rel.num_extra_on_change
else:
count = self.field.rel.num_in_admin
if self.field.rel.min_num_in_admin:
count = max(count, self.field.rel.min_num_in_admin)
if self.field.rel.max_num_in_admin:
count = min(count, self.field.rel.max_num_in_admin)
else:
count = 1
fields = []
for i in range(count):
for f in self.opts.fields + self.opts.many_to_many:
if follow.get(f.name, False):
prefix = '%s.%d.' % (self.var_name, i)
fields.extend(f.get_manipulator_fields(self.opts, manipulator, change,
name_prefix=prefix, rel=True))
return fields
def __repr__(self):
return "<RelatedObject: %s related to %s>" % (self.name, self.field.name)
def bind(self, field_mapping, original, bound_related_object_class=BoundRelatedObject):
return bound_related_object_class(self, field_mapping, original)
def get_accessor_name(self):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lower-cased object_name + "_set",
# but this can be overridden with the "related_name" option.
if self.field.rel.multiple:
# If this is a symmetrical m2m relation on self, there is no reverse accessor.
if getattr(self.field.rel, 'symmetrical', False) and self.model == self.parent_model:
return None
return self.field.rel.related_name or (self.opts.object_name.lower() + '_set')
else:
return self.field.rel.related_name or (self.opts.object_name.lower())
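# Illustrative example (hypothetical models): for
#
#     class Reporter(models.Model): ...
#     class Article(models.Model):
#         reporter = models.ForeignKey(Reporter)
#
# the RelatedObject hanging off Reporter resolves get_accessor_name() to
# 'article_set'; if the ForeignKey declares related_name='stories', that
# name is returned instead.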
|
lgpl-3.0
|
bev-a-tron/pledge_service
|
backend/handlers.py
|
3
|
37417
|
"""Handlers for MayOne.US."""
from collections import namedtuple, defaultdict
import datetime
import json
import logging
import cgi
import base64
import urllib
from google.appengine.api import mail
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.api import urlfetch
from google.appengine.api import memcache
import validictory
import webapp2
import cache
import model
import templates
import util
import pprint
import urlparse
import paypal
from rauth import OAuth2Service
# Immutable environment with both configuration variables, and backends to be
# mocked out in tests.
Environment = namedtuple(
'Environment',
[
# App engine app name, or 'local' for dev_appserver, or 'unittest' for unit
# tests.
'app_name',
'stripe_public_key',
# StripeBackend
'stripe_backend',
# MailingListSubscriber
'mailing_list_subscriber',
# MailSender
'mail_sender',
])
class PaymentError(Exception):
pass
class StripeBackend(object):
"""Interface which contacts stripe."""
def CreateCustomer(self, email, card_token):
"""Creates a stripe customer so we can charge them later.
Returns: A string customer id.
"""
raise NotImplementedError()
def RetrieveCardData(self, customer_id):
"""Retrieves a stripe customer's card data, given the id.
Returns: A card dictionary
"""
raise NotImplementedError()
def Charge(self, customer_id, amount_cents):
"""Charges a customer and returns an identifier for the charge."""
raise NotImplementedError()
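# Hypothetical in-memory implementation sketch for tests (all names below
# are invented; the real backend lives elsewhere in this codebase):
#
#     class FakeStripeBackend(StripeBackend):
#       def __init__(self):
#         self.charges = []
#       def CreateCustomer(self, email, card_token):
#         return 'cus_test_1'
#       def RetrieveCardData(self, customer_id):
#         return {'last4': '4242'}
#       def Charge(self, customer_id, amount_cents):
#         self.charges.append((customer_id, amount_cents))
#         return 'ch_test_%d' % len(self.charges)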
class MailingListSubscriber(object):
"""Interface which signs folks up for emails."""
def Subscribe(self, email, first_name, last_name, amount_cents, ip_addr, time,
source, phone=None, zipcode=None, volunteer=None, skills=None,
rootstrikers=None, nonce=None, pledgePageSlug=None, recurring=None):
raise NotImplementedError()
_STR = dict(type='string')
_STR_optional = dict(type='string', required=False)
valid_recurrence_periods = ["monthly", "weekly", ""]
PLEDGE_SCHEMA = dict(
type='object',
properties=dict(
email=_STR,
phone=dict(type='string', blank=True),
name=_STR,
occupation=_STR,
employer=_STR,
target=_STR,
surveyResult=_STR_optional,
subscribe=dict(type='boolean'),
anonymous=dict(type='boolean', required=False),
amountCents=dict(type='integer', minimum=100, maximum=540000),
pledgeType=dict(enum=model.Pledge.TYPE_VALUES, required=False),
team=dict(type='string', blank=True),
recurrence_period=dict(type='string', required=False, enum=valid_recurrence_periods),
payment=dict(type='object',
properties=dict(
STRIPE=dict(type='object',
required=False,
properties=dict(token=_STR)),
PAYPAL=dict(type='object',
required=False,
properties=dict(step=_STR_optional)),
)
),
)
)
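# Illustrative payload that passes PLEDGE_SCHEMA validation (all values are
# made up):
#
#     example = dict(
#         email='[email protected]', phone='', name='Jane Doe',
#         occupation='Engineer', employer='Acme', target='Senate',
#         subscribe=True, amountCents=500, team='',
#         payment=dict(STRIPE=dict(token='tok_xxx')))
#     validictory.validate(example, PLEDGE_SCHEMA)  # raises ValueError on bad input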
def pledge_helper(handler, data, stripe_customer_id, stripe_charge_id, paypal_payer_id, paypal_txn_id):
env = handler.app.config['env']
if 'last_name' in data:
last_name = data['last_name']
if 'first_name' in data:
first_name = data['first_name']
else:
first_name = ''
else:
# Split apart the name into first and last. Yes, this sucks, but adding the
# name fields makes the form look way more daunting. We may reconsider this.
name_parts = data['name'].split(None, 1)
first_name = name_parts[0]
if len(name_parts) == 1:
last_name = ''
logging.warning('Could not determine last name: %s', data['name'])
else:
last_name = name_parts[1]
if not 'surveyResult' in data:
data['surveyResult'] = ''
if not 'city' in data:
data['city'] = None
if not 'address' in data:
data['address'] = None
else:
logging.info('Address was: ' + str( data['address']))
if not 'state' in data:
data['state'] = None
if not 'zipCode' in data:
data['zipCode'] = None
if not 'bitpay_invoice_id' in data:
data['bitpay_invoice_id'] = None
logging.info('Pledge data: %s', data)
if not 'recurring' in data:
data['recurring'] = False
if not 'enddate' in data:
data['enddate'] = ''
if not 'recurrence_period' in data:
data['recurrence_period'] = ''
if not 'nationBuilderVars' in data:
data['nationBuilderVars'] = None
if not 'keep_donation' in data:
data['keep_donation'] = False
amountCents = data['amountCents']
user, pledge = model.addPledge(email=data['email'],
stripe_customer_id=stripe_customer_id,
stripe_charge_id=stripe_charge_id,
paypal_payer_id=paypal_payer_id,
paypal_txn_id=paypal_txn_id,
amount_cents=amountCents,
first_name=first_name,
last_name=last_name,
occupation=data['occupation'],
employer=data['employer'],
phone=data['phone'],
target=data['target'],
surveyResult=data['surveyResult'],
pledge_type=data.get(
'pledgeType', model.Pledge.TYPE_CONDITIONAL),
team=data['team'],
mail_list_optin=data['subscribe'],
anonymous=data.get('anonymous', False),
address=str(data['address']),
city=data['city'],
state=data['state'],
zipCode=data['zipCode'],
bitpay_invoice_id = data['bitpay_invoice_id'],
recurring=data['recurring'],
recurrence_period=data['recurrence_period'],
enddate=data['enddate'],
keep_donation=data['keep_donation']
)
logging.info('Added pledge to database')
if data['subscribe']:
env.mailing_list_subscriber.Subscribe(
email=data['email'],
first_name=first_name, last_name=last_name,
amount_cents=amountCents,
ip_addr=handler.request.remote_addr,
time=datetime.datetime.now(),
source='pledge',
phone=data['phone'],
nonce=user.url_nonce,
recurring=data['recurring']
)
if False:
model.addNationBuilderDonation(email=data['email'],
stripe_customer_id=stripe_customer_id,
stripe_charge_id=stripe_charge_id,
paypal_payer_id=paypal_payer_id,
paypal_txn_id=paypal_txn_id,
amount_cents=amountCents,
first_name=first_name,
last_name=last_name,
occupation=data['occupation'],
employer=data['employer'],
phone=data['phone'],
target=data['target'],
surveyResult=data['surveyResult'],
pledge_type=data.get(
'pledgeType', model.Pledge.TYPE_CONDITIONAL),
team=data['team'],
mail_list_optin=data['subscribe'],
anonymous=data.get('anonymous', False),
address=str(data['address']),
city=data['city'],
state=data['state'],
zipCode=data['zipCode'],
bitpay_invoice_id = data['bitpay_invoice_id'],
recurring = data['recurring'],
enddate = data['enddate'],
recurrence_period = data['recurrence_period'],
nationBuilderVars = data['nationBuilderVars']
)
# Add to the total.
model.ShardedCounter.increment('TOTAL-5', amountCents)
if data['team']:
cache.IncrementTeamPledgeCount(data['team'], 1)
cache.IncrementTeamTotal(data['team'], amountCents)
totalStr = '$%d' % int(amountCents / 100)
format_kwargs = {
'name': data['name'].encode('utf-8'),
'url_nonce': pledge.url_nonce,
'total': totalStr,
'user_url_nonce': user.url_nonce
}
if data['recurring'] == True:
text_body = open('email/thank-you-recurring.txt').read().format(**format_kwargs)
html_body = open('email/thank-you-recurring.html').read().format(**format_kwargs)
else:
text_body = open('email/thank-you.txt').read().format(**format_kwargs)
html_body = open('email/thank-you.html').read().format(**format_kwargs)
env.mail_sender.Send(to=data['email'].encode('utf-8'),
subject='Thank you for your pledge',
text_body=text_body,
html_body=html_body)
id = str(pledge.key())
receipt_url = '?receipt=%s&auth_token=%s' % (id, pledge.url_nonce)
return id, pledge.url_nonce, receipt_url
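# Name handling in pledge_helper above (illustrative): 'Jane Q Doe' splits
# into first_name='Jane', last_name='Q Doe' via split(None, 1); a single
# token such as 'Cher' yields last_name='' and logs a warning.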
class PledgeHandler(webapp2.RequestHandler):
"""RESTful handler for pledge objects."""
def post(self):
"""Create a new pledge, and update user info."""
util.EnableCors(self)
self.response.headers['Content-Type'] = 'application/json'
env = self.app.config['env']
try:
data = json.loads(self.request.body)
except ValueError, e:
logging.warning('Bad JSON request: %s', e)
self.error(400)
self.response.write('Invalid request')
return
try:
validictory.validate(data, PLEDGE_SCHEMA)
except ValueError, e:
logging.warning('Schema check failed: %s', e)
try:
logging.warning('referer: ' + self.request.referer)
except:
pass
self.error(400)
json.dump(dict(validationError=str(e)), self.response)
return
# Do any server-side processing the payment processor needs.
stripe_customer = None
stripe_customer_id = data.get('customer_id', None)
stripe_charge_id = None
# upsell this customer's plan to a monthly subscription
if stripe_customer_id:
try:
env.stripe_backend.UpsellCustomerToMonthlySubscription(stripe_customer_id, data['amountCents']/100)
except PaymentError, e:
logging.warning('Payment error: %s', e)
self.error(400)
json.dump(dict(paymentError=str(e)), self.response)
return
elif 'STRIPE' in data['payment']:
try:
if data.get('recurring', '') == True:
logging.info('Trying to create stripe customer %s for a recurring donation' % data['email'])
if data.get('recurrence_period', None) == None:
data['recurrence_period'] = 'monthly'
stripe_customer = env.stripe_backend.CreateCustomerWithPlan(
email=data['email'],
card_token=data['payment']['STRIPE']['token'],
amount_dollars=data['amountCents']/100,
recurrence_period=data['recurrence_period'],
upsell=data.get('upsell', False))
else:
logging.info('Trying to create stripe customer %s for a single donation' % data['email'])
stripe_customer = env.stripe_backend.CreateCustomer(
email=data['email'],
card_token=data['payment']['STRIPE']['token'])
stripe_customer_id = stripe_customer.id
logging.info('Trying to extract address for %s' % data['email'])
logging.info('Stripe customer is %s' % str(stripe_customer))
if len(stripe_customer.sources.data) > 0:
card_data = stripe_customer.sources.data[0]
if 'address_line1_check' in card_data:
logging.info('Address check: %s' % card_data['address_line1_check'])
if card_data['address_line1_check'] == 'fail':
logging.warning('Your billing address did not validate')
self.error(400)
json.dump(dict(paymentError='Your billing address did not validate'), self.response)
return # error trapping is not working in here, so have to do hacky early return for now
if 'address_line1' in card_data:
data['address'] = card_data['address_line1']
if card_data['address_line2']:
data['address'] += ', %s' % card_data['address_line2']
if 'address_city' in card_data:
data['city'] = card_data['address_city']
if 'address_state' in card_data:
data['state'] = card_data['address_state']
if 'address_zip' in card_data:
data['zipCode'] = card_data['address_zip']
logging.info('Trying to charge %s' % data['email'])
stripe_charge_id = env.stripe_backend.Charge(stripe_customer_id, data['amountCents'])
data['stripe_customer_id'] = stripe_customer_id
logging.info('Got charge id %s' % stripe_charge_id)
except PaymentError, e:
logging.warning('Payment error: %s', e)
self.error(400)
json.dump(dict(paymentError=str(e)), self.response)
return
else:
logging.warning('No payment processor specified: %s', data)
self.error(400)
return
id, auth_token, receipt_url = pledge_helper(self, data, stripe_customer_id, stripe_charge_id, None, None)
logging.info('Pledge handler finished')
json.dump(dict(id=id,
auth_token=auth_token,
pledge_amount=data['amountCents']/100,
recurrence_period=data['recurrence_period'],
receipt_url=receipt_url,
card_token=stripe_charge_id,
customer_id=stripe_customer_id), self.response)
def options(self):
util.EnableCors(self)
class SubscribeHandler(webapp2.RequestHandler):
"""RESTful handler for subscription requests."""
# https://www.pivotaltracker.com/s/projects/1075614/stories/71725060
def post(self):
util.EnableCors(self)
env = self.app.config['env']
logging.info('body: %s' % self.request.body)
email_input = cgi.escape(self.request.get('email'))
if len(email_input) == 0:
logging.warning("Bad Request: required field (email) missing.")
self.error(400)
return
redirect_input = cgi.escape(self.request.get('redirect'))
dont_redirect = cgi.escape(self.request.get('dont_redirect'))
if dont_redirect != '':
dont_redirect = True
if redirect_input != '':
dont_redirect = False
# request parameters arrive as strings, so coerce to a real boolean
is_supporter = cgi.escape(self.request.get('is_supporter')).lower() in ('true', 'on', 'yes', '1')
first_name = cgi.escape(self.request.get('first_name'))
if len(first_name) == 0:
first_name = None
last_name = cgi.escape(self.request.get('last_name'))
if len(last_name) == 0:
last_name = None
phone_input = cgi.escape(self.request.get('phone'))
if len(phone_input) == 0:
phone_input = None
zipcode_input = cgi.escape(self.request.get('zipcode'))
if len(zipcode_input) == 0:
zipcode_input = None
volunteer_input = cgi.escape(self.request.get('volunteer')) # "YES" or "NO"
if volunteer_input == 'on':
volunteer_input = 'Yes'
elif volunteer_input == 'off':
volunteer_input = ''
skills_input = cgi.escape(self.request.get('skills')) #Free text, limited to 255 char
if len(skills_input) == 0:
skills_input = None
rootstrikers_input = cgi.escape(self.request.get('rootstrikers')) #Free text, limited to 255 char
if rootstrikers_input=='on':
rootstrikers_input = 'Yes'
elif rootstrikers_input=='off':
rootstrikers_input = ''
source_input = cgi.escape(self.request.get('source'))
if len(source_input) == 0:
source_input = 'subscribe'
pledgePageSlug_input = cgi.escape(self.request.get('pledgePageSlug'))
if len(pledgePageSlug_input) == 0:
pledgePageSlug_input = ''
otherVars = {}
# get any parameter that looks like MERGE something
for argName in self.request.arguments():
if argName.startswith('MERGE'):
arg = self.request.get(argName)
otherVars[argName] = arg
NationBuilderVars = {}
for argName in self.request.arguments():
if argName.startswith('NationBuilder'):
arg = self.request.get(argName)
NationBuilderVars[argName[13:]] = arg
env.mailing_list_subscriber.Subscribe(
email=email_input,
first_name=first_name, last_name=last_name,
amount_cents=None,
ip_addr=self.request.remote_addr,
time=datetime.datetime.now(),
source=source_input,
phone=phone_input,
zipcode=zipcode_input,
volunteer=volunteer_input,
skills=skills_input,
rootstrikers=rootstrikers_input,
pledgePageSlug=pledgePageSlug_input,
otherVars=otherVars,
is_supporter=is_supporter,
nationBuilderVars=NationBuilderVars
)
if dont_redirect == False:
if len(redirect_input)>0:
redirect_url = '%s?email=%s&source=%s' % (redirect_input, email_input, source_input)
else:
redirect_url = '/pledge?email=%s' % email_input
self.redirect(str(redirect_url))
else:
pass
options = util.EnableCors
class ReceiptHandler(webapp2.RequestHandler):
def get(self, id):
try:
pledge = db.get(db.Key(id))
except db.BadKeyError, e:
logging.warning('Bad key error: %s', e)
self.error(404)
self.response.write('Not found')
return
if not pledge:
self.error(404)
self.response.write('Not found')
return
user = model.User.get_by_key_name(pledge.email)
if user is None:
logging.warning('pledge had missing user: %r, %r', id, pledge.email)
self.error(404)
self.response.write('Not found')
# allow this one pledge so test receipt can be viewed
if (id != 'agxzfm1heWRheS1wYWNyEwsSBlBsZWRnZRiAgICAlZG2CAw'):
auth_token = self.request.get('auth_token')
if not util.ConstantTimeIsEqual(auth_token, pledge.url_nonce):
self.error(403)
self.response.write('Access denied')
return
template = templates.GetTemplate('receipt.html')
self.response.write(template.render(dict(pledge=pledge, user=user)))
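# util.ConstantTimeIsEqual above guards the nonce check against timing
# attacks. A minimal sketch of such a comparison, assuming plain byte
# strings (illustrative; not the util module's actual code):
#
#     def constant_time_is_equal(a, b):
#       if len(a) != len(b):
#         return False
#       acc = 0
#       for x, y in zip(a, b):
#         acc |= ord(x) ^ ord(y)
#       return acc == 0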
class PaymentConfigHandler(webapp2.RequestHandler):
def get(self):
util.EnableCors(self)
env = self.app.config['env']
logging.info('Got env ' + str(env))
if not env.stripe_public_key:
raise Exception('No stripe public key in DB')
params = dict(testMode=(env.app_name == u'local'),
stripePublicKey=env.stripe_public_key)
self.response.headers['Content-Type'] = 'application/json'
json.dump(params, self.response)
options = util.EnableCors
class NumPledgesHandler(webapp2.RequestHandler):
def get(self):
util.EnableCors(self)
WP_PLEDGES = 0
VERSION_12_AND_UNDER = 0
count = memcache.get('TOTAL-PLEDGES')
if not count:
query = model.Pledge.all(keys_only=True).filter('model_version >', 12)
i = 0
while True:
result = query.fetch(1000)
i = i + len(result)
if len(result) < 1000:
break
cursor = query.cursor()
query.with_cursor(cursor)
count = i + WP_PLEDGES + VERSION_12_AND_UNDER
memcache.set('TOTAL-PLEDGES', count, 120)
self.response.headers['Content-Type'] = 'application/json'
json.dump({'count':count}, self.response)
options = util.EnableCors
class TotalHandler(webapp2.RequestHandler):
# These get added to every pledge total calculation
PRE_SHARDING_TOTAL = 0 # See model.ShardedCounter
WP_PLEDGE_TOTAL = 0
DEMOCRACY_DOT_COM_BALANCE = 0
CHECKS_BALANCE = 0 # lol US government humor
def get(self):
util.EnableCors(self)
total = (TotalHandler.PRE_SHARDING_TOTAL +
TotalHandler.WP_PLEDGE_TOTAL +
TotalHandler.DEMOCRACY_DOT_COM_BALANCE +
TotalHandler.CHECKS_BALANCE)
total += model.ShardedCounter.get_count('TOTAL-5')
result = dict(totalCents=total)
team = self.request.get("team")
if team:
team_pledges = cache.GetTeamPledgeCount(team) or 0
team_total = cache.GetTeamTotal(team) or 0
try:
# there are some memcache values with string values
team_total = int(team_total)
except ValueError, e:
logging.exception("non-integral team total: %r", team_total)
team_total = 0
if not (team_pledges and team_total):
for pledge in model.Pledge.all().filter("team =", team):
team_pledges += 1
team_total += pledge.amountCents
# There was a pledge made to this team that was not made through Stripe
if team == 'ahJzfm1heWRheS1wYWMtdGVhbXNyEQsSBFRlYW0YgICAgP7esAgM':
team_pledges += 1
team_total += 1000000
cache.SetTeamPledgeCount(team, team_pledges)
cache.SetTeamTotal(team, team_total)
result['team'] = team
result['teamPledges'] = team_pledges
result['teamTotalCents'] = team_total
self.response.headers['Content-Type'] = 'application/json'
json.dump(result, self.response)
options = util.EnableCors
class ThankTeamHandler(webapp2.RequestHandler):
def post(self):
env = self.app.config['env']
util.EnableCors(self)
for field in ['team', 'reply_to', 'subject', 'message_body', 'new_members']:
if not field in self.request.POST:
msg = "Bad Request: required field %s missing." % field
logging.warning(msg)
self.error(400)
self.response.write(msg)
return self.response
# get the pledges for this team, excluding the reply_to
pledges = model.Pledge.all().filter(
'team =',self.request.POST['team'])
# .filter(
# 'email !=', self.request.POST['reply_to'])
# yes this is executing another query, and it's ok because
# this will be done so infrequently
# FIXME: lookup from cache.Get.. or TeamTotal once those are sorted out
total_pledges = model.Pledge.all().filter(
'team =',self.request.POST['team']).count()
# if only sending to new members, filter out those that have already received emails
if self.request.POST['new_members'] == 'True':
pledges = pledges.filter('thank_you_sent_at =', None)
i = 0
for pledge in pledges:
env.mail_sender.Send(to=pledge.email,
subject=self.request.POST['subject'],
text_body=self.request.POST['message_body'],
html_body=self.request.POST['message_body'],
reply_to=self.request.POST['reply_to'])
i += 1
# set the thank_you_sent_at for users after sending
# FIXME: make sure the send was successful
pledge.thank_you_sent_at = datetime.datetime.now()
pledge.put()
logging.info('THANKING: %d PLEDGERS!!' % i)
response_data = {'num_emailed': i, 'total_pledges': total_pledges}
self.response.content_type = 'application/json'
self.response.write(json.dumps(response_data))
options = util.EnableCors
class PledgersHandler(webapp2.RequestHandler):
def get(self):
util.EnableCors(self)
team = self.request.get("team")
if not team:
self.error(400)
self.response.write('team required')
return
pledgers = defaultdict(lambda: 0)
for pledge in model.Pledge.all().filter("team =", team):
if pledge.anonymous:
pledgers["Anonymous"] += pledge.amountCents
continue
user = model.User.get_by_key_name(pledge.email)
if user is None or (not user.first_name and not user.last_name):
pledgers["Anonymous"] += pledge.amountCents
continue
name = ("%s %s" % (user.first_name or "", user.last_name or "")).strip()
pledgers[name] += pledge.amountCents
pledgers_by_amount = []
for name, amount in pledgers.iteritems():
pledgers_by_amount.append((amount, name))
pledgers_by_amount.sort(reverse=True)
result = {"pledgers": [name for _, name in pledgers_by_amount]}
self.response.headers['Content-Type'] = 'application/json'
json.dump(result, self.response)
options = util.EnableCors
class LeaderboardHandler(webapp2.RequestHandler):
def get(self):
util.EnableCors(self)
offset = int(self.request.get("offset") or 0)
limit = int(self.request.get("limit") or 25)
orderBy = self.request.get("orderBy") or "-totalCents"
teams = []
for tt in model.TeamTotal.all().order(orderBy).run(
offset=offset, limit=limit):
teams.append({
"team": tt.team,
"total_cents": tt.totalCents,
"num_pledges": tt.num_pledges})
self.response.headers['Content-Type'] = 'application/json'
json.dump({"teams": teams}, self.response)
options = util.EnableCors
class BitcoinStartHandler(webapp2.RequestHandler):
"""RESTful handler for Paypal pledge objects."""
def post(self):
"""Create a new TempPledge, and update user info."""
util.EnableCors(self)
try:
data = json.loads(self.request.body)
except ValueError, e:
logging.warning('Bad JSON request: %s', str(e))
self.error(400)
self.response.write('Invalid request')
return
try:
validictory.validate(data, PLEDGE_SCHEMA)
except ValueError, e:
logging.warning('Schema check failed: %s', str(e))
self.error(400)
self.response.write('Invalid request')
return
temp_pledge = model.TempPledge(
model_version=model.MODEL_VERSION,
email=data["email"],
name=data["name"],
phone=data["phone"],
occupation=data["occupation"],
employer=data["employer"],
target=data["target"],
subscribe=data["subscribe"],
amountCents=data["amountCents"],
firstName=data["firstName"],
lastName=data["lastName"],
address=data["address"],
city=data["city"],
state=data["state"],
zipCode=data["zip"],
bitcoinConfirm=data["bitcoinConfirm"],
team=data["team"]
)
temp_key = temp_pledge.put()
temp_key_str = str(temp_key)
try:
resp_dict = self._send_to_bitpay(data["amountCents"], temp_key_str)
json.dump({"bitpay_url": resp_dict["url"]}, self.response)
temp_pledge.bitpay_invoice_id = resp_dict["id"]
logging.info('Created invoice with id ' + resp_dict["id"])
temp_pledge.put()
return
except Exception, e:
logging.warning('BitcoinStart failed: ' + str(e))
self.error(400)
def _send_to_bitpay(self, amountCents, temp_key_str):
price_in_dollars = int(amountCents) / 100.0
apiKey = model.Secrets.get().bitpay_api_key
uname = base64.b64encode(apiKey)
headers = {'Authorization': 'Basic ' + uname }
callbackUrl = self.request.host_url + "/r/bitcoin_notifications"
logging.info('CALLBACK URL WILL BE: ' + callbackUrl)
post_data = {
'posData': temp_key_str,
'price': price_in_dollars,
'notificationURL': self.request.host_url + "/r/bitcoin_notifications",
'currency': 'USD',
# 'buyerName': data["name"],
# 'buyerEmail': data["email"]
}
payload = urllib.urlencode(post_data)
logging.info('calling URL fetchee')
result = urlfetch.fetch(
url='https://bitpay.com/api/invoice/',
payload=payload,
method=urlfetch.POST,
headers=headers,
validate_certificate=True
)
if result.status_code == 200:
response_dict = json.loads(result.content)
return response_dict
else:
logging.warning('BitcoinStart failed: ' + str(result.content))
self.error(400)
self.response.write('Invalid request')
return
options = util.EnableCors
class BitcoinNotificationsHandler(webapp2.RequestHandler):
def post(self):
# TODO: check SSL cert
# add bitpay invoice ID to pledge record
try:
data = json.loads(self.request.body)
except ValueError, e:
logging.warning('Bad JSON request: %s', str(e))
logging.info('Bad request was: ' + str(self.request.body))
self.error(400)
self.response.write('Invalid request')
return
invoiceID = data["id"]
posData = data["posData"]
logging.info('Bitpay notification received. Invoice ID: %s, Status: %s' % (invoiceID, data.get('status')))
key = db.Key(posData)
temp_pledge = model.TempPledge.get_by_id(key.id())
if not temp_pledge:
logging.warning('could not find temp pledge from posData')
self.error(400)
return
# check to make sure this isn't a duplicate notification
# as they try up to 5 times
if temp_pledge.pledge_id:
return self.response
if not data.get('status') == 'confirmed':
logging.info('Non-confirmed transaction. Ignoring. Invoice ID: %s, Status: %s' % (invoiceID, data.get('status')))
self.response.write('Thanks. We got this, but ignored it.')
return
paid_price = float(data["price"])
if paid_price > 100:
logging.warning('bitpay paid amount > $100')
temp_pledge_data = {
'bitpay_invoice_id': temp_pledge.bitpay_invoice_id,
'name': temp_pledge.firstName,
'email': temp_pledge.email,
'phone': temp_pledge.phone,
'occupation': temp_pledge.occupation,
'employer': temp_pledge.employer,
'target': temp_pledge.target,
'subscribe': temp_pledge.subscribe,
'team': temp_pledge.team,
'first_name': temp_pledge.firstName,
'last_name': temp_pledge.lastName,
'address': temp_pledge.address,
'city': temp_pledge.city,
'state': temp_pledge.state,
'zipCode': temp_pledge.zipCode,
'amountCents': int(paid_price * 100),
'pledgeType': 'DONATION'
}
# if the price paid in the confirmed invoice is different, update it here
temp_pledge_data["price"] = data["price"]
if temp_pledge.amountCents != int(paid_price * 100):
  logging.warning('bitpay confirmed amount is different')
id, auth_token, receipt_url = pledge_helper(self, temp_pledge_data, None,
None, None, None)
temp_pledge.pledge_id = id
temp_pledge.put()
# they just look for any 200 response
return self.response
options = util.EnableCors
# Paypal Step 1: We initiate a PAYPAL transaction
class PaypalStartHandler(webapp2.RequestHandler):
"""RESTful handler for Paypal pledge objects."""
def post(self):
"""Create a new pledge, and update user info."""
util.EnableCors(self)
self.response.headers['Content-Type'] = 'application/json'
env = self.app.config['env']
try:
data = json.loads(self.request.body)
except ValueError, e:
logging.warning('Bad JSON request: %s', str(e))
self.error(400)
self.response.write('Invalid request')
return
try:
validictory.validate(data, PLEDGE_SCHEMA)
except ValueError, e:
logging.warning('Schema check failed: %s', str(e))
self.error(400)
self.response.write('Invalid request')
return
rc, paypal_url = paypal.SetExpressCheckout(self.request.host_url, data)
if rc:
json.dump(dict(paypal_url=paypal_url), self.response)
return
logging.warning('PaypalStart failed')
self.error(400)
options = util.EnableCors
# Paypal Step 2: Paypal returns to us, telling us the user has agreed. Book it.
class PaypalReturnHandler(webapp2.RequestHandler):
def get(self):
token = self.request.get("token")
if not token:
token = self.request.get("TOKEN")
payer_id = self.request.get("PayerID")
if not payer_id:
payer_id = self.request.get("PAYERID")
if not token or not payer_id:
logging.warning("Paypal completion missing data: " + self.request.url)
self.error(400)
self.response.write("Unusual error: no token or payer id from Paypal. Please contact [email protected] and report these details:")
self.response.write(self.request.url)
return
# Fetch the details of this pending transaction
form_fields = {
"METHOD": "GetExpressCheckoutDetails",
"TOKEN": token
}
rc, results = paypal.send_request(form_fields)
if not rc:
self.error(400)
self.response.write("Unusual error: Could not get payment details from Paypal. Please contact [email protected] and report these details:")
self.response.write(pprint.pformat(results))
return
data = dict()
name = ""
if 'FIRSTNAME' in results:
data['first_name'] = results['FIRSTNAME'][0]
name += results['FIRSTNAME'][0]
if 'MIDDLENAME' in results:
  name += " " + results['MIDDLENAME'][0]
if 'LASTNAME' in results:
data['last_name'] = results['LASTNAME'][0]
if len(name) > 0:
name += " "
name += results['LASTNAME'][0]
data['name'] = name
paypal_email = results['EMAIL'][0]
amount = results['PAYMENTREQUEST_0_AMT'][0]
cents = int(float(amount) * 100)
data['amountCents'] = cents
payer_id = results['PAYERID'][0]
custom = urlparse.parse_qs(results['CUSTOM'][0])
if custom['email'][0] != paypal_email:
logging.warning("User entered email [%s], but purchased with email [%s]" % (custom['email'][0], paypal_email))
for v in { 'email', 'phone', 'occupation', 'employer', 'target', 'subscribe', 'anonymous', 'pledgeType', 'team', 'surveyResult' }:
if v in custom:
data[v] = custom[v][0]
else:
data[v] = None
if 'team' not in custom:
data['team'] = self.request.cookies.get("last_team_key")
data['subscribe'] = data['subscribe'] == 'True'
rc, results = paypal.DoExpressCheckoutPayment(token, payer_id, amount, custom)
if rc:
request_data = {
'METHOD': 'GetTransactionDetails',
'TRANSACTIONID': results['PAYMENTINFO_0_TRANSACTIONID'][0]
}
rc, txn_data = paypal.send_request(request_data)
if rc:
if 'SHIPTOSTREET' in txn_data:
data['address'] = txn_data['SHIPTOSTREET'][0]
if 'SHIPTOSTREET2' in txn_data:
data['address'] += ', %s' % txn_data['SHIPTOSTREET2'][0]
if 'SHIPTOCITY' in txn_data:
data['city'] = txn_data['SHIPTOCITY'][0]
if 'SHIPTOSTATE' in txn_data:
data['state'] = txn_data['SHIPTOSTATE'][0]
if 'SHIPTOZIP' in txn_data:
data['zipCode'] = txn_data['SHIPTOZIP'][0]
id, auth_token, receipt_url = pledge_helper(self, data, None, None, payer_id, results['PAYMENTINFO_0_TRANSACTIONID'][0])
self.redirect(receipt_url)
else:
self.error(400)
self.response.write("Unusual error: Could not get complete payment from Paypal. Please contact [email protected] and report these details:")
self.response.write(pprint.pformat(results))
return
class IssuePollingHandler(webapp2.RequestHandler):
def get(self):
util.EnableCors(self)
self.response.headers['Content-Type'] = 'application/json'
json.dump(dict({}), self.response) #TODO -- return something sensible
def post(self):
util.EnableCors(self)
email, issues = json.loads(self.request.body).popitem()
for issue in issues:
model.IssueVote.tally(email, issue)
class CandidatePollingHandler(webapp2.RequestHandler):
def options(self):
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
self.response.headers['Access-Control-Max-Age'] = '1000'
self.response.headers['Access-Control-Allow-Headers'] = 'origin, x-csrftoken, content-type, accept'
def get(self):
util.EnableCors(self)
self.response.headers['Content-Type'] = 'application/json'
json.dump(dict({}), self.response) #TODO -- return something sensible
def post(self):
util.EnableCors(self)
email, candidates = json.loads(self.request.body).popitem()
for candidate in candidates:
model.CandidateVote.tally(email, candidate)
HANDLERS = [
('/r/leaderboard', LeaderboardHandler),
('/r/pledgers', PledgersHandler),
('/r/pledge', PledgeHandler),
('/receipt/(.+)', ReceiptHandler),
('/r/payment_config', PaymentConfigHandler),
('/r/total', TotalHandler),
('/r/num_pledges', NumPledgesHandler),
('/r/thank', ThankTeamHandler),
('/r/subscribe', SubscribeHandler),
('/r/bitcoin_start', BitcoinStartHandler),
('/r/bitcoin_notifications', BitcoinNotificationsHandler),
('/r/paypal_start', PaypalStartHandler),
('/r/paypal_return', PaypalReturnHandler),
('/r/issue_polling', IssuePollingHandler),
('/r/candidate_polling', CandidatePollingHandler),
]
|
apache-2.0
|
Mixser/django
|
tests/auth_tests/models/custom_permissions.py
|
295
|
1433
|
"""
The CustomPermissionsUser uses email as the identifier, but uses the normal
Django permissions model. This allows us to check that the PermissionsMixin
includes everything that is needed to interact with the ModelBackend.
"""
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.contrib.auth.tests.custom_user import (
CustomUserManager, RemoveGroupsAndPermissions,
)
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class CustomPermissionsUserManager(CustomUserManager):
def create_superuser(self, email, password, date_of_birth):
u = self.create_user(email, password=password, date_of_birth=date_of_birth)
u.is_superuser = True
u.save(using=self._db)
return u
with RemoveGroupsAndPermissions():
@python_2_unicode_compatible
class CustomPermissionsUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
date_of_birth = models.DateField()
custom_objects = CustomPermissionsUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth']
class Meta:
app_label = 'auth'
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __str__(self):
return self.email
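# Illustrative usage (hypothetical values), exercising the custom manager:
#
#     import datetime
#     u = CustomPermissionsUser.custom_objects.create_superuser(
#         '[email protected]', 'password', datetime.date(1990, 1, 1))
#     assert u.is_superuser
#     assert u.get_short_name() == '[email protected]'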
|
bsd-3-clause
|
mach0/QGIS
|
tests/src/python/test_qgscodeeditorcolorscheme.py
|
25
|
3207
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsCodeEditorColorScheme
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '03/10/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
import sys
from qgis.core import QgsSettings, QgsApplication
from qgis.gui import QgsCodeEditorColorScheme, QgsCodeEditorColorSchemeRegistry, QgsGui
from qgis.PyQt.QtCore import QCoreApplication
from qgis.PyQt.QtGui import QColor, QFont
from qgis.testing import start_app, unittest
from utilities import getTestFont
start_app()
class TestQgsCodeEditorColorScheme(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain("QGIS_TestPyQgsCodeEditorColorScheme.com")
QCoreApplication.setApplicationName("QGIS_TestPyQgsCodeEditorColorScheme")
QgsSettings().clear()
start_app()
def testScheme(self):
scheme = QgsCodeEditorColorScheme('my id', 'my name')
self.assertEqual(scheme.id(), 'my id')
self.assertEqual(scheme.name(), 'my name')
scheme.setColor(QgsCodeEditorColorScheme.ColorRole.Keyword, QColor(255, 0, 0))
scheme.setColor(QgsCodeEditorColorScheme.ColorRole.Method, QColor(0, 255, 0))
self.assertEqual(scheme.color(QgsCodeEditorColorScheme.ColorRole.Keyword).name(), '#ff0000')
self.assertEqual(scheme.color(QgsCodeEditorColorScheme.ColorRole.Method).name(), '#00ff00')
def testSchemeRegistry(self):
default_reg = QgsGui.codeEditorColorSchemeRegistry()
self.assertGreaterEqual(len(default_reg.schemes()), 3)
registry = QgsCodeEditorColorSchemeRegistry()
self.assertCountEqual(registry.schemes(), ['default', 'solarized', 'solarized_dark'])
self.assertEqual(registry.scheme('solarized').name(), 'Solarized (Light)')
self.assertEqual(registry.scheme('solarized_dark').name(), 'Solarized (Dark)')
# duplicate name
scheme = QgsCodeEditorColorScheme('solarized', 'my name')
self.assertFalse(registry.addColorScheme(scheme))
# unique name
scheme = QgsCodeEditorColorScheme('xxxx', 'my name')
self.assertTrue(registry.addColorScheme(scheme))
self.assertCountEqual(registry.schemes(), ['default', 'solarized', 'solarized_dark', 'xxxx'])
self.assertEqual(registry.scheme('xxxx').name(), 'my name')
self.assertFalse(registry.removeColorScheme('yyyy'))
self.assertCountEqual(registry.schemes(), ['default', 'solarized', 'solarized_dark', 'xxxx'])
self.assertTrue(registry.removeColorScheme('xxxx'))
self.assertCountEqual(registry.schemes(), ['default', 'solarized', 'solarized_dark'])
# should return default registry if matching one doesn't exist
self.assertEqual(registry.scheme('xxxx').name(), 'Default')
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
veger/ansible
|
lib/ansible/modules/network/f5/bigip_monitor_tcp_half_open.py
|
3
|
20328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_monitor_tcp_half_open
short_description: Manages F5 BIG-IP LTM tcp half-open monitors
description: Manages F5 BIG-IP LTM tcp half-open monitors.
version_added: 2.4
options:
name:
description:
- Monitor name.
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(tcp_half_open)
parent on the C(Common) partition.
default: /Common/tcp_half_open
description:
description:
- The description of the monitor.
version_added: 2.7
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
- If this value is an IP address, and the C(type) is C(tcp) (the default),
then a C(port) number must be specified.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
must be specified.
version_added: 2.5
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. If this parameter is not
provided when creating a new monitor, then the default value will be 16.
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. If this parameter is not provided when creating
a new monitor, then the default value will be 0.
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
version_added: 2.5
notes:
- Requires BIG-IP software version >= 12
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create TCP half-open Monitor
bigip_monitor_tcp_half_open:
state: present
ip: 10.10.10.10
name: my_tcp_monitor
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Remove TCP half-open Monitor
bigip_monitor_tcp_half_open:
state: absent
name: my_tcp_monitor
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add half-open monitor for all addresses, port 514
bigip_monitor_tcp_half_open:
port: 514
name: my_tcp_monitor
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: str
sample: tcp
description:
description: The description of the monitor.
returned: changed
type: str
sample: Important Monitor
ip:
description: The new IP of IP/port definition.
returned: changed
type: str
sample: 10.12.13.14
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
time_until_up:
description: The new time in which to mark a system as up after first successful response.
returned: changed
type: int
sample: 2
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.compare import cmp_str_with_none
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.compare import cmp_str_with_none
class Parameters(AnsibleF5Parameters):
api_map = {
'timeUntilUp': 'time_until_up',
'defaultsFrom': 'parent',
'recv': 'receive',
}
api_attributes = [
'timeUntilUp',
'defaultsFrom',
'interval',
'timeout',
'destination',
'description',
]
returnables = [
'parent',
'ip',
'port',
'interval',
'timeout',
'time_until_up',
'description',
]
updatables = [
'destination',
'interval',
'timeout',
'time_until_up',
'description',
]
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
@property
def interval(self):
if self._values['interval'] is None:
return None
# Per BZ617284, the BIG-IP UI does not raise a warning about this.
# So I raise the error instead.
if int(self._values['interval']) < 1 or int(self._values['interval']) > 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
elif self._values['ip'] in ['*', '0.0.0.0']:
return '*'
elif is_valid_ip(self._values['ip']):
return self._values['ip']
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def time_until_up(self):
if self._values['time_until_up'] is None:
return None
return int(self._values['time_until_up'])
@property
def parent(self):
if self._values['parent'] is None:
return None
if self._values['parent'].startswith('/'):
parent = os.path.basename(self._values['parent'])
result = '/{0}/{1}'.format(self.partition, parent)
else:
result = '/{0}/{1}'.format(self.partition, self._values['parent'])
return result
@property
def type(self):
return 'tcp_half_open'
class ApiParameters(Parameters):
@property
def description(self):
if self._values['description'] in [None, 'none']:
return None
return self._values['description']
class ModuleParameters(Parameters):
@property
def description(self):
if self._values['description'] is None:
return None
elif self._values['description'] in ['none', '']:
return ''
return self._values['description']
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def description(self):
return cmp_str_with_none(self.want.description, self.have.description)
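# Sketch of how Difference is consumed (mirrors _update_changed_options in
# ModuleManager below; parameter values are invented):
#
#     diff = Difference(want=ModuleParameters(params=dict(interval=10)),
#                       have=ApiParameters(params=dict(interval=5, timeout=16)))
#     diff.compare('interval')   # -> 10: valid (10 < 16) and changed (10 != 5)
#     diff.compare('timeout')    # falls back to the __default comparison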
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or ('code' in response and response['code'] == 404):
return False
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
self._set_default_creation_values()
if self.module.check_mode:
return True
self.create_on_device()
return True
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.time_until_up is None:
self.want.update({'time_until_up': 0})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-half-open/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/tcp_half_open'),
description=dict(),
ip=dict(),
port=dict(),
interval=dict(type='int'),
timeout=dict(type='int'),
time_until_up=dict(type='int'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
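# Illustrative playbook usage (a hedged sketch: the module name below is an
# assumption inferred from the tcp-half-open endpoint, and all values are
# made up; parameter names follow ArgumentSpec above):
#
#   - name: Create a TCP half-open monitor
#     bigip_monitor_tcp_half_open:
#       name: my_monitor
#       ip: 10.10.10.10
#       port: 80
#       interval: 5
#       timeout: 16
#       state: present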
|
gpl-3.0
|
openstack/sahara
|
sahara/utils/proxy.py
|
1
|
11491
|
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from sahara import conductor as c
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.service.castellan import utils as key_manager
from sahara.service.edp import job_utils
from sahara.service import trusts as t
from sahara.swift import utils as su
from sahara.utils.openstack import base as b
from sahara.utils.openstack import keystone as k
PROXY_DOMAIN = None
conductor = c.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
opts = [
cfg.BoolOpt('use_domain_for_proxy_users',
default=False,
help='Enables Sahara to use a domain for creating temporary '
'proxy users to access Swift. If this is enabled '
'a domain must be created for Sahara to use.'),
cfg.StrOpt('proxy_user_domain_name',
default=None,
help='The domain Sahara will use to create new proxy users '
'for Swift object access.'),
cfg.ListOpt('proxy_user_role_names',
default=['member'],
help='A list of the role names that the proxy user should '
'assume through trust for Swift object access.')
]
CONF.register_opts(opts)
def create_proxy_user_for_job_execution(job_execution):
'''Creates a proxy user and adds the credentials to the job execution
:param job_execution: The job execution model to update
'''
username = 'job_{0}'.format(job_execution.id)
password = key_manager.store_secret(proxy_user_create(username))
current_user = k.auth()
proxy_user = k.auth_for_proxy(username, password)
trust_id = t.create_trust(trustor=current_user,
trustee=proxy_user,
role_names=CONF.proxy_user_role_names)
update = {'job_configs': job_execution.job_configs.to_dict()}
update['job_configs']['proxy_configs'] = {
'proxy_username': username,
'proxy_password': password,
'proxy_trust_id': trust_id
}
conductor.job_execution_update(context.ctx(), job_execution, update)
def delete_proxy_user_for_job_execution(job_execution):
'''Delete a proxy user based on a JobExecution
:param job_execution: The job execution with proxy user information
:returns: An updated job_configs dictionary or None
'''
proxy_configs = job_execution.job_configs.get('proxy_configs')
if proxy_configs is not None:
proxy_username = proxy_configs.get('proxy_username')
proxy_trust_id = proxy_configs.get('proxy_trust_id')
proxy_user = k.auth_for_proxy(proxy_username,
key_manager.get_secret(
proxy_configs.get('proxy_password')),
proxy_trust_id)
t.delete_trust(proxy_user, proxy_trust_id)
proxy_user_delete(proxy_username)
key_manager.delete_secret(proxy_configs.get('proxy_password'))
update = job_execution.job_configs.to_dict()
del update['proxy_configs']
return update
return None
def create_proxy_user_for_cluster(cluster):
'''Creates a proxy user and adds the credentials to the cluster
:param cluster: The cluster model to update
'''
if cluster.cluster_configs.get('proxy_configs'):
return cluster
username = 'cluster_{0}'.format(cluster.id)
password = key_manager.store_secret(proxy_user_create(username))
current_user = k.auth()
proxy_user = k.auth_for_proxy(username, password)
trust_id = t.create_trust(trustor=current_user,
trustee=proxy_user,
role_names=CONF.proxy_user_role_names)
update = {'cluster_configs': cluster.cluster_configs.to_dict()}
update['cluster_configs']['proxy_configs'] = {
'proxy_username': username,
'proxy_password': password,
'proxy_trust_id': trust_id
}
return conductor.cluster_update(context.ctx(), cluster, update)
def delete_proxy_user_for_cluster(cluster):
'''Delete a proxy user based on a Cluster
:param cluster: The cluster model with proxy user information
'''
proxy_configs = cluster.cluster_configs.get('proxy_configs')
if proxy_configs is not None:
proxy_username = proxy_configs.get('proxy_username')
proxy_trust_id = proxy_configs.get('proxy_trust_id')
proxy_user = k.auth_for_proxy(proxy_username,
key_manager.get_secret(
proxy_configs.get('proxy_password')),
proxy_trust_id)
t.delete_trust(proxy_user, proxy_trust_id)
proxy_user_delete(proxy_username)
key_manager.delete_secret(proxy_configs.get('proxy_password'))
update = {'cluster_configs': cluster.cluster_configs.to_dict()}
del update['cluster_configs']['proxy_configs']
conductor.cluster_update(context.ctx(), cluster, update)
def domain_for_proxy():
'''Return the proxy domain or None
If configured to use the proxy domain, this function will return that
domain. If not configured to use the proxy domain, this function will
return None. If the proxy domain can't be found this will raise an
exception.
:returns: A Keystone Domain object or None.
:raises ConfigurationError: If the domain is requested but not specified.
:raises NotFoundException: If the domain name is specified but cannot be
found.
'''
if CONF.use_domain_for_proxy_users is False:
return None
if CONF.proxy_user_domain_name is None:
raise ex.ConfigurationError(_('Proxy domain requested but not '
'specified.'))
admin = k.client_for_admin()
global PROXY_DOMAIN
if not PROXY_DOMAIN:
domain_list = b.execute_with_retries(
admin.domains.list, name=CONF.proxy_user_domain_name)
if len(domain_list) == 0:
raise ex.NotFoundException(
value=CONF.proxy_user_domain_name,
message_template=_('Failed to find domain %s'))
# the domain name should be globally unique in Keystone
if len(domain_list) > 1:
raise ex.NotFoundException(
value=CONF.proxy_user_domain_name,
message_template=_('Unexpected results found when searching '
'for domain %s'))
PROXY_DOMAIN = domain_list[0]
return PROXY_DOMAIN
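# Minimal configuration sketch (illustrative; the domain name below is a
# made-up example). Given the options registered above, domain_for_proxy()
# only resolves a domain when sahara.conf contains something like:
#
#   [DEFAULT]
#   use_domain_for_proxy_users = True
#   proxy_user_domain_name = sahara_proxy
#
# The domain must already exist in Keystone: this function looks it up and
# caches it in PROXY_DOMAIN, it never creates it.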
def job_execution_requires_proxy_user(job_execution):
'''Returns True if the job execution requires a proxy user.'''
def _check_values(values):
return any(value.startswith(
su.SWIFT_INTERNAL_PREFIX) for value in values if (
isinstance(value, six.string_types)))
if CONF.use_domain_for_proxy_users is False:
return False
paths = [conductor.data_source_get(context.ctx(), job_execution.output_id),
conductor.data_source_get(context.ctx(), job_execution.input_id)]
if _check_values(ds.url for ds in paths if ds):
return True
if _check_values(six.itervalues(
job_execution.job_configs.get('configs', {}))):
return True
if _check_values(six.itervalues(
job_execution.job_configs.get('params', {}))):
return True
if _check_values(job_execution.job_configs.get('args', [])):
return True
job = conductor.job_get(context.ctx(), job_execution.job_id)
if _check_values(main.url for main in job.mains):
return True
if _check_values(lib.url for lib in job.libs):
return True
# We did the simple checks, now if data_source referencing is
# enabled and we have values that could be a name or uuid,
# query for data_sources that match and contain a swift path
by_name, by_uuid = job_utils.may_contain_data_source_refs(
job_execution.job_configs)
if by_name:
names = tuple(job_utils.find_possible_data_source_refs_by_name(
job_execution.job_configs))
# do a query here for name in names and path starts with swift-prefix
if names and conductor.data_source_count(
context.ctx(),
name=names,
url=su.SWIFT_INTERNAL_PREFIX+'%') > 0:
return True
if by_uuid:
uuids = tuple(job_utils.find_possible_data_source_refs_by_uuid(
job_execution.job_configs))
# do a query here for id in uuids and path starts with swift-prefix
if uuids and conductor.data_source_count(
context.ctx(),
id=uuids,
url=su.SWIFT_INTERNAL_PREFIX+'%') > 0:
return True
return False
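# Illustrative note (assuming su.SWIFT_INTERNAL_PREFIX is the 'swift://'
# scheme prefix): a job execution whose input data source URL is, e.g.,
#
#   swift://container.sahara/path/to/input
#
# makes _check_values() return True above, so a proxy user is required.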
def proxy_domain_users_list():
'''Return a list of all users in the proxy domain.'''
admin = k.client_for_admin()
domain = domain_for_proxy()
if domain:
return b.execute_with_retries(admin.users.list, domain=domain.id)
return []
def proxy_user_create(username):
'''Create a new user in the proxy domain
Creates the username specified with a random password.
:param username: The name of the new user.
:returns: The password created for the user.
'''
admin = k.client_for_admin()
domain = domain_for_proxy()
password = uuidutils.generate_uuid()
b.execute_with_retries(
admin.users.create, name=username, password=password, domain=domain.id)
LOG.debug('Created proxy user {username}'.format(username=username))
return password
def proxy_user_delete(username=None, user_id=None):
'''Delete the user from the proxy domain.
:param username: The name of the user to delete.
:param user_id: The id of the user to delete, if provided this overrides
the username.
:raises NotFoundException: If there is an error locating the user in the
proxy domain.
'''
admin = k.client_for_admin()
if not user_id:
domain = domain_for_proxy()
user_list = b.execute_with_retries(
admin.users.list, domain=domain.id, name=username)
if len(user_list) == 0:
raise ex.NotFoundException(
value=username,
message_template=_('Failed to find user %s'))
if len(user_list) > 1:
raise ex.NotFoundException(
value=username,
message_template=_('Unexpected results found when searching '
'for user %s'))
user_id = user_list[0].id
b.execute_with_retries(admin.users.delete, user_id)
LOG.debug('Deleted proxy user id {user_id}'.format(user_id=user_id))
|
apache-2.0
|
williamfeng323/py-web
|
flask/lib/python3.6/site-packages/pip/req/req_install.py
|
335
|
46487
|
from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import tempfile
import traceback
import warnings
import zipfile
from distutils import sysconfig
from distutils.util import change_root
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.markers import Marker
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import Version, parse as parse_version
from pip._vendor.six.moves import configparser
import pip.wheel
from pip.compat import native_str, get_stdlib, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError,
)
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path,
call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir,
get_installed_version, normalize_path, dist_is_local,
)
from pip.utils.hashes import Hashes
from pip.utils.deprecation import RemovedInPip10Warning
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip.utils.ui import open_spinner
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel
logger = logging.getLogger(__name__)
operators = specifiers.Specifier._operators.keys()
def _strip_extras(path):
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
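# Doctest-style sketch of _strip_extras (outputs follow from the regex
# above; the inputs are made-up examples):
#
#   >>> _strip_extras('requests[security,socks]')
#   ('requests', '[security,socks]')
#   >>> _strip_extras('./local/project')
#   ('./local/project', None)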
def _safe_extras(extras):
return set(pkg_resources.safe_extra(extra) for extra in extras)
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
link=None, as_egg=False, update=True,
pycompile=True, markers=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
self.extras = ()
if isinstance(req, six.string_types):
try:
req = Requirement(req)
except InvalidRequirement:
if os.path.sep in req:
add_msg = "It looks like a path. Does it exist ?"
elif '=' in req and not any(op in req for op in operators):
add_msg = "= is not a valid operator. Did you mean == ?"
else:
add_msg = traceback.format_exc()
raise InstallationError(
"Invalid requirement: '%s'\n%s" % (req, add_msg))
self.extras = _safe_extras(req.extras)
self.req = req
self.comes_from = comes_from
self.constraint = constraint
self.source_dir = source_dir
self.editable = editable
self._wheel_cache = wheel_cache
self.link = self.original_link = link
self.as_egg = as_egg
if markers is not None:
self.markers = markers
else:
self.markers = req and req.marker
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
# This hold the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
# Temporary build location
self._temp_build_dir = None
# Used to store the global directory where the _temp_build_dir should
# have been created. Cf _correct_build_location method.
self._ideal_build_dir = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
# Set True if a legitimate do-nothing-on-uninstall has happened - e.g.
# system site packages, stdlib packages.
self.nothing_to_uninstall = False
self.use_user_site = False
self.target_dir = None
self.options = options if options else {}
self.pycompile = pycompile
# Set to True after successful preparation of this requirement
self.prepared = False
self.isolated = isolated
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None,
isolated=False, options=None, wheel_cache=None,
constraint=False):
from pip.index import Link
name, url, extras_override = parse_editable(
editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir,
editable=True,
link=Link(url),
constraint=constraint,
isolated=isolated,
options=options if options else {},
wheel_cache=wheel_cache)
if extras_override is not None:
res.extras = _safe_extras(extras_override)
return res
@classmethod
def from_line(
cls, name, comes_from=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
from pip.index import Link
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers = name.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = Marker(markers)
else:
markers = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
extras = None
if is_url(name):
link = Link(name)
else:
p, extras = _strip_extras(path)
if (os.path.isdir(p) and
(os.path.sep in name or name.startswith('.'))):
if not is_installable_dir(p):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' "
"not found." % name
)
link = Link(path_to_url(p))
elif is_archive_file(p):
if not os.path.isfile(p):
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
link = Link(path_to_url(p))
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', link.url):
link = Link(
path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
req = "%s==%s" % (wheel.name, wheel.version)
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req = link.egg_fragment
# a requirement specifier
else:
req = name
options = options if options else {}
res = cls(req, comes_from, link=link, markers=markers,
isolated=isolated, options=options,
wheel_cache=wheel_cache, constraint=constraint)
if extras:
res.extras = _safe_extras(
Requirement('placeholder' + extras).extras)
return res
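    # Illustrative inputs that from_line() accepts (a hedged sketch with
    # made-up values, not exhaustive documentation):
    #
    #   InstallRequirement.from_line('requests==2.11.1')
    #   InstallRequirement.from_line('./downloads/pkg.tar.gz')
    #   InstallRequirement.from_line('https://example.com/p.zip#egg=p')
    #   InstallRequirement.from_line('simplejson; python_version < "3"')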
def __str__(self):
if self.req:
s = str(self.req)
if self.link:
s += ' from %s' % self.link.url
else:
s = self.link.url if self.link else None
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def __repr__(self):
return '<%s object: %s editable=%r>' % (
self.__class__.__name__, str(self), self.editable)
def populate_link(self, finder, upgrade, require_hashes):
"""Ensure that if a link can be found for this, that it is found.
        Note that self.link may still be None - if upgrade is False and the
requirement is already installed.
If require_hashes is True, don't use the wheel cache, because cached
wheels, always built locally, have different hashes than the files
downloaded from the index server and thus throw false hash mismatches.
        Furthermore, cached wheels at present have nondeterministic contents
        due to file modification times.
"""
if self.link is None:
self.link = finder.find_requirement(self, upgrade)
if self._wheel_cache is not None and not require_hashes:
old_link = self.link
self.link = self._wheel_cache.cached_wheel(self.link, self.name)
if old_link != self.link:
logger.debug('Using cached wheel link: %s', self.link)
@property
def specifier(self):
return self.req.specifier
@property
def is_pinned(self):
"""Return whether I am pinned to an exact version.
For example, some-package==1.2 is pinned; some-package>1.2 is not.
"""
specifiers = self.specifier
return (len(specifiers) == 1 and
next(iter(specifiers)).operator in ('==', '==='))
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
# for requirement via a path to a directory: the name of the
# package is not available yet so we create a temp directory
# Once run_egg_info will have run, we'll be able
# to fix it via _correct_build_location
# Some systems have /tmp as a symlink which confuses custom
# builds (such as numpy). Thus, we ensure that the real path
# is returned.
self._temp_build_dir = os.path.realpath(
tempfile.mkdtemp('-build', 'pip-')
)
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
logger.debug('Creating directory %s', build_dir)
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def _correct_build_location(self):
"""Move self._temp_build_dir to self._ideal_build_dir/self.req.name
For some requirements (e.g. a path to a directory), the name of the
package is not available until we run egg_info, so the build_location
will return a temporary directory and store the _ideal_build_dir.
This is only called by self.egg_info_path to fix the temporary build
directory.
"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
assert self._ideal_build_dir
old_location = self._temp_build_dir
self._temp_build_dir = None
new_location = self.build_location(self._ideal_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self._ideal_build_dir = None
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(pkg_resources.safe_name(self.req.name))
@property
def setup_py_dir(self):
return os.path.join(
self.source_dir,
self.link and self.link.subdirectory_fragment or '')
@property
def setup_py(self):
assert self.source_dir, "No source dir for %s" % self
try:
import setuptools # noqa
except ImportError:
if get_installed_version('setuptools') is None:
add_msg = "Please install setuptools."
else:
add_msg = traceback.format_exc()
# Setuptools is not available
raise InstallationError(
"Could not import setuptools which is required to "
"install from a source distribution.\n%s" % add_msg
)
setup_py = os.path.join(self.setup_py_dir, 'setup.py')
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
def run_egg_info(self):
assert self.source_dir
if self.name:
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
self.setup_py, self.name,
)
else:
logger.debug(
'Running setup.py (path:%s) egg_info for package from %s',
self.setup_py, self.link,
)
with indent_log():
script = SETUPTOOLS_SHIM % self.setup_py
base_cmd = [sys.executable, '-c', script]
if self.isolated:
base_cmd += ["--no-user-cfg"]
egg_info_cmd = base_cmd + ['egg_info']
# We can't put the .egg-info files at the root, because then the
# source code will be mistaken for an installed egg, causing
# problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info')
ensure_dir(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=self.setup_py_dir,
show_stdout=False,
command_desc='python setup.py egg_info')
if not self.req:
if isinstance(parse_version(self.pkg_info()["Version"]), Version):
op = "=="
else:
op = "==="
self.req = Requirement(
"".join([
self.pkg_info()["Name"],
op,
self.pkg_info()["Version"],
])
)
self._correct_build_location()
else:
metadata_name = canonicalize_name(self.pkg_info()["Name"])
if canonicalize_name(self.req.name) != metadata_name:
logger.warning(
'Running setup.py (path:%s) egg_info for package %s '
'produced metadata for project name %s. Fix your '
'#egg=%s fragments.',
self.setup_py, self.name, metadata_name, self.name
)
self.req = Requirement(metadata_name)
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.setup_py_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.lexists(
os.path.join(root, dir, 'bin', 'python')
) or
os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
            if not filenames:
                raise InstallationError(
                    'No files/directories in %s (from %s)' % (base, filename)
                )
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep) +
(os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warning(
'No PKG-INFO file found in %s',
display_path(self.egg_info_path('PKG-INFO')),
)
p.feed(data or '')
return p.close()
_requirements_section_re = re.compile(r'\[(.*?)\]')
@property
def installed_version(self):
return get_installed_version(self.name)
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if self.req.specifier and version not in self.req.specifier:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == 'file':
# Static paths don't get updated
return
assert '+' in self.link.url, "bad url: %r" % self.link.url
if not self.update:
return
vc_type, url = self.link.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.link.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.link, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError(
"Cannot uninstall requirement %s, not installed" % (self.name,)
)
dist = self.satisfied_by or self.conflicts_with
dist_path = normalize_path(dist.location)
if not dist_is_local(dist):
logger.info(
"Not uninstalling %s at %s, outside environment %s",
dist.key,
dist_path,
sys.prefix,
)
self.nothing_to_uninstall = True
return
if dist_path in get_stdlib():
logger.info(
"Not uninstalling %s at %s, as it is in the standard library.",
dist.key,
dist_path,
)
self.nothing_to_uninstall = True
return
paths_to_remove = UninstallPathSet(dist)
develop_egg_link = egg_link_path(dist)
develop_egg_link_egg_info = '{0}.egg-info'.format(
pkg_resources.to_filename(dist.project_name))
egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
# Special case for distutils installed package
distutils_egg_info = getattr(dist._provider, 'path', None)
        # The order of the uninstall cases matters: with two installs of the
        # same package, pip needs to uninstall the currently detected version
if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
not dist.egg_info.endswith(develop_egg_link_egg_info)):
# if dist.egg_info.endswith(develop_egg_link_egg_info), we
# are in fact in the develop_egg_link case
paths_to_remove.add(dist.egg_info)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(dist.egg_info, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
paths_to_remove.add(path + '.pyo')
elif distutils_egg_info:
warnings.warn(
"Uninstalling a distutils installed project ({0}) has been "
"deprecated and will be removed in a future version. This is "
"due to the fact that uninstalling a distutils project will "
"only partially uninstall the project.".format(self.name),
RemovedInPip10Warning,
)
paths_to_remove.add(distutils_egg_info)
elif dist.location.endswith('.egg'):
# package installed by easy_install
# We cannot match on dist.egg_name because it can slightly vary
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
paths_to_remove.add(dist.location)
easy_install_egg = os.path.split(dist.location)[1]
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
for path in pip.wheel.uninstallation_paths(dist):
paths_to_remove.add(path)
elif develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, self.name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
else:
logger.debug(
'Not sure how to uninstall: %s - Check: %s',
dist, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
if six.PY2:
options = {}
else:
options = {"delimiters": ('=', )}
config = configparser.SafeConfigParser(**options)
config.readfp(
FakeFile(dist.get_metadata_lines('entry_points.txt'))
)
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, name))
if WINDOWS:
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe.manifest'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '-script.py'
)
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error(
"Can't rollback %s, nothing uninstalled.", self.name,
)
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
elif not self.nothing_to_uninstall:
logger.error(
"Can't commit %s, nothing uninstalled.", self.name,
)
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort ' %
display_path(archive_path), ('i', 'w', 'b', 'a'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
elif response == 'a':
sys.exit(-1)
if create_archive:
zip = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True
)
dir = os.path.normcase(os.path.abspath(self.setup_py_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
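    # Doctest-style sketch (made-up POSIX paths; the result follows from the
    # slicing above):
    #
    #   >>> req._clean_zip_name('/tmp/build/pkg/mod.py', '/tmp/build')
    #   'pkg/mod.py'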
def match_markers(self, extras_requested=None):
if not extras_requested:
# Provide an extra to safely evaluate the markers
# without matching any extra
extras_requested = ('',)
if self.markers is not None:
return any(
self.markers.evaluate({'extra': extra})
for extra in extras_requested)
else:
return True
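    # Hedged sketch of the marker evaluation used above (packaging's Marker
    # API; 'extra' must be present in the environment when extras markers
    # are involved):
    #
    #   >>> Marker('python_version >= "2.7"').evaluate({'extra': ''})
    #   True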
def install(self, install_options, global_options=[], root=None,
prefix=None):
if self.editable:
self.install_editable(
install_options, global_options, prefix=prefix)
return
if self.is_wheel:
version = pip.wheel.wheel_version(self.source_dir)
pip.wheel.check_compatibility(version, self.name)
self.move_wheel_files(self.source_dir, root=root, prefix=prefix)
self.install_succeeded = True
return
# Extend the list of global and install options passed on to
# the setup.py call with the ones from the requirements file.
# Options specified in requirements file override those
# specified on the command line, since the last option given
# to setup.py is the one that is used.
        global_options = list(global_options) + self.options.get(
            'global_options', [])
        install_options = list(install_options) + self.options.get(
            'install_options', [])
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = self.get_install_args(
global_options, record_filename, root, prefix)
msg = 'Running setup.py install for %s' % (self.name,)
with open_spinner(msg) as spinner:
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.setup_py_dir,
show_stdout=False,
spinner=spinner,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
if self.as_egg:
                # there's no --always-unzip option we can pass to install
                # command, so we are unable to save the installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
os.path.relpath(
prepend_root(filename), egg_info_dir)
)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
rmtree(temp_location)
def ensure_has_source_dir(self, parent_dir):
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.build_location(parent_dir)
return self.source_dir
def get_install_args(self, global_options, record_filename, root, prefix):
install_args = [sys.executable, "-u"]
install_args.append('-c')
install_args.append(SETUPTOOLS_SHIM % self.setup_py)
install_args += list(global_options) + \
['install', '--record', record_filename]
if not self.as_egg:
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if prefix is not None:
install_args += ['--prefix', prefix]
if self.pycompile:
install_args += ["--compile"]
else:
install_args += ["--no-compile"]
if running_under_virtualenv():
py_ver_str = 'python' + sysconfig.get_python_version()
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
py_ver_str, self.name)]
return install_args
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.source_dir and os.path.exists(
os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
logger.debug('Removing source in %s', self.source_dir)
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options,
global_options=(), prefix=None):
logger.info('Running setup.py develop for %s', self.name)
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
if prefix:
prefix_param = ['--prefix={0}'.format(prefix)]
install_options = list(install_options) + prefix_param
with indent_log():
# FIXME: should we do --install-headers here too?
call_subprocess(
[
sys.executable,
'-c',
SETUPTOOLS_SHIM % self.setup_py
] +
list(global_options) +
['develop', '--no-deps'] +
list(install_options),
cwd=self.setup_py_dir,
show_stdout=False)
self.install_succeeded = True
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately.
"""
if self.req is None:
return False
try:
# get_distribution() will resolve the entire list of requirements
# anyway, and we've already determined that we need the requirement
# in question, so strip the marker so that we don't try to
# evaluate it.
no_marker = Requirement(str(self.req))
no_marker.marker = None
self.satisfied_by = pkg_resources.get_distribution(str(no_marker))
if self.editable and self.satisfied_by:
self.conflicts_with = self.satisfied_by
# when installing editables, nothing pre-existing should ever
# satisfy
self.satisfied_by = None
return True
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.name
)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv() and
dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.link and self.link.is_wheel
def move_wheel_files(self, wheeldir, root=None, prefix=None):
move_wheel_files(
self.name, self.req, wheeldir,
user=self.use_user_site,
home=self.target_dir,
root=root,
prefix=prefix,
pycompile=self.pycompile,
isolated=self.isolated,
)
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('').rstrip('/')
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata)
@property
def has_hash_options(self):
"""Return whether any known-good hashes are specified as options.
These activate --require-hashes mode; hashes specified as part of a
URL do not.
"""
return bool(self.options.get('hashes', {}))
def hashes(self, trust_internet=True):
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
"""
good_hashes = self.options.get('hashes', {}).copy()
link = self.link if trust_internet else self.original_link
if link and link.hash:
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes)
def _strip_postfix(req):
"""
    Strip req postfix (-dev, -0.2, etc.)
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
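# Doctest-style sketch of _strip_postfix (outputs follow from the regex):
#
#   >>> _strip_postfix('somepackage-dev')
#   'somepackage'
#   >>> _strip_postfix('somepackage-0.2')
#   'somepackage'
#   >>> _strip_postfix('somepackage')
#   'somepackage'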
def parse_editable(editable_req, default_vcs=None):
"""Parses an editable requirement into:
- a requirement name
    - a URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
from pip.index import Link
url = editable_req
extras = None
# If a file path is specified with extras, strip off the extras.
m = re.match(r'^(.+)(\[[^\]]+\])$', url)
if m:
url_no_extras = m.group(1)
extras = m.group(2)
else:
url_no_extras = url
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not found." %
url_no_extras
)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
package_name = Link(url_no_extras).egg_fragment
if extras:
return (
package_name,
url_no_extras,
Requirement("placeholder" + extras.lower()).extras,
)
else:
return package_name, url_no_extras, None
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
break
if '+' not in url:
if default_vcs:
warnings.warn(
"--default-vcs has been deprecated and will be removed in "
"the future.",
RemovedInPip10Warning,
)
url = default_vcs + '+' + url
else:
raise InstallationError(
'%s should either be a path to a local project or a VCS url '
'beginning with svn+, git+, hg+, or bzr+' %
editable_req
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
package_name = Link(url).egg_fragment
if not package_name:
raise InstallationError(
"Could not detect requirement name, please specify one with #egg="
)
return _strip_postfix(package_name), url, None
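# Illustrative call (made-up URL), assuming the git backend is registered
# with vcs:
#
#   >>> parse_editable('git+https://example.com/repo.git#egg=mypkg')
#   ('mypkg', 'git+https://example.com/repo.git#egg=mypkg', None)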
|
mit
|
andela-ifageyinbo/django
|
tests/auth_tests/test_context_processors.py
|
269
|
6773
|
import datetime
from django.contrib.auth import authenticate
from django.contrib.auth.context_processors import PermLookupDict, PermWrapper
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.test import SimpleTestCase, TestCase, override_settings
from .settings import AUTH_MIDDLEWARE_CLASSES, AUTH_TEMPLATES
class MockUser(object):
def has_module_perms(self, perm):
if perm == 'mockapp':
return True
return False
def has_perm(self, perm):
if perm == 'mockapp.someperm':
return True
return False
class PermWrapperTests(SimpleTestCase):
"""
Test some details of the PermWrapper implementation.
"""
class EQLimiterObject(object):
"""
This object makes sure __eq__ will not be called endlessly.
"""
def __init__(self):
self.eq_calls = 0
def __eq__(self, other):
if self.eq_calls > 0:
return True
self.eq_calls += 1
return False
def test_permwrapper_in(self):
"""
Test that 'something' in PermWrapper works as expected.
"""
perms = PermWrapper(MockUser())
# Works for modules and full permissions.
self.assertIn('mockapp', perms)
self.assertNotIn('nonexisting', perms)
self.assertIn('mockapp.someperm', perms)
self.assertNotIn('mockapp.nonexisting', perms)
def test_permlookupdict_in(self):
"""
No endless loops if accessed with 'in' - refs #18979.
"""
pldict = PermLookupDict(MockUser(), 'mockapp')
with self.assertRaises(TypeError):
self.EQLimiterObject() in pldict
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls',
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False, # required for loading the fixture
)
class AuthContextProcessorTests(TestCase):
"""
Tests for the ``django.contrib.auth.context_processors.auth`` processor
"""
@classmethod
def setUpTestData(cls):
# password = "secret"
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='[email protected]',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
@override_settings(MIDDLEWARE_CLASSES=AUTH_MIDDLEWARE_CLASSES)
def test_session_not_accessed(self):
"""
Tests that the session is not accessed simply by including
the auth context processor
"""
response = self.client.get('/auth_processor_no_attr_access/')
self.assertContains(response, "Session not accessed")
@override_settings(MIDDLEWARE_CLASSES=AUTH_MIDDLEWARE_CLASSES)
def test_session_is_accessed(self):
"""
Tests that the session is accessed if the auth context processor
is used and relevant attributes accessed.
"""
response = self.client.get('/auth_processor_attr_access/')
self.assertContains(response, "Session accessed")
def test_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_perm_in_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perm_in_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_message_attrs(self):
self.client.login(username='super', password='secret')
response = self.client.get('/auth_processor_messages/')
self.assertContains(response, "Message 1")
def test_user_attrs(self):
"""
Test that the lazy objects returned behave just like the wrapped objects.
"""
# These are 'functional' level tests for common use cases. Direct
# testing of the implementation (SimpleLazyObject) is in the 'utils'
# tests.
self.client.login(username='super', password='secret')
user = authenticate(username='super', password='secret')
response = self.client.get('/auth_processor_user/')
self.assertContains(response, "unicode: super")
self.assertContains(response, "id: 100")
self.assertContains(response, "username: super")
# bug #12037 is tested by the {% url %} in the template:
self.assertContains(response, "url: /userpage/super/")
# See if this object can be used for queries where a Q() comparing
# a user can be used with another Q() (in an AND or OR fashion).
# This simulates what a template tag might do with the user from the
# context. Note that we don't need to execute a query, just build it.
#
# The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
# User is a fatal TypeError: "function() takes at least 2 arguments
# (0 given)" deep inside deepcopy().
#
# Python 2.5 and 2.6 succeeded, but logged internally caught exception
# spew:
#
# Exception RuntimeError: 'maximum recursion depth exceeded while
# calling a Python object' in <type 'exceptions.AttributeError'>
# ignored"
Q(user=response.context['user']) & Q(someflag=True)
# Tests for user equality. This is hard because User defines
# equality in a non-duck-typing way
# See bug #12060
self.assertEqual(response.context['user'], user)
self.assertEqual(user, response.context['user'])
|
bsd-3-clause
|
chintak/scikit-image
|
skimage/color/colorconv.py
|
2
|
36639
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions for converting between color spaces.
The "central" color space in this module is RGB, more specifically the linear
sRGB color space using D65 as a white-point [1]_. This represents a
standard monitor (w/o gamma correction). For a good FAQ on color spaces see
[2]_.
The API consists of functions to convert to and from RGB as defined above, as
well as a generic function to convert to and from any supported color space
(which is done through RGB in most cases).
Supported color spaces
----------------------
* RGB : Red Green Blue.
Here the sRGB standard [1]_.
* HSV : Hue, Saturation, Value.
Uniquely defined when related to sRGB [3]_.
* RGB CIE : Red Green Blue.
    The original RGB CIE standard from 1931 [4]_. Primary colors are 700 nm
    (red), 546.1 nm (green) and 435.8 nm (blue).
* XYZ CIE : XYZ
Derived from the RGB CIE color space. Chosen such that
``x == y == z == 1/3`` at the whitepoint, and all color matching
functions are greater than zero everywhere.
* LAB CIE : Lightness, a, b
Colorspace derived from XYZ CIE that is intended to be more
perceptually uniform
* LUV CIE : Lightness, u, v
Colorspace derived from XYZ CIE that is intended to be more
perceptually uniform
* LCH CIE : Lightness, Chroma, Hue
    Defined in terms of LAB CIE. C and H are the polar representation of
    a and b. The polar angle (H) is defined to be on ``(0, 2*pi)``
:author: Nicolas Pinto (rgb2hsv)
:author: Ralf Gommers (hsv2rgb)
:author: Travis Oliphant (XYZ and RGB CIE functions)
:author: Matt Terry (lab2lch)
:license: modified BSD
References
----------
.. [1] Official specification of sRGB, IEC 61966-2-1:1999.
.. [2] http://www.poynton.com/ColorFAQ.html
.. [3] http://en.wikipedia.org/wiki/HSL_and_HSV
.. [4] http://en.wikipedia.org/wiki/CIE_1931_color_space
"""
from __future__ import division
import numpy as np
from scipy import linalg
from ..util import dtype
from skimage._shared.utils import deprecated
def guess_spatial_dimensions(image):
"""Make an educated guess about whether an image has a channels dimension.
Parameters
----------
image : ndarray
The input image.
Returns
-------
spatial_dims : int or None
The number of spatial dimensions of `image`. If ambiguous, the value
is ``None``.
Raises
------
ValueError
If the image array has less than two or more than four dimensions.
"""
if image.ndim == 2:
return 2
if image.ndim == 3 and image.shape[-1] != 3:
return 3
if image.ndim == 3 and image.shape[-1] == 3:
return None
if image.ndim == 4 and image.shape[-1] == 3:
return 3
else:
raise ValueError("Expected 2D, 3D, or 4D array, got %iD." % image.ndim)
@deprecated()
def is_rgb(image):
"""Test whether the image is RGB or RGBA.
Parameters
----------
image : ndarray
Input image.
"""
return (image.ndim == 3 and image.shape[2] in (3, 4))
@deprecated()
def is_gray(image):
"""Test whether the image is gray (i.e. has only one color band).
Parameters
----------
image : ndarray
Input image.
"""
return image.ndim in (2, 3) and not is_rgb(image)
def convert_colorspace(arr, fromspace, tospace):
"""Convert an image array to a new color space.
Parameters
----------
arr : array_like
The image to convert.
fromspace : str
The color space to convert from. Valid color space strings are
``['RGB', 'HSV', 'RGB CIE', 'XYZ']``. Value may also be specified as
lower case.
tospace : str
The color space to convert to. Valid color space strings are
``['RGB', 'HSV', 'RGB CIE', 'XYZ']``. Value may also be specified as
lower case.
Returns
-------
newarr : ndarray
The converted image.
Notes
-----
Conversion occurs through the "central" RGB color space, i.e. conversion
from XYZ to HSV is implemented as ``XYZ -> RGB -> HSV`` instead of directly.
Examples
--------
>>> from skimage import data
>>> lena = data.lena()
>>> lena_hsv = convert_colorspace(lena, 'RGB', 'HSV')
"""
fromdict = {'RGB': lambda im: im, 'HSV': hsv2rgb, 'RGB CIE': rgbcie2rgb,
'XYZ': xyz2rgb}
todict = {'RGB': lambda im: im, 'HSV': rgb2hsv, 'RGB CIE': rgb2rgbcie,
'XYZ': rgb2xyz}
fromspace = fromspace.upper()
tospace = tospace.upper()
    if fromspace not in fromdict:
        raise ValueError('fromspace needs to be one of %s' % fromdict.keys())
    if tospace not in todict:
        raise ValueError('tospace needs to be one of %s' % todict.keys())
return todict[tospace](fromdict[fromspace](arr))
def _prepare_colorarray(arr):
"""Check the shape of the array and convert it to
floating point representation.
"""
arr = np.asanyarray(arr)
if arr.ndim not in [3, 4] or arr.shape[-1] != 3:
msg = ("the input array must be have a shape == (.., ..,[ ..,] 3)), " +
"got (" + (", ".join(map(str, arr.shape))) + ")")
raise ValueError(msg)
return dtype.img_as_float(arr)
def rgb2hsv(rgb):
"""RGB to HSV color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in HSV format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
The conversion assumes an input data range of [0, 1] for all
color components.
Conversion between RGB and HSV color spaces results in some loss of
precision, due to integer arithmetic and rounding [1]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/HSL_and_HSV
Examples
--------
>>> from skimage import color
>>> from skimage import data
>>> lena = data.lena()
>>> lena_hsv = color.rgb2hsv(lena)
"""
arr = _prepare_colorarray(rgb)
out = np.empty_like(arr)
# -- V channel
out_v = arr.max(-1)
# -- S channel
delta = arr.ptp(-1)
# Ignore warning for zero divided by zero
old_settings = np.seterr(invalid='ignore')
out_s = delta / out_v
out_s[delta == 0.] = 0.
# -- H channel
# red is max
idx = (arr[:, :, 0] == out_v)
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[:, :, 1] == out_v)
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[:, :, 2] == out_v)
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out_h = (out[:, :, 0] / 6.) % 1.
out_h[delta == 0.] = 0.
np.seterr(**old_settings)
# -- output
out[:, :, 0] = out_h
out[:, :, 1] = out_s
out[:, :, 2] = out_v
# remove NaN
out[np.isnan(out)] = 0
return out
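# Hand-checked sketch of the conversion above (pure red; the values follow
# from the per-channel formulas):
#
#   >>> rgb2hsv(np.array([[[1.0, 0.0, 0.0]]]))
#   array([[[ 0.,  1.,  1.]]])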
def hsv2rgb(hsv):
"""HSV to RGB color space conversion.
Parameters
----------
hsv : array_like
The image in HSV format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `hsv` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
The conversion assumes an input data range of ``[0, 1]`` for all
color components.
Conversion between RGB and HSV color spaces results in some loss of
precision, due to integer arithmetic and rounding [1]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/HSL_and_HSV
Examples
--------
>>> from skimage import data
>>> lena = data.lena()
>>> lena_hsv = rgb2hsv(lena)
>>> lena_rgb = hsv2rgb(lena_hsv)
"""
arr = _prepare_colorarray(hsv)
hi = np.floor(arr[:, :, 0] * 6)
f = arr[:, :, 0] * 6 - hi
p = arr[:, :, 2] * (1 - arr[:, :, 1])
q = arr[:, :, 2] * (1 - f * arr[:, :, 1])
t = arr[:, :, 2] * (1 - (1 - f) * arr[:, :, 1])
v = arr[:, :, 2]
hi = np.dstack([hi, hi, hi]).astype(np.uint8) % 6
out = np.choose(hi, [np.dstack((v, t, p)),
np.dstack((q, v, p)),
np.dstack((p, v, t)),
np.dstack((p, q, v)),
np.dstack((t, p, v)),
np.dstack((v, p, q))])
return out
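# Hand-checked inverse of the rgb2hsv example (pure red round-trips):
#
#   >>> hsv2rgb(np.array([[[0.0, 1.0, 1.0]]]))
#   array([[[ 1.,  0.,  0.]]])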
#---------------------------------------------------------------
# Primaries for the coordinate systems
#---------------------------------------------------------------
cie_primaries = np.array([700, 546.1, 435.8])
sb_primaries = np.array([1. / 155, 1. / 190, 1. / 225]) * 1e5
#---------------------------------------------------------------
# Matrices that define conversion between different color spaces
#---------------------------------------------------------------
# From sRGB specification
xyz_from_rgb = np.array([[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227]])
rgb_from_xyz = linalg.inv(xyz_from_rgb)
# From http://en.wikipedia.org/wiki/CIE_1931_color_space
# Note: Travis's code did not have the divide by 0.17697
xyz_from_rgbcie = np.array([[0.49, 0.31, 0.20],
[0.17697, 0.81240, 0.01063],
[0.00, 0.01, 0.99]]) / 0.17697
rgbcie_from_xyz = linalg.inv(xyz_from_rgbcie)
# construct matrices to and from rgb:
rgbcie_from_rgb = np.dot(rgbcie_from_xyz, xyz_from_rgb)
rgb_from_rgbcie = np.dot(rgb_from_xyz, xyz_from_rgbcie)
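# Sanity check (a sketch): the two derived matrices should be mutual
# inverses up to floating point error:
# np.allclose(np.dot(rgb_from_rgbcie, rgbcie_from_rgb), np.eye(3)) -> True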
gray_from_rgb = np.array([[0.2125, 0.7154, 0.0721],
[0, 0, 0],
[0, 0, 0]])
# CIE LAB constants for Observer= 2A, Illuminant= D65
lab_ref_white = np.array([0.95047, 1., 1.08883])
# Haematoxylin-Eosin-DAB colorspace
# From original Ruifrok's paper: A. C. Ruifrok and D. A. Johnston,
# "Quantification of histochemical staining by color deconvolution.,"
# Analytical and quantitative cytology and histology / the International
# Academy of Cytology [and] American Society of Cytology, vol. 23, no. 4,
# pp. 291-9, Aug. 2001.
rgb_from_hed = np.array([[0.65, 0.70, 0.29],
[0.07, 0.99, 0.11],
[0.27, 0.57, 0.78]])
hed_from_rgb = linalg.inv(rgb_from_hed)
# The following matrices are adapted from the Java code written by G. Landini.
# The original code is available at:
# http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html
# Hematoxylin + DAB
rgb_from_hdx = np.array([[0.650, 0.704, 0.286],
[0.268, 0.570, 0.776],
[0.0, 0.0, 0.0]])
rgb_from_hdx[2, :] = np.cross(rgb_from_hdx[0, :], rgb_from_hdx[1, :])
hdx_from_rgb = linalg.inv(rgb_from_hdx)
# Feulgen + Light Green
rgb_from_fgx = np.array([[0.46420921, 0.83008335, 0.30827187],
[0.94705542, 0.25373821, 0.19650764],
[0.0, 0.0, 0.0]])
rgb_from_fgx[2, :] = np.cross(rgb_from_fgx[0, :], rgb_from_fgx[1, :])
fgx_from_rgb = linalg.inv(rgb_from_fgx)
# Giemsa: Methyl Blue + Eosin
rgb_from_bex = np.array([[0.834750233, 0.513556283, 0.196330403],
[0.092789, 0.954111, 0.283111],
[0.0, 0.0, 0.0]])
rgb_from_bex[2, :] = np.cross(rgb_from_bex[0, :], rgb_from_bex[1, :])
bex_from_rgb = linalg.inv(rgb_from_bex)
# FastRed + FastBlue + DAB
rgb_from_rbd = np.array([[0.21393921, 0.85112669, 0.47794022],
[0.74890292, 0.60624161, 0.26731082],
[0.268, 0.570, 0.776]])
rbd_from_rgb = linalg.inv(rgb_from_rbd)
# Methyl Green + DAB
rgb_from_gdx = np.array([[0.98003, 0.144316, 0.133146],
[0.268, 0.570, 0.776],
[0.0, 0.0, 0.0]])
rgb_from_gdx[2, :] = np.cross(rgb_from_gdx[0, :], rgb_from_gdx[1, :])
gdx_from_rgb = linalg.inv(rgb_from_gdx)
# Hematoxylin + AEC
rgb_from_hax = np.array([[0.650, 0.704, 0.286],
[0.2743, 0.6796, 0.6803],
[0.0, 0.0, 0.0]])
rgb_from_hax[2, :] = np.cross(rgb_from_hax[0, :], rgb_from_hax[1, :])
hax_from_rgb = linalg.inv(rgb_from_hax)
# Blue matrix Anilline Blue + Red matrix Azocarmine + Orange matrix Orange-G
rgb_from_bro = np.array([[0.853033, 0.508733, 0.112656],
[0.09289875, 0.8662008, 0.49098468],
[0.10732849, 0.36765403, 0.9237484]])
bro_from_rgb = linalg.inv(rgb_from_bro)
# Methyl Blue + Ponceau Fuchsin
rgb_from_bpx = np.array([[0.7995107, 0.5913521, 0.10528667],
[0.09997159, 0.73738605, 0.6680326],
[0.0, 0.0, 0.0]])
rgb_from_bpx[2, :] = np.cross(rgb_from_bpx[0, :], rgb_from_bpx[1, :])
bpx_from_rgb = linalg.inv(rgb_from_bpx)
# Alcian Blue + Hematoxylin
rgb_from_ahx = np.array([[0.874622, 0.457711, 0.158256],
[0.552556, 0.7544, 0.353744],
[0.0, 0.0, 0.0]])
rgb_from_ahx[2, :] = np.cross(rgb_from_ahx[0, :], rgb_from_ahx[1, :])
ahx_from_rgb = linalg.inv(rgb_from_ahx)
# Hematoxylin + PAS
rgb_from_hpx = np.array([[0.644211, 0.716556, 0.266844],
[0.175411, 0.972178, 0.154589],
[0.0, 0.0, 0.0]])
rgb_from_hpx[2, :] = np.cross(rgb_from_hpx[0, :], rgb_from_hpx[1, :])
hpx_from_rgb = linalg.inv(rgb_from_hpx)
#-------------------------------------------------------------
# The conversion functions that make use of the matrices above
#-------------------------------------------------------------
def _convert(matrix, arr):
"""Do the color space conversion.
Parameters
----------
matrix : array_like
The 3x3 matrix to use.
arr : array_like
The input array.
Returns
-------
out : ndarray, dtype=float
The converted array.
"""
arr = _prepare_colorarray(arr)
arr = np.swapaxes(arr, 0, -1)
oldshape = arr.shape
arr = np.reshape(arr, (3, -1))
out = np.dot(matrix, arr)
out.shape = oldshape
out = np.swapaxes(out, -1, 0)
return np.ascontiguousarray(out)
def xyz2rgb(xyz):
"""XYZ to RGB color space conversion.
Parameters
----------
xyz : array_like
The image in XYZ format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `xyz` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
The CIE XYZ color space is derived from the CIE RGB color space. Note
however that this function converts to sRGB.
References
----------
.. [1] http://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2rgb
>>> lena = data.lena()
>>> lena_xyz = rgb2xyz(lena)
>>> lena_rgb = xyz2rgb(lena_xyz)
"""
# Follow the algorithm from http://www.easyrgb.com/index.php
# except we don't multiply/divide by 100 in the conversion
arr = _convert(rgb_from_xyz, xyz)
mask = arr > 0.0031308
arr[mask] = 1.055 * np.power(arr[mask], 1 / 2.4) - 0.055
arr[~mask] *= 12.92
return arr
def rgb2xyz(rgb):
"""RGB to XYZ color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Returns
-------
out : ndarray
The image in XYZ format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.
Notes
-----
The CIE XYZ color space is derived from the CIE RGB color space. Note
however that this function converts from sRGB.
References
----------
.. [1] http://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> lena = data.lena()
>>> lena_xyz = rgb2xyz(lena)
"""
# Follow the algorithm from http://www.easyrgb.com/index.php
# except we don't multiply/divide by 100 in the conversion
arr = _prepare_colorarray(rgb).copy()
mask = arr > 0.04045
arr[mask] = np.power((arr[mask] + 0.055) / 1.055, 2.4)
arr[~mask] /= 12.92
return _convert(xyz_from_rgb, arr)
def rgb2rgbcie(rgb):
"""RGB to RGB CIE color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB CIE format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] http://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2rgbcie
>>> lena = data.lena()
>>> lena_rgbcie = rgb2rgbcie(lena)
"""
return _convert(rgbcie_from_rgb, rgb)
def rgbcie2rgb(rgbcie):
"""RGB CIE to RGB color space conversion.
Parameters
----------
rgbcie : array_like
The image in RGB CIE format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgbcie` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] http://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2rgbcie, rgbcie2rgb
>>> lena = data.lena()
>>> lena_rgbcie = rgb2rgbcie(lena)
>>> lena_rgb = rgbcie2rgb(lena_rgbcie)
"""
return _convert(rgb_from_rgbcie, rgbcie)
def rgb2gray(rgb):
"""Compute luminance of an RGB image.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``,
or in RGBA format with shape ``(.., .., 4)``.
Returns
-------
out : ndarray
The luminance image, a 2-D array.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)`` or
``(.., .., 4)``.
References
----------
.. [1] http://www.poynton.com/PDFs/ColorFAQ.pdf
Notes
-----
The weights used in this conversion are calibrated for contemporary
CRT phosphors::
Y = 0.2125 R + 0.7154 G + 0.0721 B
If there is an alpha channel present, it is ignored.
Examples
--------
>>> from skimage.color import rgb2gray
>>> from skimage import data
>>> lena = data.lena()
>>> lena_gray = rgb2gray(lena)
"""
if rgb.ndim == 2:
return rgb
return _convert(gray_from_rgb, rgb[:, :, :3])[..., 0]
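# Worked example (a sketch): a pure-white pixel has luminance
# 0.2125 + 0.7154 + 0.0721 = 1.0, so rgb2gray(np.ones((1, 1, 3)))
# returns a (1, 1) array of ones.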
rgb2grey = rgb2gray
def gray2rgb(image):
"""Create an RGB representation of a gray-level image.
Parameters
----------
image : array_like
Input image of shape ``(M, N [, P])``.
Returns
-------
rgb : ndarray
RGB image of shape ``(M, N [, P], 3)``.
Raises
------
ValueError
If the input is not a 2- or 3-dimensional image.
"""
if np.squeeze(image).ndim == 3 and image.shape[2] in (3, 4):
return image
elif image.ndim != 1 and np.squeeze(image).ndim in (1, 2, 3):
image = image[..., np.newaxis]
return np.concatenate(3 * (image,), axis=-1)
else:
raise ValueError("Input image expected to be RGB, RGBA or gray.")
def xyz2lab(xyz):
"""XYZ to CIE-LAB color space conversion.
Parameters
----------
xyz : array_like
The image in XYZ format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Returns
-------
out : ndarray
The image in CIE-LAB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `xyz` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.
Notes
-----
Observer= 2A, Illuminant= D65
CIE XYZ tristimulus values x_ref = 95.047, y_ref = 100., z_ref = 108.883
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
.. [2] http://en.wikipedia.org/wiki/Lab_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2lab
>>> lena = data.lena()
>>> lena_xyz = rgb2xyz(lena)
>>> lena_lab = xyz2lab(lena_xyz)
"""
arr = _prepare_colorarray(xyz)
# scale by CIE XYZ tristimulus values of the reference white point
arr = arr / lab_ref_white
# Nonlinear distortion and linear transformation
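# 0.008856 is approximately (6/29) ** 3, the CIE cutoff between the
# cube-root branch and the linear branch of the companding function.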
mask = arr > 0.008856
arr[mask] = np.power(arr[mask], 1. / 3.)
arr[~mask] = 7.787 * arr[~mask] + 16. / 116.
x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]
# Vector scaling
L = (116. * y) - 16.
a = 500.0 * (x - y)
b = 200.0 * (y - z)
return np.concatenate([x[..., np.newaxis] for x in [L, a, b]], axis=-1)
def lab2xyz(lab):
"""CIE-LAB to XYZcolor space conversion.
Parameters
----------
lab : array_like
The image in lab format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in XYZ format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `lab` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Observer = 2A, Illuminant = D65
CIE XYZ tristimulus values x_ref = 95.047, y_ref = 100., z_ref = 108.883
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
.. [2] http://en.wikipedia.org/wiki/Lab_color_space
"""
arr = _prepare_colorarray(lab).copy()
L, a, b = arr[:, :, 0], arr[:, :, 1], arr[:, :, 2]
y = (L + 16.) / 116.
x = (a / 500.) + y
z = y - (b / 200.)
out = np.dstack([x, y, z])
mask = out > 0.2068966
out[mask] = np.power(out[mask], 3.)
out[~mask] = (out[~mask] - 16.0 / 116.) / 7.787
# rescale Observer= 2 deg, Illuminant= D65
out *= lab_ref_white
return out
def rgb2lab(rgb):
"""RGB to lab color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Returns
-------
out : ndarray
The image in Lab format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.
Notes
-----
This function uses rgb2xyz and xyz2lab.
"""
return xyz2lab(rgb2xyz(rgb))
def lab2rgb(lab):
"""Lab to RGB color space conversion.
Parameters
----------
lab : array_like
The image in Lab format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `lab` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
This function uses lab2xyz and xyz2rgb.
"""
return xyz2rgb(lab2xyz(lab))
def xyz2luv(xyz):
"""XYZ to CIE-Luv color space conversion.
Parameters
----------
xyz : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in XYZ format. Final dimension denotes
channels.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in CIE-Luv format. Same dimensions as input.
Raises
------
ValueError
If `xyz` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
XYZ conversion weights use Observer = 2A. Reference whitepoint for D65
Illuminant, with XYZ tristimulus values of ``(95.047, 100., 108.883)``.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16
.. [2] http://en.wikipedia.org/wiki/CIELUV
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2luv
>>> lena = data.lena()
>>> lena_xyz = rgb2xyz(lena)
>>> lena_luv = xyz2luv(lena_xyz)
"""
arr = _prepare_colorarray(xyz)
# extract channels
x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]
eps = np.finfo(np.float).eps
# compute y_r and L
L = y / lab_ref_white[1]
mask = L > 0.008856
L[mask] = 116. * np.power(L[mask], 1. / 3.) - 16.
L[~mask] = 903.3 * L[~mask]
u0 = 4*lab_ref_white[0] / np.dot([1, 15, 3], lab_ref_white)
v0 = 9*lab_ref_white[1] / np.dot([1, 15, 3], lab_ref_white)
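# u0 and v0 are the (u', v') chromaticity coordinates of the reference
# white: u' = 4X / (X + 15Y + 3Z) and v' = 9Y / (X + 15Y + 3Z).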
# u' and v' helper functions
def fu(X, Y, Z):
return (4.*X) / (X + 15.*Y + 3.*Z + eps)
def fv(X, Y, Z):
return (9.*Y) / (X + 15.*Y + 3.*Z + eps)
# compute u and v using helper functions
u = 13.*L * (fu(x, y, z) - u0)
v = 13.*L * (fv(x, y, z) - v0)
return np.concatenate([q[..., np.newaxis] for q in [L, u, v]], axis=-1)
def luv2xyz(luv):
"""CIE-Luv to XYZ color space conversion.
Parameters
----------
luv : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in CIE-Luv format. Final dimension denotes
channels.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in XYZ format. Same dimensions as input.
Raises
------
ValueError
If `luv` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
XYZ conversion weights use Observer = 2A. Reference whitepoint for D65
Illuminant, with XYZ tristimulus values of ``(95.047, 100., 108.883)``.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16
.. [2] http://en.wikipedia.org/wiki/CIELUV
"""
arr = _prepare_colorarray(luv).copy()
L, u, v = arr[:, :, 0], arr[:, :, 1], arr[:, :, 2]
eps = np.finfo(np.float).eps
# compute y
y = L.copy()
mask = y > 7.999625
y[mask] = np.power((y[mask]+16.) / 116., 3.)
y[~mask] = y[~mask] / 903.3
y *= lab_ref_white[1]
# reference white x,z
uv_weights = [1, 15, 3]
u0 = 4*lab_ref_white[0] / np.dot(uv_weights, lab_ref_white)
v0 = 9*lab_ref_white[1] / np.dot(uv_weights, lab_ref_white)
# compute intermediate values
a = u0 + u / (13.*L + eps)
b = v0 + v / (13.*L + eps)
c = 3*y * (5*b-3)
# compute x and z
z = ((a-4)*c - 15*a*b*y) / (12*b)
x = -(c/b + 3.*z)
return np.concatenate([q[..., np.newaxis] for q in [x, y, z]], axis=-1)
def rgb2luv(rgb):
"""RGB to CIE-Luv color space conversion.
Parameters
----------
rgb : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in RGB format. Final dimension denotes
channels.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in CIE Luv format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
This function uses rgb2xyz and xyz2luv.
"""
return xyz2luv(rgb2xyz(rgb))
def luv2rgb(luv):
"""Luv to RGB color space conversion.
Parameters
----------
luv : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in CIE Luv format. Final dimension denotes
channels.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `luv` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
This function uses luv2xyz and xyz2rgb.
"""
return xyz2rgb(luv2xyz(luv))
def rgb2hed(rgb):
"""RGB to Haematoxylin-Eosin-DAB (HED) color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in HED format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2hed
>>> ihc = data.immunohistochemistry()
>>> ihc_hed = rgb2hed(ihc)
"""
return separate_stains(rgb, hed_from_rgb)
def hed2rgb(hed):
"""Haematoxylin-Eosin-DAB (HED) to RGB color space conversion.
Parameters
----------
hed : array_like
The image in the HED color space, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `hed` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2hed, hed2rgb
>>> ihc = data.immunohistochemistry()
>>> ihc_hed = rgb2hed(ihc)
>>> ihc_rgb = hed2rgb(ihc_hed)
"""
return combine_stains(hed, rgb_from_hed)
def separate_stains(rgb, conv_matrix):
"""RGB to stain color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
conv_matrix: ndarray
The stain separation matrix as described by G. Landini [1]_.
Returns
-------
out : ndarray
The image in stain color space, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Stain separation matrices available in the ``color`` module and their
respective colorspace:
* ``hed_from_rgb``: Hematoxylin + Eosin + DAB
* ``hdx_from_rgb``: Hematoxylin + DAB
* ``fgx_from_rgb``: Feulgen + Light Green
* ``bex_from_rgb``: Giemsa stain : Methyl Blue + Eosin
* ``rbd_from_rgb``: FastRed + FastBlue + DAB
* ``gdx_from_rgb``: Methyl Green + DAB
* ``hax_from_rgb``: Hematoxylin + AEC
* ``bro_from_rgb``: Blue matrix Anilline Blue + Red matrix Azocarmine\
+ Orange matrix Orange-G
* ``bpx_from_rgb``: Methyl Blue + Ponceau Fuchsin
* ``ahx_from_rgb``: Alcian Blue + Hematoxylin
* ``hpx_from_rgb``: Hematoxylin + PAS
References
----------
.. [1] http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html
Examples
--------
>>> from skimage import data
>>> from skimage.color import separate_stains, hdx_from_rgb
>>> ihc = data.immunohistochemistry()
>>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)
"""
rgb = dtype.img_as_float(rgb) + 2
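# The +2 offset keeps -np.log(rgb) finite for pure-black pixels;
# combine_stains() subtracts it back out after exponentiating.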
stains = np.dot(np.reshape(-np.log(rgb), (-1, 3)), conv_matrix)
return np.reshape(stains, rgb.shape)
def combine_stains(stains, conv_matrix):
"""Stain to RGB color space conversion.
Parameters
----------
stains : array_like
The image in stain color space, in a 3-D array of shape ``(.., .., 3)``.
conv_matrix: ndarray
The stain separation matrix as described by G. Landini [1]_.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `stains` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Stain combination matrices available in the ``color`` module and their
respective colorspace:
* ``rgb_from_hed``: Hematoxylin + Eosin + DAB
* ``rgb_from_hdx``: Hematoxylin + DAB
* ``rgb_from_fgx``: Feulgen + Light Green
* ``rgb_from_bex``: Giemsa stain : Methyl Blue + Eosin
* ``rgb_from_rbd``: FastRed + FastBlue + DAB
* ``rgb_from_gdx``: Methyl Green + DAB
* ``rgb_from_hax``: Hematoxylin + AEC
* ``rgb_from_bro``: Blue matrix Anilline Blue + Red matrix Azocarmine\
+ Orange matrix Orange-G
* ``rgb_from_bpx``: Methyl Blue + Ponceau Fuchsin
* ``rgb_from_ahx``: Alcian Blue + Hematoxylin
* ``rgb_from_hpx``: Hematoxylin + PAS
References
----------
.. [1] http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html
Examples
--------
>>> from skimage import data
>>> from skimage.color import (separate_stains, combine_stains,
... hdx_from_rgb, rgb_from_hdx)
>>> ihc = data.immunohistochemistry()
>>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)
>>> ihc_rgb = combine_stains(ihc_hdx, rgb_from_hdx)
"""
from ..exposure import rescale_intensity
stains = dtype.img_as_float(stains)
logrgb2 = np.dot(-np.reshape(stains, (-1, 3)), conv_matrix)
rgb2 = np.exp(logrgb2)
return rescale_intensity(np.reshape(rgb2 - 2, stains.shape), in_range=(-1, 1))
def lab2lch(lab):
"""CIE-LAB to CIE-LCH color space conversion.
LCH is the cylindrical representation of the LAB (Cartesian) colorspace
Parameters
----------
lab : array_like
The N-D image in CIE-LAB format. The last (``N+1``-th) dimension must
have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``
color channels. Subsequent elements are copied.
Returns
-------
out : ndarray
The image in LCH format, in a N-D array with same shape as input `lab`.
Raises
------
ValueError
If `lab` does not have at least 3 color channels (i.e. l, a, b).
Notes
-----
The Hue is expressed as an angle between ``(0, 2*pi)``
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2lab, lab2lch
>>> lena = data.lena()
>>> lena_lab = rgb2lab(lena)
>>> lena_lch = lab2lch(lena_lab)
"""
lch = _prepare_lab_array(lab)
a, b = lch[..., 1], lch[..., 2]
lch[..., 1], lch[..., 2] = _cart2polar_2pi(a, b)
return lch
def _cart2polar_2pi(x, y):
"""convert cartesian coordiantes to polar (uses non-standard theta range!)
NON-STANDARD RANGE! Maps to ``(0, 2*pi)`` rather than usual ``(-pi, +pi)``
"""
r, t = np.hypot(x, y), np.arctan2(y, x)
t += np.where(t < 0., 2 * np.pi, 0)
return r, t
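# Example (a sketch): the point (0, -1) maps to theta = 3*pi/2 rather than
# the usual -pi/2:
#
#   >>> r, t = _cart2polar_2pi(0., -1.)
#   >>> np.allclose([r, t], [1.0, 3 * np.pi / 2])
#   True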
def lch2lab(lch):
"""CIE-LCH to CIE-LAB color space conversion.
LCH is the cylindrical representation of the LAB (Cartesian) colorspace
Parameters
----------
lch : array_like
The N-D image in CIE-LCH format. The last (``N+1``-th) dimension must
have at least 3 elements, corresponding to the ``L``, ``C``, and ``h``
color channels. Subsequent elements are copied.
Returns
-------
out : ndarray
The image in LAB format, with same shape as input `lch`.
Raises
------
ValueError
If `lch` does not have at least 3 color channels (i.e. l, c, h).
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2lab, lch2lab
>>> lena = data.lena()
>>> lena_lab = rgb2lab(lena)
>>> lena_lch = lab2lch(lena_lab)
>>> lena_lab2 = lch2lab(lena_lch)
"""
lch = _prepare_lab_array(lch)
c, h = lch[..., 1], lch[..., 2]
lch[..., 1], lch[..., 2] = c * np.cos(h), c * np.sin(h)
return lch
def _prepare_lab_array(arr):
"""Ensure input for lab2lch, lch2lab are well-posed.
Arrays must be in floating point and have at least 3 elements in
last dimension. Return a new array.
"""
arr = np.asarray(arr)
shape = arr.shape
if shape[-1] < 3:
raise ValueError('Input array has less than 3 color channels')
return dtype.img_as_float(arr, force_copy=True)
|
bsd-3-clause
|
wuhengzhi/chromium-crosswalk
|
tools/metrics/histograms/extract_histograms.py
|
40
|
16108
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extract histogram names from the description XML file.
For more information on the format of the XML file, which is self-documenting,
see histograms.xml; however, here is a simple example to get you started. The
XML below will generate the following five histograms:
HistogramTime
HistogramEnum
HistogramEnum_Chrome
HistogramEnum_IE
HistogramEnum_Firefox
<histogram-configuration>
<histograms>
<histogram name="HistogramTime" units="milliseconds">
<summary>A brief description.</summary>
<details>This is a more thorough description of this histogram.</details>
</histogram>
<histogram name="HistogramEnum" enum="MyEnumType">
<summary>This histogram sports an enum value type.</summary>
</histogram>
</histograms>
<enums>
<enum name="MyEnumType">
<summary>This is an example enum type, where the values mean little.</summary>
<int value="1" label="FIRST_VALUE">This is the first value.</int>
<int value="2" label="SECOND_VALUE">This is the second value.</int>
</enum>
</enums>
<histogram_suffixes_list>
<histogram_suffixes name="BrowserType">
<suffix name="Chrome"/>
<suffix name="IE"/>
<suffix name="Firefox"/>
<affected-histogram name="HistogramEnum"/>
</histogram_suffixes>
</histogram_suffixes_list>
</histogram-configuration>
"""
import copy
import logging
import xml.dom.minidom
OWNER_FIELD_PLACEHOLDER = (
'Please list the metric\'s owners. Add more owner tags as needed.')
MAX_HISTOGRAM_SUFFIX_DEPENDENCY_DEPTH = 5
class Error(Exception):
pass
def _JoinChildNodes(tag):
"""Join child nodes into a single text.
Applicable to leafs like 'summary' and 'detail'.
Args:
tag: parent node
Returns:
a string with concatenated nodes' text representation.
"""
return ''.join(c.toxml() for c in tag.childNodes).strip()
def _NormalizeString(s):
"""Replaces all whitespace sequences with a single space.
The function properly handles multi-line strings.
Args:
s: The string to normalize, (' \\n a b c\\n d ').
Returns:
The normalized string (a b c d).
"""
return ' '.join(s.split())
def _NormalizeAllAttributeValues(node):
"""Recursively normalizes all tag attribute values in the given tree.
Args:
node: The minidom node to be normalized.
Returns:
The normalized minidom node.
"""
if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
for a in node.attributes.keys():
node.attributes[a].value = _NormalizeString(node.attributes[a].value)
for c in node.childNodes:
_NormalizeAllAttributeValues(c)
return node
def _ExpandHistogramNameWithSuffixes(suffix_name, histogram_name,
histogram_suffixes_node):
"""Creates a new histogram name based on a histogram suffix.
Args:
suffix_name: The suffix string to apply to the histogram name. May be empty.
histogram_name: The name of the histogram. May be of the form
Group.BaseName or BaseName.
histogram_suffixes_node: The histogram_suffixes XML node.
Returns:
A string with the expanded histogram name.
Raises:
Error: if the expansion can't be done.
"""
if histogram_suffixes_node.hasAttribute('separator'):
separator = histogram_suffixes_node.getAttribute('separator')
else:
separator = '_'
if histogram_suffixes_node.hasAttribute('ordering'):
ordering = histogram_suffixes_node.getAttribute('ordering')
else:
ordering = 'suffix'
if ordering not in ['prefix', 'suffix']:
logging.error('ordering needs to be prefix or suffix, value is %s',
ordering)
raise Error()
if not suffix_name:
return histogram_name
if ordering == 'suffix':
return histogram_name + separator + suffix_name
# For prefixes, the suffix_name is inserted between the "cluster" and the
# "remainder", e.g. Foo.BarHist expanded with gamma becomes Foo.gamma_BarHist.
sections = histogram_name.split('.')
if len(sections) <= 1:
logging.error(
'Prefix Field Trial expansions require histogram names which include a '
'dot separator. Histogram name is %s, and Field Trial is %s',
histogram_name, histogram_suffixes_node.getAttribute('name'))
raise Error()
cluster = sections[0] + '.'
remainder = '.'.join(sections[1:])
return cluster + suffix_name + separator + remainder
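# Example (a sketch): with the default '_' separator and 'suffix' ordering,
# expanding 'HistogramEnum' with suffix 'Chrome' yields 'HistogramEnum_Chrome';
# with 'prefix' ordering, 'Foo.BarHist' with 'gamma' yields 'Foo.gamma_BarHist'.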
def _ExtractEnumsFromXmlTree(tree):
"""Extract all <enum> nodes in the tree into a dictionary."""
enums = {}
have_errors = False
last_name = None
for enum in tree.getElementsByTagName('enum'):
if enum.getAttribute('type') != 'int':
logging.error('Unknown enum type %s', enum.getAttribute('type'))
have_errors = True
continue
name = enum.getAttribute('name')
if last_name is not None and name.lower() < last_name.lower():
logging.error('Enums %s and %s are not in alphabetical order',
last_name, name)
have_errors = True
last_name = name
if name in enums:
logging.error('Duplicate enum %s', name)
have_errors = True
continue
last_int_value = None
enum_dict = {}
enum_dict['name'] = name
enum_dict['values'] = {}
for int_tag in enum.getElementsByTagName('int'):
value_dict = {}
int_value = int(int_tag.getAttribute('value'))
if last_int_value is not None and int_value < last_int_value:
logging.error('Enum %s int values %d and %d are not in numerical order',
name, last_int_value, int_value)
have_errors = True
last_int_value = int_value
if int_value in enum_dict['values']:
logging.error('Duplicate enum value %d for enum %s', int_value, name)
have_errors = True
continue
value_dict['label'] = int_tag.getAttribute('label')
value_dict['summary'] = _JoinChildNodes(int_tag)
enum_dict['values'][int_value] = value_dict
summary_nodes = enum.getElementsByTagName('summary')
if summary_nodes:
enum_dict['summary'] = _NormalizeString(_JoinChildNodes(summary_nodes[0]))
enums[name] = enum_dict
return enums, have_errors
def _ExtractOwners(xml_node):
"""Extract all owners into a list from owner tag under |xml_node|."""
owners = []
for owner_node in xml_node.getElementsByTagName('owner'):
owner_entry = _NormalizeString(_JoinChildNodes(owner_node))
if OWNER_FIELD_PLACEHOLDER not in owner_entry:
owners.append(owner_entry)
return owners
def _ExtractHistogramsFromXmlTree(tree, enums):
"""Extract all <histogram> nodes in the tree into a dictionary."""
# Process the histograms. The descriptions can include HTML tags.
histograms = {}
have_errors = False
last_name = None
for histogram in tree.getElementsByTagName('histogram'):
name = histogram.getAttribute('name')
if last_name is not None and name.lower() < last_name.lower():
logging.error('Histograms %s and %s are not in alphabetical order',
last_name, name)
have_errors = True
last_name = name
if name in histograms:
logging.error('Duplicate histogram definition %s', name)
have_errors = True
continue
histograms[name] = histogram_entry = {}
# Find <owner> tag.
owners = _ExtractOwners(histogram)
if owners:
histogram_entry['owners'] = owners
# Find <summary> tag.
summary_nodes = histogram.getElementsByTagName('summary')
if summary_nodes:
histogram_entry['summary'] = _NormalizeString(
_JoinChildNodes(summary_nodes[0]))
else:
histogram_entry['summary'] = 'TBD'
# Find <obsolete> tag.
obsolete_nodes = histogram.getElementsByTagName('obsolete')
if obsolete_nodes:
reason = _JoinChildNodes(obsolete_nodes[0])
histogram_entry['obsolete'] = reason
# Handle units.
if histogram.hasAttribute('units'):
histogram_entry['units'] = histogram.getAttribute('units')
# Find <details> tag.
details_nodes = histogram.getElementsByTagName('details')
if details_nodes:
histogram_entry['details'] = _NormalizeString(
_JoinChildNodes(details_nodes[0]))
# Handle enum types.
if histogram.hasAttribute('enum'):
enum_name = histogram.getAttribute('enum')
if enum_name not in enums:
logging.error('Unknown enum %s in histogram %s', enum_name, name)
have_errors = True
else:
histogram_entry['enum'] = enums[enum_name]
return histograms, have_errors
# Finds an <obsolete> node amongst |node|'s immediate children and returns its
# content as a string. Returns None if no such node exists.
def _GetObsoleteReason(node):
for child in node.childNodes:
if child.localName == 'obsolete':
# There can be at most 1 obsolete element per node.
return _JoinChildNodes(child)
return None
def _UpdateHistogramsWithSuffixes(tree, histograms):
"""Process <histogram_suffixes> tags and combine with affected histograms.
The histograms dictionary will be updated in-place by adding new histograms
created by combining histograms themselves with histogram_suffixes targeting
these histograms.
Args:
tree: XML dom tree.
histograms: a dictionary of histograms previously extracted from the tree;
Returns:
True if any errors were found.
"""
have_errors = False
histogram_suffix_tag = 'histogram_suffixes'
suffix_tag = 'suffix'
with_tag = 'with-suffix'
# Verify order of histogram_suffixes fields first.
last_name = None
for histogram_suffixes in tree.getElementsByTagName(histogram_suffix_tag):
name = histogram_suffixes.getAttribute('name')
if last_name is not None and name.lower() < last_name.lower():
logging.error('histogram_suffixes %s and %s are not in alphabetical '
'order', last_name, name)
have_errors = True
last_name = name
# histogram_suffixes can depend on other histogram_suffixes, so we need to be
# careful. Make a temporary copy of the list of histogram_suffixes to use as a
# queue. histogram_suffixes whose dependencies have not yet been processed
# will get relegated to the back of the queue to be processed later.
reprocess_queue = []
def GenerateHistogramSuffixes():
for f in tree.getElementsByTagName(histogram_suffix_tag):
yield 0, f
for r, f in reprocess_queue:
yield r, f
for reprocess_count, histogram_suffixes in GenerateHistogramSuffixes():
# Check dependencies first
dependencies_valid = True
affected_histograms = histogram_suffixes.getElementsByTagName(
'affected-histogram')
for affected_histogram in affected_histograms:
histogram_name = affected_histogram.getAttribute('name')
if histogram_name not in histograms:
# Base histogram is missing
dependencies_valid = False
missing_dependency = histogram_name
break
if not dependencies_valid:
if reprocess_count < MAX_HISTOGRAM_SUFFIX_DEPENDENCY_DEPTH:
reprocess_queue.append((reprocess_count + 1, histogram_suffixes))
continue
else:
logging.error('histogram_suffixes %s is missing its dependency %s',
histogram_suffixes.getAttribute('name'),
missing_dependency)
have_errors = True
continue
# If the suffix group has an obsolete tag, all suffixes it generates inherit
# its reason.
group_obsolete_reason = _GetObsoleteReason(histogram_suffixes)
name = histogram_suffixes.getAttribute('name')
suffix_nodes = histogram_suffixes.getElementsByTagName(suffix_tag)
suffix_labels = {}
for suffix in suffix_nodes:
suffix_labels[suffix.getAttribute('name')] = suffix.getAttribute('label')
# Find owners list under current histogram_suffixes tag.
owners = _ExtractOwners(histogram_suffixes)
last_histogram_name = None
for affected_histogram in affected_histograms:
histogram_name = affected_histogram.getAttribute('name')
if (last_histogram_name is not None
and histogram_name.lower() < last_histogram_name.lower()):
logging.error('Affected histograms %s and %s of histogram_suffixes %s '
'are not in alphabetical order',
last_histogram_name, histogram_name, name)
have_errors = True
last_histogram_name = histogram_name
with_suffixes = affected_histogram.getElementsByTagName(with_tag)
if with_suffixes:
suffixes_to_add = with_suffixes
else:
suffixes_to_add = suffix_nodes
for suffix in suffixes_to_add:
suffix_name = suffix.getAttribute('name')
try:
new_histogram_name = _ExpandHistogramNameWithSuffixes(
suffix_name, histogram_name, histogram_suffixes)
if new_histogram_name != histogram_name:
histograms[new_histogram_name] = copy.deepcopy(
histograms[histogram_name])
suffix_label = suffix_labels.get(suffix_name, '')
# TODO(yiyaoliu): Rename these to be consistent with the new naming.
# It is kept unchanged for now because it's used by dashboards.
if 'fieldtrial_groups' not in histograms[new_histogram_name]:
histograms[new_histogram_name]['fieldtrial_groups'] = []
histograms[new_histogram_name]['fieldtrial_groups'].append(
suffix_name)
if 'fieldtrial_names' not in histograms[new_histogram_name]:
histograms[new_histogram_name]['fieldtrial_names'] = []
histograms[new_histogram_name]['fieldtrial_names'].append(name)
if 'fieldtrial_labels' not in histograms[new_histogram_name]:
histograms[new_histogram_name]['fieldtrial_labels'] = []
histograms[new_histogram_name]['fieldtrial_labels'].append(
suffix_label)
# If no owners are added for this histogram-suffixes, it inherits the
# owners of its parents.
if owners:
histograms[new_histogram_name]['owners'] = owners
# If a suffix has an obsolete node, it's marked as obsolete for the
# specified reason, overwriting its group's obsoletion reason if the
# group itself was obsolete as well.
obsolete_reason = _GetObsoleteReason(suffix)
if not obsolete_reason:
obsolete_reason = group_obsolete_reason
# If the suffix has an obsolete tag, all histograms it generates
# inherit it.
if obsolete_reason:
histograms[new_histogram_name]['obsolete'] = obsolete_reason
except Error:
have_errors = True
return have_errors
def ExtractHistogramsFromFile(file_handle):
"""Compute the histogram names and descriptions from the XML representation.
Args:
file_handle: A file or file-like with XML content.
Returns:
a tuple of (histograms, status) where histograms is a dictionary mapping
histogram names to dictionaries containing histogram descriptions and status
is a boolean indicating if errors were encountered in processing.
"""
tree = xml.dom.minidom.parse(file_handle)
_NormalizeAllAttributeValues(tree)
enums, enum_errors = _ExtractEnumsFromXmlTree(tree)
histograms, histogram_errors = _ExtractHistogramsFromXmlTree(tree, enums)
update_errors = _UpdateHistogramsWithSuffixes(tree, histograms)
return histograms, enum_errors or histogram_errors or update_errors
def ExtractHistograms(filename):
"""Load histogram definitions from a disk file.
Args:
filename: a file path to load data from.
Returns:
a dictionary of histogram descriptions.
Raises:
Error: if the file is not well-formatted.
"""
with open(filename, 'r') as f:
histograms, had_errors = ExtractHistogramsFromFile(f)
if had_errors:
logging.error('Error parsing %s', filename)
raise Error()
return histograms
def ExtractNames(histograms):
return sorted(histograms.keys())
|
bsd-3-clause
|
MontpellierRessourcesImagerie/openmicroscopy
|
components/tools/OmeroWeb/omeroweb/webclient/controller/__init__.py
|
15
|
2099
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from django.conf import settings
PAGE = settings.PAGE
class BaseController(object):
conn = None
def __init__(self, conn, **kw):
self.conn = conn
def getShareId(self):
return self.conn.getShareId()
###########################################################
# Paging
def doPaging(self, page, page_size, total_size, limit=PAGE):
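# Build a windowed list of page numbers around the current page; -1 is a
# sentinel marking an elided range (typically rendered as an ellipsis).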
total = list()
t = (total_size/limit) + (total_size % limit > 0 and 1 or 0)
if total_size > (limit * 10):
if page > 10:
total.append(-1)
for i in range((1, page-9)[page-9 >= 1],
(t+1, page+10)[page+9 < t]):
total.append(i)
if page < t-9:
total.append(-1)
elif total_size > limit and total_size <= (limit*10):
for i in range(1, t+1):
total.append(i)
else:
total.append(1)
next = None
if page_size == limit and (page*limit) < total_size:
next = page + 1
prev = None
if page > 1:
prev = page - 1
if len(total) > 1:
return {'page': page, 'total': total, 'next': next, "prev": prev}
return None
|
gpl-2.0
|
aes/unleash-client-python
|
unleash_client/features.py
|
1
|
1103
|
import logging
log = logging.getLogger(__name__)
def feature_gates(strategies, feature):
tests = []
for args in feature['strategies']:
name, parameters = args['name'], args['parameters']
strategy = strategies.get(name)
if strategy:
test = strategy(**parameters)
tests.append(test)
else:
log.warning('Could not find strategy %r %r', name, parameters)
tests.append(lambda *a, **k: False)
return tests
class Feature:
def __init__(self, strategies, feature):
self.feature = feature
self.enabled = feature['enabled']
self.choices = {False: 0, True: 0}
self.gates = feature_gates(strategies, feature)
def __call__(self, context):
result = self.enabled and any(g(**context) for g in self.gates)
self.choices[result] += 1
return result
def report(self):
result, self.choices = self.choices, {False: 0, True: 0}
log.info('Feature report for %r: %r', self.feature, result)
return {'yes': result[True], 'no': result[False]}
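# Usage sketch (names hypothetical, payload shaped like the Unleash API):
#
#   strategies = {'default': lambda: (lambda **ctx: True)}
#   feature = {'enabled': True,
#              'strategies': [{'name': 'default', 'parameters': {}}]}
#   flag = Feature(strategies, feature)
#   flag({'userId': '42'})  # -> True, and tallied in flag.choices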
|
apache-2.0
|
hadronproject/lpms
|
lpms/operations/sync.py
|
1
|
2054
|
# Copyright 2009 - 2011 Burak Sezer <[email protected]>
#
# This file is part of lpms
#
# lpms is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# lpms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with lpms. If not, see <http://www.gnu.org/licenses/>.
import lpms
from lpms import out
from lpms import constants as cst
# FIXME: This module must be rewritten. Object Oriented is nice.
class SyncronizeRepo(object):
def __init__(self):
self.data = None
self.remote = None
self._type = None
def read_conf_file(self):
with open(cst.repo_conf) as data:
self.data = data.read().split("\n")
def run(self, repo):
keyword = "["+repo+"]"
# import repo.conf
self.read_conf_file()
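# Expected repo.conf layout (a sketch inferred from the parsing below;
# the repository name and URL are hypothetical):
#
#   [main]
#   type@git
#   remote@git://example.org/main.git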
if keyword in self.data:
first = self.data.index(keyword)
for line in self.data[first+1:]:
if line.startswith("["):
continue
if self._type is None and line.startswith("type"):
self._type = line.split("@")[1].strip()
if self._type == 'local':
return
elif self.remote is None and line.startswith("remote"):
self.remote = line.split("@")[1].strip()
if self._type == "git":
from lpms.syncers import git as syncer
lpms.logger.info("synchronizing %s from %s" % (repo, self.remote))
out.notify("synchronizing %s from %s" % (out.color(repo, "green"), self.remote))
syncer.run(repo, self.remote)
|
gpl-3.0
|
galeone/dynamic-training-bench
|
dytb/trainer/utils/flow.py
|
1
|
2027
|
#Copyright (C) 2017 Paolo Galeone <[email protected]>
#
#This Source Code Form is subject to the terms of the Mozilla Public
#License, v. 2.0. If a copy of the MPL was not distributed with this
#file, you can obtain one at http://mozilla.org/MPL/2.0/.
#Exhibit B is not attached; this software is compatible with the
#licenses expressed under Section 1.12 of the MPL v2.
"""Utilities to control to flow execution of the trainers"""
import sys
import tensorflow as tf
from .builders import build_restore_saver
def restore_or_restart(args, paths, sess):
"""Restore actual session or restart the training.
If SESS.checkpoint_path is setted, start a new train
loading the weight from the lastest checkpoint in that path
Args:
sess: session
paths: dict of paths
"""
# First check whether a checkpoint_path was passed, telling us where to
# load the weights from. Exit with an error if it is not valid.
pretrained_checkpoint = None
if args["checkpoint_path"] != '':
pretrained_checkpoint = tf.train.latest_checkpoint(
args["checkpoint_path"])
if not pretrained_checkpoint:
print("[E] {} not valid".format(args["checkpoint_path"]))
sys.exit(-1)
if not args["force_restart"]:
# continue training checkpoint
continue_checkpoint = tf.train.latest_checkpoint(paths["log"])
if continue_checkpoint:
restore_saver = build_restore_saver(
None, scopes_to_remove=args["exclude_scopes"])
restore_saver.restore(sess, continue_checkpoint)
# else if the continue checkpoint does not exists
# and the pretrained checkpoint has been specified
# load the weights from the pretrained checkpoint
elif pretrained_checkpoint:
restore_saver = build_restore_saver(
[], scopes_to_remove=args["exclude_scopes"])
restore_saver.restore(sess, pretrained_checkpoint)
else:
print('[!] No checkpoint file found')
|
mpl-2.0
|
mojeto/django
|
tests/gis_tests/tests.py
|
22
|
4106
|
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.db import ProgrammingError
try:
from django.contrib.gis.db.backends.postgis.operations import PostGISOperations
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False
except ImproperlyConfigured as e:
# If psycopg is installed but not geos, the import path hits
# django.contrib.gis.geometry.backend which will "helpfully" convert
# an ImportError into an ImproperlyConfigured.
# Here, we make sure we're only catching this specific case and not another
# ImproperlyConfigured one.
if e.args and e.args[0].startswith('Could not import user-defined GEOMETRY_BACKEND'):
HAS_POSTGRES = False
else:
raise
if HAS_POSTGRES:
class FakeConnection:
def __init__(self):
self.settings_dict = {
'NAME': 'test',
}
class FakePostGISOperations(PostGISOperations):
def __init__(self, version=None):
self.version = version
self.connection = FakeConnection()
def _get_postgis_func(self, func):
if func == 'postgis_lib_version':
if self.version is None:
raise ProgrammingError
else:
return self.version
elif func == 'version':
pass
else:
raise NotImplementedError('This function was not expected to be called')
@unittest.skipUnless(HAS_POSTGRES, "The psycopg2 driver is needed for these tests")
class TestPostGISVersionCheck(unittest.TestCase):
"""
The PostGIS version check parses correctly the version numbers
"""
def test_get_version(self):
expect = '1.0.0'
ops = FakePostGISOperations(expect)
actual = ops.postgis_lib_version()
self.assertEqual(expect, actual)
def test_version_classic_tuple(self):
expect = ('1.2.3', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_version_dev_tuple(self):
expect = ('1.2.3dev', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_valid_version_numbers(self):
versions = [
('1.3.0', 1, 3, 0),
('2.1.1', 2, 1, 1),
('2.2.0dev', 2, 2, 0),
]
for version in versions:
ops = FakePostGISOperations(version[0])
actual = ops.spatial_version
self.assertEqual(version[1:], actual)
def test_invalid_version_numbers(self):
versions = ['nope', '123']
for version in versions:
ops = FakePostGISOperations(version)
with self.assertRaises(Exception):
ops.spatial_version
def test_no_version_number(self):
ops = FakePostGISOperations()
with self.assertRaises(ImproperlyConfigured):
ops.spatial_version
def test_version_dependent_funcs(self):
"""
Resolve names of functions renamed and deprecated in PostGIS 2.2.0
depending on PostGIS version.
Remove when dropping support for PostGIS 2.1.
"""
ops = FakePostGISOperations('2.2.0')
self.assertEqual(ops.spatial_function_name('DistanceSphere'), 'ST_DistanceSphere')
self.assertEqual(ops.spatial_function_name('DistanceSpheroid'), 'ST_DistanceSpheroid')
self.assertEqual(ops.spatial_function_name('LengthSpheroid'), 'ST_LengthSpheroid')
self.assertEqual(ops.spatial_function_name('MemSize'), 'ST_MemSize')
ops = FakePostGISOperations('2.1.0')
self.assertEqual(ops.spatial_function_name('DistanceSphere'), 'ST_distance_sphere')
self.assertEqual(ops.spatial_function_name('DistanceSpheroid'), 'ST_distance_spheroid')
self.assertEqual(ops.spatial_function_name('LengthSpheroid'), 'ST_length_spheroid')
self.assertEqual(ops.spatial_function_name('MemSize'), 'ST_mem_size')
|
bsd-3-clause
|
nseifert/oddities
|
chemistry/harmonic_state_counter.py
|
1
|
7688
|
"""
HARMONIC (VIBRATIONAL) STATE COUNTER
FOR POLYATOMIC MOLECULES.
Implements the state counting algorithm
from:
M. J. H. Kemper, J. M. F. van Dijk, H. M. Buck,
Chem. Phys. Lett., 53 (1), 121.
Note:
The state counter does not include the ground state,
so partition function calculations require adding +1
to get the right number.
"""
__author__ = 'Nathan Seifert'
__license__ = "MIT"
__version__ = "1.0"
# Import statements
import numpy as np
import scipy.stats as spstats
import scipy.misc as misc
from matplotlib import pyplot as pp
# Constant definitions
h = 6.62606957e-34
c = 299792458
R = 8.3144621
kB = 1.3806488e-23
kT_cm = 0.695039 # Boltzmann's constant in cm-1
def state_count(E_min, E_max, modes):
"""
state_count(E_min, E_max, modes):
Given an array or list of harmonic mode frequencies (modes) and
energy bounds E_min and E_max (in units of the harmonic mode frequencies),
state_count() will calculate the number of possible harmonic vibrational
states and return an array of all possible states with their harmonic
energies. The returned array has shape (M,N+1) where M is the number
of available modes between the energy bounds and N is the number of
harmonic modes available in the target molecule. The extra column, the last
column of the returned array, contains the energies of each possible mode.
"""
min_mode = np.amin(modes)
num_columns = int(np.ceil(E_max/min_mode))
num_modes = len(modes)
#print "NUMBER OF COLUMNS:", num_columns
# Initialize E difference matrix
E_mat = np.zeros((num_modes,num_columns))
for i in range(num_modes):
for j in range(0, num_columns):
E_mat[i,j] = modes[i]
i = 0 # This will be our internal counter for adding quanta.
total_states = 0 # This will be our state counter, not used except for debug
back_counter = 1 # This corresponds to "j" in step (2) of the pseudocode from the paper.
quanta = np.zeros(num_modes) # The quanta-counting array
states = [] # Results of each quanta combination with E_min < total energy < E_max will be placed here.
while back_counter >= 0: # Program terminates when "j" is zero or less, so this is our primary cutoff
E_tot = 0
for m in range(0, num_modes): # Step (1) from paper
if quanta[m] > 0:
for n in range(int(quanta[m])):
E_tot = E_tot + E_mat[m,n]
while i < num_modes:
while E_tot <= E_max:
quanta[i] = quanta[i] + 1
E_tot = E_tot + E_mat[i,quanta[i]-1]
while E_tot < E_min: # "Sum is too low"
quanta[i] = quanta[i] + 1
E_tot = E_tot + E_mat[i,quanta[i]-1]
if E_tot <= E_max:
total_states = total_states + 1
temp = []
for m in range(0,num_modes):
temp.append(quanta[m])
temp.append(E_tot)
states.append(temp)
#print temp
E_tot = E_tot - E_mat[i,quanta[i]-1]
quanta[i] = quanta[i] - 1
i = i + 1
back_counter = i - 2
while back_counter >= 0 and quanta[back_counter] == 0:
back_counter = back_counter - 1
if back_counter >= 0:
quanta[back_counter] = quanta[back_counter] - 1
for m in range(back_counter+1,num_modes):
quanta[m] = 0
i = back_counter + 1
return np.array(states)
def calc_q_vs_beta(T_min, T_max, N, energy_vals, direct=1, harmonic_modes = None):
"""
A general helper function, takes in a range of temperature
values, the complete set of energies for the available states,
and returns a 2D array of ln (q_vib) vs beta (1/kT) values.
Other arguments:
N = number of temperature steps to calculate
direct = if energy_vals is a set of normal mode state energies,
direct = 1. Else direct = 0 if you want to use harmonic mode
frequencies for your q_vib calculation. If direct = 0,
then harmonic_modes must be supplied (energy_vals is not used).
One caveat: State counting is not done in this function, so
energy_vals must contain states with an appropriate range of
thermal availability for the Boltzmann distribution. (e.g.
energy_vals contains energies well larger than kT_max)
"""
temp_range = np.linspace(T_min,T_max,N)
beta_range = np.zeros(N)
for i in range(0,N):
beta_range[i] = (temp_range[i]*kT_cm)**(-1.0)
q_vs_beta = np.zeros((N,2))
for i in range(0, N):
if direct == 1:
part_func = qvib_direct(energy_vals,temp_range[i])
if direct == 0 and harmonic_modes is not None:
part_func = qvib_est(harmonic_modes, temp_range[i])
q_vs_beta[i,1] = np.log(part_func)
q_vs_beta[i,0] = beta_range[i]
return q_vs_beta
def qvib_direct(energy_vals,temp):
"""
Returns the partition function from an
input array energy_vals and temperature temp.
Energy_vals is expected to be an array of energies
derived from all accessible harmonic states.
"""
return np.sum(np.exp(-1.0*energy_vals/(kT_cm*temp)))+1
def qvib_est(modes,temp):
"""
Returns the estimated partition function
from an input list of harmonic mode frequencies.
"""
vib_temps = np.asarray(modes) / kT_cm
vib_part_funcs = (1 - np.exp(-vib_temps / temp)) ** (-1)
return reduce(lambda x, y: x*y, vib_part_funcs)
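# For reference: measuring energies from the zero-point level, each harmonic
# mode with characteristic temperature theta_i contributes
# q_i = 1 / (1 - exp(-theta_i / T)), and q_vib is the product over all modes.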
if __name__ == '__main__':
"""
Below is some test code for NH3
to illustrate the exploitation of the above
functions.
"""
# Normal mode frequencies for NH3
NH3_modes = [950.0,1628.0,1628.0,3414.0,3414.0,3337.0]
T = 1000.0 # Temperature of experiment
kT_temp = T * kT_cm
# states at E_max = 5 kT
states_5 = state_count(0.0,5.0*kT_temp,NH3_modes)
print qvib_direct(states_5[:,-1], T)
# states at E_max = 10 kT
states_10 = state_count(0.0,10.0*kT_temp,NH3_modes)
print qvib_direct(states_10[:,-1], T)
print 'States at 5 kT, T = %d K: %s' %(T,states_5.shape)
print 'States at 10 kT, T = %d K: %s' %(T,states_10.shape)
# Now we'll calculate the average vibrational energy
# of NH3 over a variety of temperatures.
# <E> = kT^2 (d(ln q)/dT)
T_min = 100.0 #K
T_max = 1000.0
N = 100
temp_range = np.linspace(T_min,T_max,N) # Useful for plotting
NH3_states = state_count(0.0, kT_cm*10.0*T_max, NH3_modes)
NH3_q_v_b = calc_q_vs_beta(T_min, T_max, 100, NH3_states[:,-1])
pp.plot(temp_range,NH3_q_v_b[:,1])
pp.show()
# Now we'll calculate the average energy
dx = temp_range[1] - temp_range[0]
avg_energy = np.diff(NH3_q_v_b[:,1])/dx
for i in range(0,N-1):
avg_energy[i] = avg_energy[i] * kT_cm * temp_range[i]**2
# Now we will calculate C_v = d<E>/dT
avg_cv = np.diff(avg_energy)/dx
# Now we can compare this to the C_p data available from NIST Webbook
# We have to subtract 3kT to remove translation and rotation
# and another factor of kT for Cp <--> Cv conversion
def nh3_cp_expt(T):
A = 19.99563 * 0.083593 # Conversion from J/molK --> 1/cmK
B = 49.77119 * 0.083593
C = -15.37599 * 0.083593
D = 1.921168 * 0.083593
E = 0.189174 * 0.083593
return A + B*(T/1000.0) + C*(T/1000.0)**2 + D*(T/1000.0)**3 + E/((T/1000.0)**2) - 4*kT_cm
cv_expt = nh3_cp_expt(temp_range)
pp.plot(temp_range[:N-2],avg_cv,'r') # Plots calculated C_v
pp.plot(temp_range,cv_expt,'b') # Plots expt. fit C_v data
pp.show()
# Plots the difference between expt and calculated C_V
differences = [avg_cv[i] - cv_expt[i] for i in range(0,len(avg_cv))]
pp.plot(temp_range[:N-2],differences)
pp.show()
|
mit
|
openstack/nova
|
nova/tests/functional/regressions/test_bug_1620248.py
|
2
|
1916
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.tests import fixtures as nova_fixtures
class TestServerUpdate(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(TestServerUpdate, self).setUp()
self.useFixture(nova_fixtures.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.GlanceFixture(self))
# Simulate requests coming in before the instance is scheduled by
# using a no-op for conductor build_instances
self.useFixture(nova_fixtures.NoopConductorFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
self.useFixture(nova_fixtures.CastAsCallFixture(self))
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def test_update_name_before_scheduled(self):
server = dict(name='server0',
imageRef=self.image_id,
flavorRef=self.flavor_id)
server_id = self.api.post_server({'server': server})['id']
server = {'server': {'name': 'server-renamed'}}
self.api.api_put('/servers/%s' % server_id, server)
server_name = self.api.get_server(server_id)['name']
self.assertEqual('server-renamed', server_name)
|
apache-2.0
|
formiano/enigma2-4.4
|
lib/python/Components/Converter/ServiceInfo.py
|
8
|
8587
|
from Components.Converter.Converter import Converter
from enigma import iServiceInformation, iPlayableService
from Components.Element import cached
from os import path
WIDESCREEN = [3, 4, 7, 8, 0xB, 0xC, 0xF, 0x10]
class ServiceInfo(Converter, object):
HAS_TELETEXT = 1
IS_MULTICHANNEL = 2
AUDIO_STEREO = 3
IS_CRYPTED = 4
IS_WIDESCREEN = 5
IS_NOT_WIDESCREEN = 6
SUBSERVICES_AVAILABLE = 7
XRES = 8
YRES = 9
APID = 10
VPID = 11
PCRPID = 12
PMTPID = 13
TXTPID = 14
TSID = 15
ONID = 16
SID = 17
FRAMERATE = 18
TRANSFERBPS = 19
HAS_HBBTV = 20
AUDIOTRACKS_AVAILABLE = 21
SUBTITLES_AVAILABLE = 22
EDITMODE = 23
IS_STREAM = 24
IS_SD = 25
IS_HD = 26
IS_1080 = 27
IS_720 = 28
IS_576 = 29
IS_480 = 30
def __init__(self, type):
Converter.__init__(self, type)
self.type, self.interesting_events = {
"HasTelext": (self.HAS_TELETEXT, (iPlayableService.evUpdatedInfo,)),
"IsMultichannel": (self.IS_MULTICHANNEL, (iPlayableService.evUpdatedInfo,)),
"IsStereo": (self.AUDIO_STEREO, (iPlayableService.evUpdatedInfo,)),
"IsCrypted": (self.IS_CRYPTED, (iPlayableService.evUpdatedInfo,)),
"IsWidescreen": (self.IS_WIDESCREEN, (iPlayableService.evVideoSizeChanged,)),
"IsNotWidescreen": (self.IS_NOT_WIDESCREEN, (iPlayableService.evVideoSizeChanged,)),
"SubservicesAvailable": (self.SUBSERVICES_AVAILABLE, (iPlayableService.evUpdatedEventInfo,)),
"VideoWidth": (self.XRES, (iPlayableService.evVideoSizeChanged,)),
"VideoHeight": (self.YRES, (iPlayableService.evVideoSizeChanged,)),
"AudioPid": (self.APID, (iPlayableService.evUpdatedInfo,)),
"VideoPid": (self.VPID, (iPlayableService.evUpdatedInfo,)),
"PcrPid": (self.PCRPID, (iPlayableService.evUpdatedInfo,)),
"PmtPid": (self.PMTPID, (iPlayableService.evUpdatedInfo,)),
"TxtPid": (self.TXTPID, (iPlayableService.evUpdatedInfo,)),
"TsId": (self.TSID, (iPlayableService.evUpdatedInfo,)),
"OnId": (self.ONID, (iPlayableService.evUpdatedInfo,)),
"Sid": (self.SID, (iPlayableService.evUpdatedInfo,)),
"Framerate": (self.FRAMERATE, (iPlayableService.evVideoSizeChanged,iPlayableService.evUpdatedInfo,)),
"TransferBPS": (self.TRANSFERBPS, (iPlayableService.evUpdatedInfo,)),
"HasHBBTV": (self.HAS_HBBTV, (iPlayableService.evUpdatedInfo,iPlayableService.evHBBTVInfo,)),
"AudioTracksAvailable": (self.AUDIOTRACKS_AVAILABLE, (iPlayableService.evUpdatedInfo,)),
"SubtitlesAvailable": (self.SUBTITLES_AVAILABLE, (iPlayableService.evUpdatedInfo,)),
"Editmode": (self.EDITMODE, (iPlayableService.evUpdatedInfo,)),
"IsStream": (self.IS_STREAM, (iPlayableService.evUpdatedInfo,)),
"IsSD": (self.IS_SD, (iPlayableService.evVideoSizeChanged,)),
"IsHD": (self.IS_HD, (iPlayableService.evVideoSizeChanged,)),
"Is1080": (self.IS_1080, (iPlayableService.evVideoSizeChanged,)),
"Is720": (self.IS_720, (iPlayableService.evVideoSizeChanged,)),
"Is576": (self.IS_576, (iPlayableService.evVideoSizeChanged,)),
"Is480": (self.IS_480, (iPlayableService.evVideoSizeChanged,)),
}[type]
def getServiceInfoString(self, info, what, convert = lambda x: "%d" % x):
v = info.getInfo(what)
if v == -1:
return "N/A"
if v == -2:
return info.getInfoString(what)
return convert(v)
@cached
def getBoolean(self):
service = self.source.service
info = service and service.info()
if not info:
return False
video_height = None
video_aspect = None
		try:
			f = open("/proc/stb/vmpeg/0/yres", "r")
			video_height = int(f.read(), 16)
			f.close()
		except (IOError, ValueError):
			video_height = int(info.getInfo(iServiceInformation.sVideoHeight))
video_aspect = info.getInfo(iServiceInformation.sAspect)
if self.type == self.HAS_TELETEXT:
tpid = info.getInfo(iServiceInformation.sTXTPID)
return tpid != -1
elif self.type in (self.IS_MULTICHANNEL, self.AUDIO_STEREO):
# FIXME. but currently iAudioTrackInfo doesn't provide more information.
audio = service.audioTracks()
if audio:
n = audio.getNumberOfTracks()
idx = 0
while idx < n:
i = audio.getTrackInfo(idx)
description = i.getDescription()
if description in ("AC3", "AC-3", "DTS"):
if self.type == self.IS_MULTICHANNEL:
return True
elif self.type == self.AUDIO_STEREO:
return False
idx += 1
if self.type == self.IS_MULTICHANNEL:
return False
elif self.type == self.AUDIO_STEREO:
return True
return False
elif self.type == self.IS_CRYPTED:
return info.getInfo(iServiceInformation.sIsCrypted) == 1
elif self.type == self.IS_WIDESCREEN:
return video_aspect in WIDESCREEN
elif self.type == self.IS_NOT_WIDESCREEN:
return video_aspect not in WIDESCREEN
elif self.type == self.SUBSERVICES_AVAILABLE:
subservices = service.subServices()
return subservices and subservices.getNumberOfSubservices() > 0
elif self.type == self.HAS_HBBTV:
return info.getInfoString(iServiceInformation.sHBBTVUrl) != ""
elif self.type == self.AUDIOTRACKS_AVAILABLE:
audio = service.audioTracks()
return audio and audio.getNumberOfTracks() > 1
elif self.type == self.SUBTITLES_AVAILABLE:
subtitle = service and service.subtitle()
subtitlelist = subtitle and subtitle.getSubtitleList()
if subtitlelist:
return len(subtitlelist) > 0
return False
elif self.type == self.EDITMODE:
			return hasattr(self.source, "editmode") and bool(self.source.editmode)
elif self.type == self.IS_STREAM:
return service.streamed() is not None
elif self.type == self.IS_SD:
return video_height < 720
elif self.type == self.IS_HD:
return video_height >= 720
elif self.type == self.IS_1080:
return video_height > 1000 and video_height <= 1080
elif self.type == self.IS_720:
return video_height > 700 and video_height <= 720
elif self.type == self.IS_576:
return video_height > 500 and video_height <= 576
elif self.type == self.IS_480:
return video_height > 0 and video_height <= 480
return False
boolean = property(getBoolean)
@cached
def getText(self):
service = self.source.service
info = service and service.info()
if not info:
return ""
if self.type == self.XRES:
video_width = None
if path.exists("/proc/stb/vmpeg/0/xres"):
f = open("/proc/stb/vmpeg/0/xres", "r")
video_width = int(f.read(),16)
f.close()
if not video_width:
video_width = int(self.getServiceInfoString(info, iServiceInformation.sVideoWidth))
return "%d" % video_width
elif self.type == self.YRES:
video_height = None
if path.exists("/proc/stb/vmpeg/0/yres"):
f = open("/proc/stb/vmpeg/0/yres", "r")
video_height = int(f.read(),16)
f.close()
if not video_height:
video_height = int(self.getServiceInfoString(info, iServiceInformation.sVideoHeight))
return "%d" % video_height
elif self.type == self.APID:
return self.getServiceInfoString(info, iServiceInformation.sAudioPID)
elif self.type == self.VPID:
return self.getServiceInfoString(info, iServiceInformation.sVideoPID)
elif self.type == self.PCRPID:
return self.getServiceInfoString(info, iServiceInformation.sPCRPID)
elif self.type == self.PMTPID:
return self.getServiceInfoString(info, iServiceInformation.sPMTPID)
elif self.type == self.TXTPID:
return self.getServiceInfoString(info, iServiceInformation.sTXTPID)
elif self.type == self.TSID:
return self.getServiceInfoString(info, iServiceInformation.sTSID)
elif self.type == self.ONID:
return self.getServiceInfoString(info, iServiceInformation.sONID)
elif self.type == self.SID:
return self.getServiceInfoString(info, iServiceInformation.sSID)
elif self.type == self.FRAMERATE:
return self.getServiceInfoString(info, iServiceInformation.sFrameRate, lambda x: "%d fps" % ((x+500)/1000))
elif self.type == self.TRANSFERBPS:
return self.getServiceInfoString(info, iServiceInformation.sTransferBPS, lambda x: "%d kB/s" % (x/1024))
elif self.type == self.HAS_HBBTV:
return info.getInfoString(iServiceInformation.sHBBTVUrl)
return ""
text = property(getText)
@cached
def getValue(self):
service = self.source.service
info = service and service.info()
if not info:
return -1
if self.type == self.XRES:
return info.getInfo(iServiceInformation.sVideoWidth)
if self.type == self.YRES:
return info.getInfo(iServiceInformation.sVideoHeight)
if self.type == self.FRAMERATE:
return info.getInfo(iServiceInformation.sFrameRate)
return -1
value = property(getValue)
def changed(self, what):
if what[0] != self.CHANGED_SPECIFIC or what[1] in self.interesting_events:
Converter.changed(self, what)
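# A hedged skin-usage sketch (enigma2 skins reference this converter from
# XML; the widget source, size and position below are illustrative
# assumptions, not part of this module):
#
#   <widget source="session.CurrentService" render="Label" position="0,0" size="100,25">
#       <convert type="ServiceInfo">VideoWidth</convert>
#   </widget>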
|
gpl-2.0
|
p4datasystems/CarnotKE
|
jyhton/lib-python/2.7/lib-tk/FixTk.py
|
96
|
2938
|
import sys, os
# Delay import _tkinter until we have set TCL_LIBRARY,
# so that Tcl_FindExecutable has a chance to locate its
# encoding directory.
# Unfortunately, we cannot know the TCL_LIBRARY directory
# if we don't know the tcl version, which we cannot find out
# without importing Tcl. Fortunately, Tcl will itself look in
# <TCL_LIBRARY>\..\tcl<TCL_VERSION>, so anything close to
# the real Tcl library will do.
# Expand symbolic links on Vista
try:
import ctypes
ctypes.windll.kernel32.GetFinalPathNameByHandleW
except (ImportError, AttributeError):
def convert_path(s):
return s
else:
def convert_path(s):
assert isinstance(s, str) # sys.prefix contains only bytes
udir = s.decode("mbcs")
hdir = ctypes.windll.kernel32.\
CreateFileW(udir, 0x80, # FILE_READ_ATTRIBUTES
1, # FILE_SHARE_READ
None, 3, # OPEN_EXISTING
0x02000000, # FILE_FLAG_BACKUP_SEMANTICS
None)
if hdir == -1:
# Cannot open directory, give up
return s
buf = ctypes.create_unicode_buffer(u"", 32768)
res = ctypes.windll.kernel32.\
GetFinalPathNameByHandleW(hdir, buf, len(buf),
0) # VOLUME_NAME_DOS
ctypes.windll.kernel32.CloseHandle(hdir)
if res == 0:
# Conversion failed (e.g. network location)
return s
s = buf[:res].encode("mbcs")
# Ignore leading \\?\
if s.startswith("\\\\?\\"):
s = s[4:]
if s.startswith("UNC"):
s = "\\" + s[3:]
return s
prefix = os.path.join(sys.prefix,"tcl")
if not os.path.exists(prefix):
# devdir/../tcltk/lib
prefix = os.path.join(sys.prefix, os.path.pardir, "tcltk", "lib")
prefix = os.path.abspath(prefix)
# if this does not exist, no further search is needed
if os.path.exists(prefix):
prefix = convert_path(prefix)
if "TCL_LIBRARY" not in os.environ:
for name in os.listdir(prefix):
if name.startswith("tcl"):
tcldir = os.path.join(prefix,name)
if os.path.isdir(tcldir):
os.environ["TCL_LIBRARY"] = tcldir
# Compute TK_LIBRARY, knowing that it has the same version
# as Tcl
import _tkinter
ver = str(_tkinter.TCL_VERSION)
if "TK_LIBRARY" not in os.environ:
v = os.path.join(prefix, 'tk'+ver)
if os.path.exists(os.path.join(v, "tclIndex")):
os.environ['TK_LIBRARY'] = v
# We don't know the Tix version, so we must search the entire
# directory
if "TIX_LIBRARY" not in os.environ:
for name in os.listdir(prefix):
if name.startswith("tix"):
tixdir = os.path.join(prefix,name)
if os.path.isdir(tixdir):
os.environ["TIX_LIBRARY"] = tixdir
|
apache-2.0
|
hyperized/ansible
|
lib/ansible/modules/monitoring/grafana_plugin.py
|
20
|
8684
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Thierry Sallé (@seuf)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: grafana_plugin
author:
- Thierry Sallé (@seuf)
version_added: "2.5"
short_description: Manage Grafana plugins via grafana-cli
description:
- Install and remove Grafana plugins.
- See U(https://grafana.com/docs/plugins/installation/) for upstream documentation.
options:
name:
description:
- Name of the plugin.
required: true
version:
description:
- Version of the plugin to install.
- Defaults to C(latest).
grafana_plugins_dir:
description:
- Directory where the Grafana plugin will be installed.
- If omitted, defaults to C(/var/lib/grafana/plugins).
grafana_repo:
description:
- URL to the Grafana plugin repository.
- "If omitted, grafana-cli will use the default value: U(https://grafana.com/api/plugins)."
grafana_plugin_url:
description:
- Full URL to the plugin zip file instead of downloading the file from U(https://grafana.com/api/plugins).
- Requires grafana 4.6.x or later.
state:
description:
- Whether the plugin should be installed.
choices:
- present
- absent
default: present
'''
EXAMPLES = '''
---
- name: Install/update Grafana piechart panel plugin
grafana_plugin:
name: grafana-piechart-panel
version: latest
state: present
'''
RETURN = '''
---
version:
description: version of the installed/removed/updated plugin.
type: str
returned: always
'''
__metaclass__ = type
import os
from ansible.module_utils.basic import AnsibleModule
class GrafanaCliException(Exception):
pass
def grafana_cli_bin(params):
'''
Get the grafana-cli binary path with global options.
Raise a GrafanaCliException if the grafana-cli is not present or not in PATH
:param params: ansible module params. Used to fill grafana-cli global params.
'''
program = 'grafana-cli'
grafana_cli = None
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
grafana_cli = program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
grafana_cli = exe_file
break
if grafana_cli is None:
raise GrafanaCliException('grafana-cli binary is not present or not in PATH')
else:
if 'grafana_plugin_url' in params and params['grafana_plugin_url']:
grafana_cli = '{0} {1} {2}'.format(grafana_cli, '--pluginUrl', params['grafana_plugin_url'])
if 'grafana_plugins_dir' in params and params['grafana_plugins_dir']:
grafana_cli = '{0} {1} {2}'.format(grafana_cli, '--pluginsDir', params['grafana_plugins_dir'])
if 'grafana_repo' in params and params['grafana_repo']:
grafana_cli = '{0} {1} {2}'.format(grafana_cli, '--repo', params['grafana_repo'])
if 'validate_certs' in params and params['validate_certs'] is False:
grafana_cli = '{0} {1}'.format(grafana_cli, '--insecure')
return '{0} {1}'.format(grafana_cli, 'plugins')
def get_grafana_plugin_version(module, params):
'''
    Fetch the installed version of a Grafana plugin. Return None if the plugin is not installed.
:param module: ansible module object. used to run system commands.
:param params: ansible module params.
'''
grafana_cli = grafana_cli_bin(params)
rc, stdout, stderr = module.run_command('{0} ls'.format(grafana_cli))
stdout_lines = stdout.split("\n")
for line in stdout_lines:
if line.find(' @ ') != -1:
line = line.rstrip()
plugin_name, plugin_version = line.split(' @ ')
if plugin_name == params['name']:
return plugin_version
return None
def get_grafana_plugin_version_latest(module, params):
'''
Fetch the latest version available from grafana-cli.
    Return the newest version number, or None if not found.
:param module: ansible module object. used to run system commands.
:param params: ansible module params.
'''
grafana_cli = grafana_cli_bin(params)
rc, stdout, stderr = module.run_command('{0} list-versions {1}'.format(grafana_cli,
params['name']))
stdout_lines = stdout.split("\n")
if stdout_lines[0]:
return stdout_lines[0].rstrip()
return None
def grafana_plugin(module, params):
'''
    Install, update, or remove a Grafana plugin.
:param module: ansible module object. used to run system commands.
:param params: ansible module params.
'''
grafana_cli = grafana_cli_bin(params)
if params['state'] == 'present':
grafana_plugin_version = get_grafana_plugin_version(module, params)
if grafana_plugin_version is not None:
if 'version' in params and params['version']:
if params['version'] == grafana_plugin_version:
return {'msg': 'Grafana plugin already installed',
'changed': False,
'version': grafana_plugin_version}
else:
if params['version'] == 'latest' or params['version'] is None:
latest_version = get_grafana_plugin_version_latest(module, params)
if latest_version == grafana_plugin_version:
return {'msg': 'Grafana plugin already installed',
'changed': False,
'version': grafana_plugin_version}
cmd = '{0} update {1}'.format(grafana_cli, params['name'])
else:
cmd = '{0} install {1} {2}'.format(grafana_cli, params['name'], params['version'])
else:
return {'msg': 'Grafana plugin already installed',
'changed': False,
'version': grafana_plugin_version}
else:
if 'version' in params:
if params['version'] == 'latest' or params['version'] is None:
cmd = '{0} install {1}'.format(grafana_cli, params['name'])
else:
cmd = '{0} install {1} {2}'.format(grafana_cli, params['name'], params['version'])
else:
cmd = '{0} install {1}'.format(grafana_cli, params['name'])
else:
cmd = '{0} uninstall {1}'.format(grafana_cli, params['name'])
rc, stdout, stderr = module.run_command(cmd)
if rc == 0:
stdout_lines = stdout.split("\n")
for line in stdout_lines:
            if line.find(params['name']) != -1:
if line.find(' @ ') != -1:
line = line.rstrip()
plugin_name, plugin_version = line.split(' @ ')
else:
plugin_version = None
return {'msg': 'Grafana plugin {0} installed : {1}'.format(params['name'], cmd),
'changed': True,
'version': plugin_version}
else:
raise GrafanaCliException("'{0}' execution returned an error : [{1}] {2} {3}".format(cmd, rc, stdout, stderr))
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True,
type='str'),
version=dict(type='str'),
grafana_plugins_dir=dict(type='str'),
grafana_repo=dict(type='str'),
grafana_plugin_url=dict(type='str'),
state=dict(choices=['present', 'absent'],
default='present')
),
supports_check_mode=False
)
try:
result = grafana_plugin(module, module.params)
except GrafanaCliException as e:
module.fail_json(
failed=True,
msg="{0}".format(e)
)
return
except Exception as e:
module.fail_json(
failed=True,
msg="{0} : {1} ".format(type(e), e)
)
return
module.exit_json(
failed=False,
**result
)
return
if __name__ == '__main__':
main()
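# A hedged companion to the EXAMPLES block above (same module; assumes
# grafana-cli is available on PATH): removing a plugin instead of
# installing it.
#
# - name: Remove Grafana piechart panel plugin
#   grafana_plugin:
#     name: grafana-piechart-panel
#     state: absent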
|
gpl-3.0
|
chenkianwee/pyliburo
|
py4design/py2radiance/write_rad.py
|
2
|
6667
|
# ==================================================================================================
#
# Copyright (c) 2016, Chen Kian Wee ([email protected])
#
# This file is part of py4design
#
# py4design is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# py4design is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with py4design. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Patrick Janssen <[email protected]>
# Chen Kian Wee <[email protected]>
# ==================================================================================================
def surface(name, material, points):
"""
This function writes the surface information into Radiance readable string format.
Parameters
----------
name : str
The name of the surface.
material : str
The name of the material of the surface. The material name must be in the base.rad file.
points : pyptlist
List of points defining the surface. Pyptlist is a list of tuples of floats. A pypt is a tuple that documents the xyz coordinates of a
pt e.g. (x,y,z), thus a pyptlist is a list of tuples e.g. [(x1,y1,z1), (x2,y2,z2), ...]
Returns
-------
rad surface : str
The surface written into radiance readable string.
"""
surface = material + " polygon " + name + "\n"+\
"0\n"+\
"0\n"+\
str(len(points) * 3) + "\n"
for point in points:
surface = surface + " " + str(point[0]) + " " + str(point[1]) + " " + str(point[2]) + "\n"
surface = surface + "\n"
return surface
def glow(name, colour):
"""
This function writes the glow function for Radiance.
Parameters
----------
name: str
The name of glow.
colour: tuple of floats
A tuple of floats describing the colour. e.g. (1,1,1) for a white sky.
Returns
-------
rad glow : str
The glow written into radiance readable string.
"""
glow = "skyfunc glow " + name + "\n"+\
"0\n"+\
"0\n"+\
"4 " +\
str(colour[0])+ " " +\
str(colour[1]) + " " +\
str(colour[2]) + " 0\n"
glow = glow + "\n"
return glow
def source(name, material, direction):
"""
This function writes the source function for Radiance.
Parameters
----------
name: str
The name of source.
material: str
The material of the source, can be either "sky_glow" or "ground_glow".
direction: tuple of floats
A tuple of floats describing the direction of the source. e.g. (0,0,1) for point up.
Returns
-------
rad source : str
The source written into radiance readable string.
"""
source = material + " source " + name + "\n"+\
"0\n"+\
"0\n"+\
"4 " +\
str(direction[0])+ " " +\
str(direction[1]) + " " +\
str(direction[2]) + " 180\n"
source = source + "\n"
return source
def brightfunc(cal_name):
"""
This function writes the brightfunc function for Radiance.
Parameters
----------
cal_name: str
The name of cal.
Returns
-------
rad brightfunc : str
The brightfunc written into radiance readable string.
"""
brightfunc = "void brightfunc skyfunc\n" +\
"2 skybright " + cal_name + "\n"+\
"0\n"+\
"0\n\n"
return brightfunc
def material_glass(name, transmission):
"""
This function writes the Radiance glass material.
Parameters
----------
name: str
The name of glass.
transmission: tuple of floats
A tuple of floats describing the transmission of the glass.
Returns
-------
rad glass : str
The glass written into radiance readable string.
"""
material_glass = "# Glass material\n"+\
"void glass " + name + "\n"+\
"0\n"+\
"0\n"+\
"3 " +\
str(transmission[0])+ " " +\
str(transmission[1]) + " " +\
str(transmission[2]) + "\n\n"
return material_glass
def material_plastic(name, colour, spec, rough):
"""
This function writes the Radiance plastic material.
Parameters
----------
name: str
The name of plastic.
colour: tuple of floats
A tuple of floats describing the colour of the glass.
spec: float
A float describing the specularity of the plastic.
rough: float
A float describing the roughness of the plastic.
Returns
-------
rad plastic : str
The plastic written into radiance readable string.
"""
material_plastic = "# Plastic material\n"+\
"void plastic " + name + "\n"+\
"0\n"+\
"0\n"+\
"5 " +\
str(colour[0])+ " " +\
str(colour[1]) + " " +\
str(colour[2]) + " " +\
str(spec) + " " +\
str(rough) + "\n\n"
return material_plastic
def sensor_file(positions, normals):
"""
This function writes the sensor points and their normals for the Radiance/Daysim simulation.
Parameters
----------
positions: pyptlist
List of positions for sensing. Pyptlist is a list of tuples of floats. A pypt is a tuple that documents the xyz coordinates of a
pt e.g. (x,y,z), thus a pyptlist is a list of tuples e.g. [(x1,y1,z1), (x2,y2,z2), ...]
normals: pyveclist
List of normals of the points sensing. Pyveclist is a list of tuples of floats. A pyvec is a tuple that documents the xyz coordinates of a
direction e.g. (x,y,z), thus a pyveclist is a list of tuples e.g. [(x1,y1,z1), (x2,y2,z2), ...]
Returns
-------
rad sensors : str
The sensors written into radiance readable string.
"""
    if not positions or not normals:
        raise Exception("positions and normals must both be non-empty")
    if len(positions) != len(normals):
        raise Exception("positions and normals must have the same length")
sensors = ""
for i in range(len(positions)):
pos = positions[i]
nor = normals[i]
sensors = sensors + str(pos[0]) + " " + str(pos[1]) + " " + str(pos[2]) + " " + str(nor[0]) + " " + str(nor[1]) + " " + str(nor[2]) + "\n"
return sensors
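# A minimal usage sketch (not part of the module API): compose a plastic
# material and a single square surface into one Radiance-readable string,
# following the pyptlist point convention documented above.
if __name__ == "__main__":
    wall_mat = material_plastic("wall_mat", (0.5, 0.5, 0.5), 0.0, 0.0)
    wall_srf = surface("wall1", "wall_mat",
                       [(0, 0, 0), (1, 0, 0), (1, 0, 1), (0, 0, 1)])
    print(wall_mat + wall_srf)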
|
gpl-3.0
|
chai2010/webp
|
internal/libwebp-0.5.0/swig/libwebp.py
|
107
|
6605
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.4
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_libwebp', [dirname(__file__)])
except ImportError:
import _libwebp
return _libwebp
if fp is not None:
try:
_mod = imp.load_module('_libwebp', fp, pathname, description)
finally:
fp.close()
return _mod
_libwebp = swig_import_helper()
del swig_import_helper
else:
import _libwebp
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def WebPGetDecoderVersion():
"""WebPGetDecoderVersion() -> int"""
return _libwebp.WebPGetDecoderVersion()
def WebPGetInfo(*args):
"""WebPGetInfo(uint8_t data) -> (width, height)"""
return _libwebp.WebPGetInfo(*args)
def WebPDecodeRGB(*args):
"""WebPDecodeRGB(uint8_t data) -> (rgb, width, height)"""
return _libwebp.WebPDecodeRGB(*args)
def WebPDecodeRGBA(*args):
"""WebPDecodeRGBA(uint8_t data) -> (rgb, width, height)"""
return _libwebp.WebPDecodeRGBA(*args)
def WebPDecodeARGB(*args):
"""WebPDecodeARGB(uint8_t data) -> (rgb, width, height)"""
return _libwebp.WebPDecodeARGB(*args)
def WebPDecodeBGR(*args):
"""WebPDecodeBGR(uint8_t data) -> (rgb, width, height)"""
return _libwebp.WebPDecodeBGR(*args)
def WebPDecodeBGRA(*args):
"""WebPDecodeBGRA(uint8_t data) -> (rgb, width, height)"""
return _libwebp.WebPDecodeBGRA(*args)
def WebPGetEncoderVersion():
"""WebPGetEncoderVersion() -> int"""
return _libwebp.WebPGetEncoderVersion()
def wrap_WebPEncodeRGB(*args):
"""private, do not call directly."""
return _libwebp.wrap_WebPEncodeRGB(*args)
def wrap_WebPEncodeBGR(*args):
"""private, do not call directly."""
return _libwebp.wrap_WebPEncodeBGR(*args)
def wrap_WebPEncodeRGBA(*args):
"""private, do not call directly."""
return _libwebp.wrap_WebPEncodeRGBA(*args)
def wrap_WebPEncodeBGRA(*args):
"""private, do not call directly."""
return _libwebp.wrap_WebPEncodeBGRA(*args)
def wrap_WebPEncodeLosslessRGB(*args):
"""private, do not call directly."""
return _libwebp.wrap_WebPEncodeLosslessRGB(*args)
def wrap_WebPEncodeLosslessBGR(*args):
"""private, do not call directly."""
return _libwebp.wrap_WebPEncodeLosslessBGR(*args)
def wrap_WebPEncodeLosslessRGBA(*args):
"""private, do not call directly."""
return _libwebp.wrap_WebPEncodeLosslessRGBA(*args)
def wrap_WebPEncodeLosslessBGRA(*args):
"""private, do not call directly."""
return _libwebp.wrap_WebPEncodeLosslessBGRA(*args)
_UNUSED = 1
def WebPEncodeRGB(rgb, width, height, stride, quality_factor):
"""WebPEncodeRGB(uint8_t rgb, int width, int height, int stride, float quality_factor) -> lossy_webp"""
webp = wrap_WebPEncodeRGB(
rgb, _UNUSED, _UNUSED, width, height, stride, quality_factor)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeRGBA(rgb, width, height, stride, quality_factor):
"""WebPEncodeRGBA(uint8_t rgb, int width, int height, int stride, float quality_factor) -> lossy_webp"""
webp = wrap_WebPEncodeRGBA(
rgb, _UNUSED, _UNUSED, width, height, stride, quality_factor)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeBGR(rgb, width, height, stride, quality_factor):
"""WebPEncodeBGR(uint8_t rgb, int width, int height, int stride, float quality_factor) -> lossy_webp"""
webp = wrap_WebPEncodeBGR(
rgb, _UNUSED, _UNUSED, width, height, stride, quality_factor)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeBGRA(rgb, width, height, stride, quality_factor):
"""WebPEncodeBGRA(uint8_t rgb, int width, int height, int stride, float quality_factor) -> lossy_webp"""
webp = wrap_WebPEncodeBGRA(
rgb, _UNUSED, _UNUSED, width, height, stride, quality_factor)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeLosslessRGB(rgb, width, height, stride):
"""WebPEncodeLosslessRGB(uint8_t rgb, int width, int height, int stride) -> lossless_webp"""
webp = wrap_WebPEncodeLosslessRGB(rgb, _UNUSED, _UNUSED, width, height, stride)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeLosslessRGBA(rgb, width, height, stride):
"""WebPEncodeLosslessRGBA(uint8_t rgb, int width, int height, int stride) -> lossless_webp"""
webp = wrap_WebPEncodeLosslessRGBA(rgb, _UNUSED, _UNUSED, width, height, stride)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeLosslessBGR(rgb, width, height, stride):
"""WebPEncodeLosslessBGR(uint8_t rgb, int width, int height, int stride) -> lossless_webp"""
webp = wrap_WebPEncodeLosslessBGR(rgb, _UNUSED, _UNUSED, width, height, stride)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeLosslessBGRA(rgb, width, height, stride):
"""WebPEncodeLosslessBGRA(uint8_t rgb, int width, int height, int stride) -> lossless_webp"""
webp = wrap_WebPEncodeLosslessBGRA(rgb, _UNUSED, _UNUSED, width, height, stride)
if len(webp[0]) == 0:
return None
return webp[0]
# This file is compatible with both classic and new-style classes.
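# A hedged roundtrip sketch (webp_bytes is an assumed buffer holding a .webp
# file; the encode helpers above return None on failure, so callers should
# check for it):
#
#   rgb, width, height = WebPDecodeRGB(webp_bytes)
#   lossy = WebPEncodeRGB(rgb, width, height, width * 3, 80.0)
#   if lossy is None:
#       raise RuntimeError("WebP encode failed")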
|
bsd-3-clause
|
haroldl/homeworklog
|
django/contrib/gis/gdal/feature.py
|
321
|
3998
|
# The GDAL C library, OGR exception, and the Field object
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.srs import SpatialReference
# ctypes function prototypes
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
"A class that wraps an OGR Feature, needs to be instantiated from a Layer object."
#### Python 'magic' routines ####
def __init__(self, feat, fdefn):
"Initializes on the pointers for the feature and the layer definition."
if not feat or not fdefn:
raise OGRException('Cannot create OGR Feature, invalid pointer given.')
self.ptr = feat
self._fdefn = fdefn
def __del__(self):
"Releases a reference to this object."
if self._ptr: capi.destroy_feature(self._ptr)
def __getitem__(self, index):
"""
Gets the Field object at the specified index, which may be either
an integer or the Field's string label. Note that the Field object
is not the field's _value_ -- use the `get` method instead to
retrieve the value (e.g. an integer) instead of a Field instance.
"""
if isinstance(index, basestring):
i = self.index(index)
else:
            if index < 0 or index >= self.num_fields:
raise OGRIndexError('index out of range')
i = index
return Field(self.ptr, i)
def __iter__(self):
"Iterates over each field in the Feature."
for i in xrange(self.num_fields):
yield self[i]
def __len__(self):
"Returns the count of fields in this feature."
return self.num_fields
def __str__(self):
"The string name of the feature."
return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)
def __eq__(self, other):
"Does equivalence testing on the features."
return bool(capi.feature_equal(self.ptr, other._ptr))
#### Feature Properties ####
@property
def fid(self):
"Returns the feature identifier."
return capi.get_fid(self.ptr)
@property
def layer_name(self):
"Returns the name of the layer for the feature."
return capi.get_feat_name(self._fdefn)
@property
def num_fields(self):
"Returns the number of fields in the Feature."
return capi.get_feat_field_count(self.ptr)
@property
def fields(self):
"Returns a list of fields in the Feature."
return [capi.get_field_name(capi.get_field_defn(self._fdefn, i))
for i in xrange(self.num_fields)]
@property
def geom(self):
"Returns the OGR Geometry for this Feature."
# Retrieving the geometry pointer for the feature.
geom_ptr = capi.get_feat_geom_ref(self.ptr)
return OGRGeometry(geom_api.clone_geom(geom_ptr))
@property
def geom_type(self):
"Returns the OGR Geometry Type for this Feture."
return OGRGeomType(capi.get_fd_geom_type(self._fdefn))
#### Feature Methods ####
def get(self, field):
"""
Returns the value of the field, instead of an instance of the Field
object. May take a string of the field name or a Field object as
parameters.
"""
field_name = getattr(field, 'name', field)
return self[field_name].value
def index(self, field_name):
"Returns the index of the given field name."
i = capi.get_field_index(self.ptr, field_name)
if i < 0: raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
return i
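# A hedged usage sketch (Feature objects come from iterating a Layer rather
# than being constructed directly; the shapefile and field name below are
# illustrative assumptions):
#
#   from django.contrib.gis.gdal import DataSource
#   ds = DataSource('cities.shp')
#   for feat in ds[0]:  # first Layer in the data source
#       print feat.fid, feat.get('NAME'), feat.geom.geom_type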
|
bsd-3-clause
|
xrmx/django
|
tests/get_or_create/models.py
|
90
|
1324
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
birthday = models.DateField()
def __str__(self):
return '%s %s' % (self.first_name, self.last_name)
class DefaultPerson(models.Model):
first_name = models.CharField(max_length=100, default="Anonymous")
class ManualPrimaryKeyTest(models.Model):
id = models.IntegerField(primary_key=True)
data = models.CharField(max_length=100)
class Profile(models.Model):
person = models.ForeignKey(Person, primary_key=True)
class Tag(models.Model):
text = models.CharField(max_length=255, unique=True)
class Thing(models.Model):
name = models.CharField(max_length=256)
tags = models.ManyToManyField(Tag)
class Publisher(models.Model):
name = models.CharField(max_length=100)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author, related_name='books')
publisher = models.ForeignKey(Publisher, related_name='books', db_column="publisher_id_column")
|
bsd-3-clause
|
rdelval/aurora
|
src/test/python/apache/aurora/client/cli/test_inspect.py
|
5
|
6730
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import json
from mock import patch
from apache.aurora.client.cli.client import AuroraCommandLine
from apache.aurora.config import AuroraConfig
from apache.aurora.config.schema.base import Job
from apache.thermos.config.schema_base import MB, Process, Resources, Task
from .util import AuroraClientCommandTest
from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
class TestInspectCommand(AuroraClientCommandTest):
def get_job_config(self):
return AuroraConfig(job=Job(
cluster='west',
role='bozo',
environment='test',
name='the_job',
service=False,
task=Task(
name='task',
processes=[Process(cmdline='ls -la', name='process')],
resources=Resources(cpu=1.0, ram=1024 * MB, disk=1024 * MB)
),
contact='[email protected]',
instances=3,
cron_schedule='* * * * *'
))
def test_inspect_job(self):
mock_stdout = []
def mock_print_out(msg, indent=0):
indent_str = " " * indent
mock_stdout.append("%s%s" % (indent_str, msg))
with contextlib.nested(
patch('apache.aurora.client.cli.context.AuroraCommandContext.print_out',
side_effect=mock_print_out),
patch('apache.aurora.client.cli.context.AuroraCommandContext.get_job_config',
return_value=self.get_job_config())):
cmd = AuroraCommandLine()
assert cmd.execute(['job', 'inspect', 'west/bozo/test/hello', 'config.aurora']) == 0
output = '\n'.join(mock_stdout)
assert output == '''Job level information
name: 'the_job'
role: 'bozo'
contact: '[email protected]'
cluster: 'west'
instances: '3'
cron:
schedule: '* * * * *'
policy: 'KILL_EXISTING'
service: False
production: False
Task level information
name: 'task'
Process 'process':
cmdline:
ls -la
'''
def test_inspect_job_in_json(self):
mock_stdout = []
def mock_print_out(msg):
mock_stdout.append("%s" % msg)
with contextlib.nested(
patch('apache.aurora.client.cli.context.AuroraCommandContext.print_out',
side_effect=mock_print_out),
patch('apache.aurora.client.cli.context.AuroraCommandContext.get_job_config',
return_value=self.get_job_config())):
cmd = AuroraCommandLine()
assert cmd.execute([
'job', 'inspect', '--write-json', 'west/bozo/test/hello', 'config.aurora']) == 0
output = {
"environment": "test",
"health_check_config": {
"initial_interval_secs": 15.0,
"health_checker": {
"http": {
"expected_response_code": 0,
"endpoint": "/health",
"expected_response": "ok"}},
"interval_secs": 10.0,
"timeout_secs": 1.0,
"max_consecutive_failures": 0,
"min_consecutive_successes": 1},
"cluster": "west",
"cron_schedule": "* * * * *",
"service": False,
"update_config": {
"wait_for_batch_completion": False,
"batch_size": 1,
"watch_secs": 45,
"rollback_on_failure": True,
"max_per_shard_failures": 0,
"max_total_failures": 0,
"sla_aware": False},
"name": "the_job",
"max_task_failures": 1,
"cron_collision_policy": "KILL_EXISTING",
"enable_hooks": False,
"instances": 3,
"task": {
"processes": [{
"daemon": False,
"name": "process",
"ephemeral": False,
"max_failures": 1,
"min_duration": 5,
"cmdline": "ls -la",
"final": False}],
"name": "task",
"finalization_wait": 30,
"max_failures": 1,
"max_concurrency": 0,
"resources": {
"gpu": 0,
"disk": 1073741824,
"ram": 1073741824,
"cpu": 1.0},
"constraints": []},
"production": False,
"role": "bozo",
"contact": "[email protected]",
"executor_config": {
"name": AURORA_EXECUTOR_NAME,
"data": ""
},
"metadata": [],
"lifecycle": {
"http": {
"graceful_shutdown_endpoint": "/quitquitquit",
"port": "health",
"shutdown_endpoint": "/abortabortabort",
"graceful_shutdown_wait_secs": 5,
"shutdown_wait_secs": 5}},
"priority": 0}
mock_output = "\n".join(mock_stdout)
assert output == json.loads(mock_output)
def test_inspect_job_raw(self):
mock_stdout = []
def mock_print_out(msg, indent=0):
indent_str = " " * indent
mock_stdout.append("%s%s" % (indent_str, msg))
job_config = self.get_job_config()
with contextlib.nested(
patch('apache.aurora.client.cli.context.AuroraCommandContext.print_out',
side_effect=mock_print_out),
patch('apache.aurora.client.cli.context.AuroraCommandContext.get_job_config',
return_value=job_config)):
cmd = AuroraCommandLine()
assert cmd.execute(['job', 'inspect', '--raw', 'west/bozo/test/hello', 'config.aurora']) == 0
output = '\n'.join(mock_stdout)
# It's impossible to assert string equivalence of two objects with nested un-hashable types.
# Given that the only product of --raw flag is the thrift representation of AuroraConfig
# it's enough to do a spot check here and let thrift.py tests validate the structure.
assert 'TaskConfig' in output
# AURORA-990: Prevent regression of client passing invalid arguments to print_out.
# Since print_out is the final layer before print(), there's not much else we can do than
# ensure the command exits normally.
def test_inspect_job_raw_success(self):
with patch('apache.aurora.client.cli.context.AuroraCommandContext.get_job_config',
return_value=self.get_job_config()):
cmd = AuroraCommandLine()
assert cmd.execute(['job', 'inspect', '--raw', 'west/bozo/test/hello', 'config.aurora']) == 0
|
apache-2.0
|
pwillworth/galaxyharvester
|
html/markUnavailable.py
|
1
|
4274
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <[email protected]>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
from http import cookies
import dbSession
import dbShared
import ghShared
import cgi
import pymysql
def removeSpawn(spawnID, planets, userID, galaxy):
	markAll = 0
	conn = None
	cursor = None
	try:
		conn = dbShared.ghConn()
		cursor = conn.cursor()
	except Exception:
		result = "Error: could not connect to database"
	if (cursor):
if (planets == "all"):
markAll = 1
sqlStr = "UPDATE tResourcePlanet SET unavailable=NOW(), unavailableBy='" + userID + "' WHERE spawnID=" + str(spawnID) + ";"
else:
# try to look up planet by name if an ID was not provided
if (planets.isdigit() != True):
planets = dbShared.getPlanetID(planets)
sqlStr = "UPDATE tResourcePlanet SET unavailable=NOW(), unavailableBy='" + userID + "' WHERE spawnID=" + str(spawnID) + " AND planetID=" + str(planets) + ";"
# Only allow removal if user has positive reputation
stats = dbShared.getUserStats(userID, galaxy).split(",")
admin = dbShared.getUserAdmin(conn, userID, galaxy)
cursor.execute("SELECT enteredBy, unavailable FROM tResources WHERE spawnID=%s;", [spawnID])
row = cursor.fetchone()
if int(stats[2]) < ghShared.MIN_REP_VALS['REMOVE_RESOURCE'] and row[0] != userID and not admin:
result = "Error: You must earn a little reputation on the site before you can remove resources. Try adding or verifying some first. \r\n"
elif row[1] != None:
result = "Error: You cannot remove that resource because it is already removed."
else:
cursor.execute(sqlStr)
# add cleanup event
if not planets.isdigit():
planets = 0
dbShared.logEvent("INSERT INTO tResourceEvents (galaxy, spawnID, userID, eventTime, eventType, planetID) VALUES (" + str(galaxy) + "," + str(spawnID) + ",'" + userID + "',NOW(),'r'," + str(planets) + ");", 'r', userID, galaxy, str(spawnID))
result = "Spawn marked unavailable."
cursor.close()
else:
result = "Error: Could not connect to database"
	if conn:
		conn.close()
return result
def main():
# Get current url
try:
url = os.environ['SCRIPT_NAME']
except KeyError:
url = ''
form = cgi.FieldStorage()
# Get Cookies
useCookies = 1
C = cookies.SimpleCookie()
try:
C.load(os.environ['HTTP_COOKIE'])
except KeyError:
useCookies = 0
if useCookies:
try:
currentUser = C['userID'].value
except KeyError:
currentUser = ''
try:
loginResult = C['loginAttempt'].value
except KeyError:
loginResult = 'success'
try:
sid = C['gh_sid'].value
except KeyError:
sid = form.getfirst('gh_sid', '')
else:
currentUser = ''
sid = form.getfirst('gh_sid', '')
spawnName = form.getfirst('spawn', '')
galaxy = form.getfirst('galaxy', '')
planets = form.getfirst('planets', '')
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
spawnName = dbShared.dbInsertSafe(spawnName)
galaxy = dbShared.dbInsertSafe(galaxy)
planets = dbShared.dbInsertSafe(planets)
# Get a session
logged_state = 0
sess = dbSession.getSession(sid)
if (sess != ''):
logged_state = 1
currentUser = sess
# Main program
print('Content-type: text/html\n')
if (logged_state > 0):
if (dbShared.galaxyState(galaxy) == 1):
spawnID = dbShared.getSpawnID(spawnName, galaxy)
result = removeSpawn(spawnID, planets, currentUser, galaxy)
else:
result = "Error: That Galaxy is Inactive."
else:
result = "Error: You must be logged in to mark a resource unavailable."
print(result)
if (result.find("Error:") > -1):
sys.exit(500)
else:
sys.exit(200)
if __name__ == "__main__":
main()
|
gpl-3.0
|
micfan/dinner
|
src/apps/oauth2/views.py
|
1
|
20543
|
#coding=utf-8
__author__ = 'laonan, http://laonan.net'
'''
Example of calling the weibo API: change the "/" in the API path to "__" and
pass keyword arguments, excluding the source and access_token parameters:
client.get.statuses__user_timeline()
client.post.statuses__update(status=u'Testing an OAuth 2.0 weibo post')
f = open('/Users/Alan/Workspace/dongting/static/images/player_bg.png')
client.upload.statuses__upload(status=u'Testing an OAuth 2.0 weibo post with a picture', pic=f)
f.close()
'''
import json
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate, login as auth_login
from apps.oauth2.models import WeiboUser, QQUser
from django.contrib.auth.models import User,Group
# Placeholder stubs; the real implementations live elsewhere in the project.
# They accept arbitrary arguments so the call sites below do not raise TypeError.
def LevelRule(*args, **kwargs):
    pass
def update_user_points(*args, **kwargs):
    pass
def gen_random(*args, **kwargs):
    pass
def generate_key(*args, **kwargs):
    pass
def format_baosteel_ip(*args, **kwargs):
    pass
# from django.contrib.sites.models import get_current_site
def get_current_site(*args, **kwargs):
    pass
from django.contrib.auth.views import login as auth_login_view
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from weibo import APIClient
from qq import OauthVars, OauthClient
def is_captcha_match(*args, **kwargs):
    pass
# production env config: [email protected]
#APP_KEY = '1064269280' # app key
#APP_SECRET = 'd8649d60c61ee42bd397537caad25343' # app secret
# test config:[email protected]
APP_KEY = '3727112766' # app key
APP_SECRET = 'cd1ce0ba3ce8c7e463019c976533af60' # app secret
def weibo_login(request):
client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=_get_weibo_callback_url(request))
url = client.get_authorize_url()
return HttpResponseRedirect(url)
def weibo_auth(request):
    # Get the "code" URL parameter:
code = request.GET.get('code')
client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=_get_weibo_callback_url(request))
token_obj = client.request_access_token(code)
client.set_access_token(token_obj.access_token, token_obj.expires_in)
if request.session.has_key('oauth_access_token'):
del request.session['oauth_access_token']
request.session['oauth_access_token'] = { 'uid' : token_obj.uid, 'access_token' : token_obj.access_token, 'expires_in' : token_obj.expires_in}
oauth_access_token = request.session.get('oauth_access_token', None)
    # Redirect to the home page
back_to_url = reverse('home.views.index')
if token_obj:
try:
w_user = WeiboUser.objects.get(weibo_user_id=oauth_access_token['uid'])
user = authenticate(weibo_username=w_user.user.username)
if user and user.is_active:
auth_login(request,user)
except WeiboUser.DoesNotExist:
back_to_url = reverse('sns_api.views.bind_weibo_user')
return HttpResponseRedirect(back_to_url)
def bind_weibo_user(request):
oauth_access_token = request.session.get('oauth_access_token', None)
client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=_get_weibo_callback_url(request))
client.set_access_token(oauth_access_token['access_token'], oauth_access_token['expires_in'])
weibo_user = client.get.users__show(uid=oauth_access_token['uid'])
weibo_username = weibo_user.screen_name
template_var = dict()
template_var['weibo_username'] = weibo_username
def auth_user():
        # Log the user in with the granted authorization
w_user = WeiboUser.objects.get(weibo_user_id=oauth_access_token['uid'])
user = authenticate(weibo_username=w_user.user.username)
if user and user.is_active: auth_login(request,user)
        # Post a weibo status ====> ok
if bao_msg: client.post.statuses__update(status=bao_msg[0:140])
result['uname'] = weibo_username if weibo_username else None
result['binded'] = 1
if request.method == 'POST':
result = dict()
        bao_id = request.POST.get('wbind_id', None) # Baostar username or registered email
bao_pwd = request.POST.get('wbind_pwd', None)
bao_msg = request.POST.get('wbind_msg', None)
if bao_id:
try:
w_user = WeiboUser.objects.get(weibo_username=weibo_username)
if w_user and (w_user.user.username == bao_id or w_user.user.email == bao_id):
result['rebind'] = 1
                    # Authorize and log the user in
w_user = WeiboUser.objects.get(weibo_user_id=oauth_access_token['uid'])
user = authenticate(weibo_username=w_user.user.username)
if user and user.is_active: auth_login(request,user)
except WeiboUser.DoesNotExist:
try:
user = User.objects.get(email=bao_id)
result['wbind_id'] = 1
result['wbind_pwd'] = (1 if authenticate(username=bao_id, password=bao_pwd) or authenticate(email=bao_id, password=bao_pwd) else 0)
#bind
if result['wbind_id'] and result['wbind_pwd']:
WeiboUser(
user = user,
weibo_user_id = oauth_access_token['uid'],
weibo_username = weibo_username,
oauth_access_token = oauth_access_token['access_token']
).save()
auth_user() #授权登录
except User.DoesNotExist:
user = User.objects.get(username=bao_id)
result['wbind_id'] = 1
result['wbind_pwd'] = (1 if authenticate(username=bao_id, password=bao_pwd) or authenticate(email=bao_id, password=bao_pwd) else 0)
#bind
if result['wbind_id'] and result['wbind_pwd']:
WeiboUser(
user = user,
weibo_user_id = oauth_access_token['uid'],
weibo_username = weibo_username,
oauth_access_token = oauth_access_token['access_token']
).save()
                        auth_user() # authorize and log the user in
except User.DoesNotExist:
                    result['wbind_id'] = 0 # the id does not exist
                    result['wbind_pwd'] = None # JSON-parses to null, so no password hint is shown
else:
result['wbind_id'] = None
if not bao_pwd:
result['wbind_pwd'] = None
result['wbind_msg'] = 1 if bao_msg else None
return HttpResponse(json.dumps(result), 'application/json')
else:
return auth_login_view(request, template_name='sns/bind_weibo_user.html', extra_context=template_var)
def create_user_from_weibo(request, template_name='sns/create_user_from_weibo.html'):
oauth_access_token = request.session.get('oauth_access_token', None)
client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=_get_weibo_callback_url(request))
client.set_access_token(oauth_access_token['access_token'], oauth_access_token['expires_in'])
weibo_user = client.get.users__show(uid=oauth_access_token['uid'])
weibo_username = weibo_user.screen_name
json_result = dict()
template_var = dict()
template_var['weibo_username'] = weibo_username
if request.user.is_authenticated() or oauth_access_token is None:
return HttpResponseRedirect(reverse('home.views.index'))
try:
w_user = WeiboUser.objects.get(weibo_username=weibo_username)
if w_user:
            json_result['wuser_existed'] = 'You have already registered a Baostar account and bound it to weibo; please log in directly'
except WeiboUser.DoesNotExist:
if request.method == 'POST':
get = lambda fieldId: request.POST.get(fieldId, None)
reg_email = get('reg_email')
reg_password = get('reg_password')
reg_re_password = get('reg_re_password')
reg_company = get('reg_company')
reg_tax_code = get('reg_tax_code')
reg_contact_man = get('reg_contact_man')
reg_mobile = get('reg_mobile')
reg_survey = get('reg_survey')
reg_survey_affix = get('reg_survey_affix')
reg_input_captcha = get('reg_input_captcha')
if reg_email: json_result['reg_email'] = not User.objects.filter(email = reg_email).count()
if reg_password and reg_re_password: json_result['reg_re_password'] = True if reg_re_password == reg_password else False
if reg_company: json_result['reg_company'] = not Company.objects.filter(name = reg_company).count()
if reg_tax_code: json_result['reg_tax_code'] = True
if reg_contact_man: json_result['reg_contact_man'] = True
if reg_mobile: json_result['reg_mobile'] = True
if reg_survey: json_result['reg_survey'] = True
if reg_survey_affix: json_result['reg_survey_affix'] = True
if reg_input_captcha: json_result['reg_input_captcha'] = (is_captcha_match(reg_input_captcha, request.session.get('captcha', None)))
            calculate = 0 # initialize the counter
for i in json_result.values():
calculate += i
if calculate >= 8:
if request.META.has_key('HTTP_X_FORWARDED_FOR'):
ip = format_baosteel_ip(request.META['HTTP_X_FORWARDED_FOR'])
else:
ip = request.META['REMOTE_ADDR']
username = 'GM_' + gen_random(4,'mix')
user = User.objects.create_user(username=username, email=reg_email, password=reg_password)
user.is_active = True
user.save()
company = Company(name=reg_company, organization_code='0000000000', code='111111111', contact=user,
corporation_tax=reg_tax_code)
company.save()
user_profile = UserProfile(user=user, company=company, mobile=reg_mobile, register_ip=ip, latest_login_ip=ip)
user_profile.save()
#init company level & level_name ===>ok
update_user_points(request,user_id=user.id, code='init')
reg_survey = RegisterSurvey(user=user, source_code=reg_survey, source_desc=reg_survey_affix)
reg_survey.save()
g = Group.objects.get(name='menu_buyer_default')
g.user_set.add(user)
                # Bind the weibo account
WeiboUser(
user = user,
weibo_user_id = oauth_access_token['uid'],
weibo_username = weibo_username,
oauth_access_token = oauth_access_token['access_token']
).save()
                # Log the user in with the granted authorization
w_user = WeiboUser.objects.get(weibo_user_id=oauth_access_token['uid'])
user = authenticate(weibo_username=w_user.user.username)
if user and user.is_active:
auth_login(request,user)
json_result['reg_user_id'] = user.id
return HttpResponse(json.dumps(json_result), 'application/json')
return render_to_response(template_name, template_var, context_instance=RequestContext(request))
def _post_weibo(request, msg, url=None):
oauth_access_token = request.session.get('oauth_access_token', None)
if oauth_access_token:
client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=_get_weibo_callback_url(request))
client.set_access_token(oauth_access_token['access_token'], oauth_access_token['expires_in'])
if url:
short_url = client.get.short_url__shorten(url_long=url)['urls'][0]['url_short']
else:
short_url = ''
message = msg + short_url
if len(message) > 140:
msg_count = 140 - len(short_url)
message = msg[0:msg_count] + short_url
client.post.statuses__update(status=message)
# call back url
def _get_weibo_callback_url(request):
current_site = get_current_site(request)
domain = current_site.domain
url = 'http://%s%s' %(domain, reverse('sns_weibo_login_done'))
return url
# ======================= qq =====================
def qq_login(request):
client = OauthClient()
csrf = client.gen_csrf()
url = client.get_authorize_url(csrf)
return HttpResponseRedirect(url)
def qq_auth(request):
    code = request.GET.get('code') # get the "code" URL parameter
client = OauthClient()
token = client.request_token(code) #request_token() => dict
    openid = client.request_openid() # the qq user's openid
if request.session.has_key('qq_oauth'): del request.session['qq_oauth']
request.session['qq_oauth'] = {'openid': openid, 'access_token': token['access_token'], 'expires_in': token['expires_in']}
qq_oauth = request.session.get('qq_oauth', None)
back_to_url = reverse('home.views.index')
if token:
try:
q_user = QQUser.objects.get(qq_user_id=qq_oauth['openid'])
user = authenticate(qq_nickname=q_user.user.username)
if user and user.is_active: auth_login(request,user)
except QQUser.DoesNotExist:
back_to_url = reverse('sns_api.views.bind_qq_user')#www.baostar.com:8000/account/login/qq/user/bind/
return HttpResponseRedirect(back_to_url)
def bind_qq_user(request, template_name):
template_var = dict()
result = dict()
token = request.session.get('qq_oauth', None)['access_token']
openid = request.session.get('qq_oauth', None)['openid']
client = OauthClient(token,openid)
client.init_openapi_url(token=token, openid=openid)
qq_user = client.request_openapi(api_path_str='user/get_user_info')
qq_nickname = qq_user['nickname']
template_var['qq_nickname'] = qq_nickname
def insert_qq_user(user,openid,qq_nickname, token):
QQUser( user = user,
qq_user_id = openid,
qq_nickname = qq_nickname,
oauth_access_token = token
).save()
        # Log the user in with the granted authorization
q_user = QQUser.objects.get(qq_user_id=openid)
user = authenticate(qq_nickname=q_user.user.username)
if user and user.is_active: auth_login(request,user)
        result['binded'] = 1 # ajax user insert succeeded; condition for the callback redirect
if request.method == 'POST':
        bao_id = request.POST.get('qbind_id', None) # Baostar username or registered email
bao_pwd = request.POST.get('qbind_pwd', None)
if bao_id:
try:
q_user = QQUser.objects.get(qq_nickname=qq_nickname)
if q_user and (q_user.user.username == bao_id or q_user.user.email == bao_id):
result['rebind'] = 1
insert_qq_user(q_user.user, openid, qq_nickname, token)
except QQUser.DoesNotExist:
try:
user = User.objects.get(email=bao_id)
result['qbind_id'] = 1
result['qbind_pwd'] = (1 if authenticate(username=bao_id, password=bao_pwd) or authenticate(email=bao_id, password=bao_pwd) else 0)
if result['qbind_id'] and result['qbind_pwd']:
insert_qq_user(user,openid,qq_nickname, token)
except User.DoesNotExist:
try:
user = User.objects.get(username=bao_id)
result['qbind_id'] = 1
result['qbind_pwd'] = (1 if authenticate(username=bao_id, password=bao_pwd) or authenticate(email=bao_id, password=bao_pwd) else 0)
if result['qbind_id'] and result['qbind_pwd']:
insert_qq_user(user,openid,qq_nickname, token)
except User.DoesNotExist:
                        result['qbind_id'] = 0 # the id does not exist
                        result['qbind_pwd'] = None # JSON-parses to null, so no password hint is shown
else:
result['qbind_id'] = None
if not bao_pwd: result['qbind_pwd'] = None
return HttpResponse(json.dumps(result), 'application/json')
return auth_login_view(request, template_name, extra_context=template_var)
def create_user_from_qq(request,template_name):
template_var = dict()
json_result = dict()
    qq_oauth = request.session.get('qq_oauth', None)
    if request.user.is_authenticated() or qq_oauth is None:
        # guard first: qq_oauth may be absent from the session
        return HttpResponseRedirect(reverse('home.views.index'))
    token = qq_oauth['access_token']
    openid = qq_oauth['openid']
    client = OauthClient(token, openid)
    client.init_openapi_url(token=token, openid=openid)
    qq_user = client.request_openapi(api_path_str='user/get_user_info')
    qq_nickname = qq_user['nickname']
    template_var['qq_nickname'] = qq_nickname
try:
q_user = QQUser.objects.get(qq_nickname=qq_nickname)
        if q_user: json_result['wuser_existed'] = 'You have already registered a Baoshida account and bound it to QQ; please sign in directly'
except QQUser.DoesNotExist:
if request.method == 'POST':
get = lambda fieldId: request.POST.get(fieldId, None)
reg_email = get('reg_email')
reg_password = get('reg_password')
reg_re_password = get('reg_re_password')
reg_company = get('reg_company')
reg_tax_code = get('reg_tax_code')
reg_contact_man = get('reg_contact_man')
reg_mobile = get('reg_mobile')
reg_survey = get('reg_survey')
reg_survey_affix = get('reg_survey_affix')
reg_input_captcha = get('reg_input_captcha')
if reg_email: json_result['reg_email'] = not User.objects.filter(email = reg_email).count()
if reg_password and reg_re_password: json_result['reg_re_password'] = True if reg_re_password == reg_password else False
if reg_company: json_result['reg_company'] = not Company.objects.filter(name = reg_company).count()
if reg_tax_code: json_result['reg_tax_code'] = True
if reg_contact_man: json_result['reg_contact_man'] = True
if reg_mobile: json_result['reg_mobile'] = True
if reg_survey: json_result['reg_survey'] = True
if reg_survey_affix: json_result['reg_survey_affix'] = True
if reg_input_captcha: json_result['reg_input_captcha'] = (is_captcha_match(reg_input_captcha, request.session.get('captcha', None)))
            calculate = 0  # initialize the pass counter
for i in json_result.values():
calculate += i
if calculate >= 8:
                if 'HTTP_X_FORWARDED_FOR' in request.META:
ip = format_baosteel_ip(request.META['HTTP_X_FORWARDED_FOR'])
else:
ip = request.META['REMOTE_ADDR']
username = 'GM_' + gen_random(4,'mix')
user = User.objects.create_user(username=username, email=reg_email, password=reg_password)
user.is_active = True
user.save()
company = Company(name=reg_company, organization_code='0000000000', code='111111111', contact=user,
corporation_tax=reg_tax_code)
company.save()
user_profile = UserProfile(user=user, company=company, mobile=reg_mobile, register_ip=ip, latest_login_ip=ip)
user_profile.save()
#init company level & level_name ===>ok
update_user_points(request,user_id=user.id, code='init')
reg_survey = RegisterSurvey(user=user, source_code=reg_survey, source_desc=reg_survey_affix)
reg_survey.save()
g = Group.objects.get(name='menu_buyer_default')
g.user_set.add(user)
                # bind the QQ account
QQUser(
user = user,
qq_user_id = openid,
qq_nickname = qq_nickname,
oauth_access_token = token
).save()
                # authenticate the user and put them in a logged-in state
q_user = QQUser.objects.get(qq_user_id=openid)
user = authenticate(qq_nickname=q_user.user.username)
if user and user.is_active:
auth_login(request,user)
json_result['reg_user_id'] = user.id
return HttpResponse(json.dumps(json_result), 'application/json')
return auth_login_view(request, template_name, extra_context=template_var)
|
mit
|
j4/horizon
|
horizon/views.py
|
67
|
3855
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django import template
from django.utils import encoding
from django.views import generic
import horizon
from horizon import exceptions
class PageTitleMixin(object):
"""A mixin that renders out a page title into a view.
Many views in horizon have a page title that would ordinarily be
defined and passed through in get_context_data function, this often
leads to a lot of duplicated work in each view.
This mixin standardises the process of defining a page title, letting
views simply define a variable that is rendered into the context for
them.
There are cases when page title in a view may also display some context
data, for that purpose the page_title variable supports the django
templating language and will be rendered using the context defined by the
views get_context_data.
"""
page_title = ""
def render_context_with_title(self, context):
"""This function takes in a context dict and uses it to render the
page_title variable, it then appends this title to the context using
the 'page_title' key. If there is already a page_title key defined in
context received then this function will do nothing.
"""
if "page_title" not in context:
con = template.Context(context)
# NOTE(sambetts): Use force_text to ensure lazy translations
# are handled correctly.
temp = template.Template(encoding.force_text(self.page_title))
context["page_title"] = temp.render(con)
return context
def render_to_response(self, context):
"""This is an override of the default render_to_response function that
exists in the django generic views, this is here to inject the
page title into the context before the main template is rendered.
"""
context = self.render_context_with_title(context)
return super(PageTitleMixin, self).render_to_response(context)
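# --- Illustrative sketch, not part of the original module ---
# A hypothetical view using the mixin: page_title may embed Django template
# syntax, which render_context_with_title() renders against the context from
# get_context_data(). The class name, template path and "user" key below are
# assumptions for illustration only.
class _ExampleTitledView(PageTitleMixin, generic.TemplateView):
    template_name = "example/detail.html"  # hypothetical template
    page_title = "Details for {{ user.username }}"
    def get_context_data(self, **kwargs):
        context = super(_ExampleTitledView, self).get_context_data(**kwargs)
        context["user"] = self.request.user
        return context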
class HorizonTemplateView(PageTitleMixin, generic.TemplateView):
pass
class HorizonFormView(PageTitleMixin, generic.FormView):
pass
def user_home(request):
"""Reversible named view to direct a user to the appropriate homepage."""
return shortcuts.redirect(horizon.get_user_home(request.user))
class APIView(HorizonTemplateView):
"""A quick class-based view for putting API data into a template.
Subclasses must define one method, ``get_data``, and a template name
via the ``template_name`` attribute on the class.
Errors within the ``get_data`` function are automatically caught by
the :func:`horizon.exceptions.handle` error handler if not otherwise
caught.
"""
def get_data(self, request, context, *args, **kwargs):
"""This method should handle any necessary API calls, update the
context object, and return the context object at the end.
"""
return context
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
try:
context = self.get_data(request, context, *args, **kwargs)
except Exception:
exceptions.handle(request)
return self.render_to_response(context)
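# --- Illustrative sketch, not part of the original module ---
# A minimal APIView subclass; "api.list_items" is a hypothetical call. Any
# exception raised inside get_data() is routed through
# horizon.exceptions.handle() by get() above.
class _ExampleAPIView(APIView):
    template_name = "example/index.html"  # hypothetical template
    def get_data(self, request, context, *args, **kwargs):
        context["items"] = []  # e.g. api.list_items(request) in a real view
        return context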
|
apache-2.0
|
RoboCupULaval/UI-Debug
|
Model/DataObject/DrawingData/DrawMultipleLinesDataIn.py
|
1
|
2189
|
# Under MIT License, see LICENSE.txt
from Model.DataObject.BaseDataObject import catch_format_error
from Model.DataObject.DrawingData.BaseDataDraw import BaseDataDraw
__author__ = 'RoboCupULaval'
class DrawMultipleLinesDataIn(BaseDataDraw):
def __init__(self, data_in):
super().__init__(data_in)
self._format_data()
@catch_format_error
def _check_obligatory_data(self):
""" Vérifie les données obligatoires """
assert isinstance(self.data, dict),\
"data: {} n'est pas un dictionnaire.".format(type(self.data))
keys = self.data.keys()
assert 'points' in keys, "data['points'] n'existe pas."
assert isinstance(self.data['points'], list), "data['points'] n'est pas une liste."
for point in self.data['points']:
assert self._point_is_valid(point), "data['points']: {} n'est pas un point valide.".format(point)
@catch_format_error
def _check_optional_data(self):
""" Vérifie les données optionnelles """
keys = self.data.keys()
if 'color' in keys:
assert self._colorRGB_is_valid(self.data['color']), \
"data['color']: {} n'est pas une couleur valide.".format(self.data['color'])
else:
self.data['color'] = (0, 0, 0)
if 'width' in keys:
assert 0 < self.data['width'], \
"data['width']: {} n'est pas une épaisseur valide".format(self.data['width'])
else:
self.data['width'] = 2
if 'style' in keys:
assert self.data['style'] in self.line_style_allowed, \
"data['style']: {} n'est pas une style valide".format(self.data['style'])
else:
self.data['style'] = 'SolidLine'
if 'timeout' in keys:
assert self.data['timeout'] >= 0, \
"data['timeout']: {} n'est pas valide.".format(self.data['timeout'])
else:
self.data['timeout'] = 0
@staticmethod
def get_default_data_dict():
return dict(zip(['points'],
[[(x * 100, 50) for x in range(5)]]))
@staticmethod
def get_type():
return 3002
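# Illustrative sketch (not in the original file; the payload envelope expected
# by BaseDataObject is not shown here, so only the 'data' part is illustrated).
# Per the checks above, only 'points' is mandatory; 'color', 'width', 'style'
# and 'timeout' default to (0, 0, 0), 2, 'SolidLine' and 0 respectively:
#
#     payload = DrawMultipleLinesDataIn.get_default_data_dict()
#     # payload == {'points': [(0, 50), (100, 50), (200, 50), (300, 50), (400, 50)]}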
|
mit
|
nex3/pygments
|
pygments/lexers/parsers.py
|
72
|
23197
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.parsers
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for parser generators.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, \
include, bygroups, using
from pygments.token import Punctuation, Other, Text, Comment, Operator, \
Keyword, Name, String, Number, Whitespace
from pygments.lexers.compiled import JavaLexer, CLexer, CppLexer, \
ObjectiveCLexer, DLexer
from pygments.lexers.dotnet import CSharpLexer
from pygments.lexers.agile import RubyLexer, PythonLexer, PerlLexer
from pygments.lexers.web import ActionScriptLexer
__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',
'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer',
'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer',
#'AntlrCLexer',
'AntlrCSharpLexer', 'AntlrObjectiveCLexer',
'AntlrJavaLexer', "AntlrActionScriptLexer"]
class RagelLexer(RegexLexer):
"""
A pure `Ragel <http://www.complang.org/ragel/>`_ lexer. Use this for
fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead
(or one of the language-specific subclasses).
*New in Pygments 1.1.*
"""
name = 'Ragel'
aliases = ['ragel']
filenames = []
tokens = {
'whitespace': [
(r'\s+', Whitespace)
],
'comments': [
(r'\#.*$', Comment),
],
'keywords': [
(r'(access|action|alphtype)\b', Keyword),
(r'(getkey|write|machine|include)\b', Keyword),
(r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
(r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
],
'numbers': [
(r'0x[0-9A-Fa-f]+', Number.Hex),
(r'[+-]?[0-9]+', Number.Integer),
],
'literals': [
(r'"(\\\\|\\"|[^"])*"', String), # double quote string
(r"'(\\\\|\\'|[^'])*'", String), # single quote string
(r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals
(r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions
],
'identifiers': [
(r'[a-zA-Z_][a-zA-Z_0-9]*', Name.Variable),
],
'operators': [
(r',', Operator), # Join
(r'\||&|-|--', Operator), # Union, Intersection and Subtraction
(r'\.|<:|:>|:>>', Operator), # Concatention
(r':', Operator), # Label
(r'->', Operator), # Epsilon Transition
(r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions
(r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions
(r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions
(r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions
(r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions
(r'>|@|\$|%', Operator), # Transition Actions and Priorities
(r'\*|\?|\+|{[0-9]*,[0-9]*}', Operator), # Repetition
(r'!|\^', Operator), # Negation
(r'\(|\)', Operator), # Grouping
],
'root': [
include('literals'),
include('whitespace'),
include('comments'),
include('keywords'),
include('numbers'),
include('identifiers'),
include('operators'),
(r'{', Punctuation, 'host'),
(r'=', Operator),
(r';', Punctuation),
],
'host': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^{}\'"/#]+', # exclude unsafe characters
r'[^\\][\\][{}]', # allow escaped { or }
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'\#.*$\n?', # ruby comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# / is safe now that we've handled regex and javadoc comments
r'/',
)) + r')+', Other),
(r'{', Punctuation, '#push'),
(r'}', Punctuation, '#pop'),
],
}
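# Illustrative sketch (not part of the original module): highlighting a small
# Ragel fragment with this lexer via the standard Pygments API; the fragment
# text is an assumption for illustration.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     fragment = "machine demo;\nmain := ( 'a' | 'b' )+ ;"
#     print(highlight(fragment, RagelLexer(), TerminalFormatter()))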
class RagelEmbeddedLexer(RegexLexer):
"""
A lexer for `Ragel`_ embedded in a host language file.
This will only highlight Ragel statements. If you want host language
highlighting then call the language-specific Ragel lexer.
*New in Pygments 1.1.*
"""
name = 'Embedded Ragel'
aliases = ['ragel-em']
filenames = ['*.rl']
tokens = {
'root': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^%\'"/#]+', # exclude unsafe characters
r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'//.*$\n?', # single line comment
r'\#.*$\n?', # ruby/ragel comment
r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression
# / is safe now that we've handled regex and javadoc comments
r'/',
)) + r')+', Other),
# Single Line FSM.
# Please don't put a quoted newline in a single line FSM.
# That's just mean. It will break this.
(r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation,
using(RagelLexer),
Punctuation, Text)),
# Multi Line FSM.
(r'(%%%%|%%){', Punctuation, 'multi-line-fsm'),
],
'multi-line-fsm': [
(r'(' + r'|'.join(( # keep ragel code in largest possible chunks.
r'(' + r'|'.join((
r'[^}\'"\[/#]', # exclude unsafe characters
r'}(?=[^%]|$)', # } is okay as long as it's not followed by %
r'}%(?=[^%]|$)', # ...well, one %'s okay, just not two...
r'[^\\][\\][{}]', # ...and } is okay if it's escaped
# allow / if it's preceded with one of these symbols
# (ragel EOF actions)
r'(>|\$|%|<|@|<>)/',
# specifically allow regex followed immediately by *
# so it doesn't get mistaken for a comment
r'/(?!\*)(\\\\|\\/|[^/])*/\*',
# allow / as long as it's not followed by another / or by a *
r'/(?=[^/\*]|$)',
# We want to match as many of these as we can in one block.
# Not sure if we need the + sign here,
# does it help performance?
)) + r')+',
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'//.*$\n?', # single line comment
r'\#.*$\n?', # ruby/ragel comment
)) + r')+', using(RagelLexer)),
(r'}%%', Punctuation, '#pop'),
]
}
def analyse_text(text):
return '@LANG: indep' in text or 0.1
class RagelRubyLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Ruby host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in Ruby Host'
aliases = ['ragel-ruby', 'ragel-rb']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelRubyLexer, self).__init__(RubyLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: ruby' in text
class RagelCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a C host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in C Host'
aliases = ['ragel-c']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelCLexer, self).__init__(CLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: c' in text
class RagelDLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a D host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in D Host'
aliases = ['ragel-d']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelDLexer, self).__init__(DLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: d' in text
class RagelCppLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a CPP host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in CPP Host'
aliases = ['ragel-cpp']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelCppLexer, self).__init__(CppLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: c++' in text
class RagelObjectiveCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in an Objective C host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in Objective C Host'
aliases = ['ragel-objc']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelObjectiveCLexer, self).__init__(ObjectiveCLexer,
RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: objc' in text
class RagelJavaLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Java host file.
*New in Pygments 1.1.*
"""
name = 'Ragel in Java Host'
aliases = ['ragel-java']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelJavaLexer, self).__init__(JavaLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: java' in text
class AntlrLexer(RegexLexer):
"""
Generic `ANTLR`_ Lexer.
Should not be called directly, instead
use DelegatingLexer for your target language.
*New in Pygments 1.1.*
.. _ANTLR: http://www.antlr.org/
"""
name = 'ANTLR'
aliases = ['antlr']
filenames = []
_id = r'[A-Za-z][A-Za-z_0-9]*'
_TOKEN_REF = r'[A-Z][A-Za-z_0-9]*'
_RULE_REF = r'[a-z][A-Za-z_0-9]*'
_STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\''
_INT = r'[0-9]+'
tokens = {
'whitespace': [
(r'\s+', Whitespace),
],
'comments': [
(r'//.*$', Comment),
(r'/\*(.|\n)*?\*/', Comment),
],
'root': [
include('whitespace'),
include('comments'),
(r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class,
Punctuation)),
# optionsSpec
(r'options\b', Keyword, 'options'),
# tokensSpec
(r'tokens\b', Keyword, 'tokens'),
# attrScope
(r'(scope)(\s*)(' + _id + ')(\s*)({)',
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation), 'action'),
# exception
(r'(catch|finally)\b', Keyword, 'exception'),
# action
(r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)({)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
Name.Label, Whitespace, Punctuation), 'action'),
# rule
(r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?', \
bygroups(Keyword, Whitespace, Name.Label, Punctuation),
('rule-alts', 'rule-prelims')),
],
'exception': [
(r'\n', Whitespace, '#pop'),
(r'\s', Whitespace),
include('comments'),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
],
'rule-prelims': [
include('whitespace'),
include('comments'),
(r'returns\b', Keyword),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
# throwsSpec
(r'(throws)(\s+)(' + _id + ')',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(?:(,)(\s*)(' + _id + '))+',
bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws
# optionsSpec
(r'options\b', Keyword, 'options'),
# ruleScopeSpec - scope followed by target language code or name of action
# TODO finish implementing other possibilities for scope
# L173 ANTLRv3.g from ANTLR book
(r'(scope)(\s+)({)', bygroups(Keyword, Whitespace, Punctuation),
'action'),
(r'(scope)(\s+)(' + _id + ')(\s*)(;)',
bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)),
# ruleAction
(r'(@' + _id + ')(\s*)({)',
bygroups(Name.Label, Whitespace, Punctuation), 'action'),
# finished prelims, go to rule alts!
(r':', Punctuation, '#pop')
],
'rule-alts': [
include('whitespace'),
include('comments'),
# These might need to go in a separate 'block' state triggered by (
(r'options\b', Keyword, 'options'),
(r':', Punctuation),
# literals
(r"'(\\\\|\\'|[^'])*'", String),
(r'"(\\\\|\\"|[^"])*"', String),
(r'<<([^>]|>[^>])>>', String),
# identifiers
# Tokens start with capital letter.
(r'\$?[A-Z_][A-Za-z_0-9]*', Name.Constant),
# Rules start with small letter.
(r'\$?[a-z_][A-Za-z_0-9]*', Name.Variable),
# operators
(r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator),
(r',', Punctuation),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
(r';', Punctuation, '#pop')
],
'tokens': [
include('whitespace'),
include('comments'),
(r'{', Punctuation),
(r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL
+ ')?(\s*)(;)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
String, Whitespace, Punctuation)),
(r'}', Punctuation, '#pop'),
],
'options': [
include('whitespace'),
include('comments'),
(r'{', Punctuation),
(r'(' + _id + r')(\s*)(=)(\s*)(' +
             '|'.join((_id, _STRING_LITERAL, _INT, r'\*')) + r')(\s*)(;)',
bygroups(Name.Variable, Whitespace, Punctuation, Whitespace,
Text, Whitespace, Punctuation)),
(r'}', Punctuation, '#pop'),
],
'action': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^\${}\'"/\\]+', # exclude unsafe characters
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# backslashes are okay, as long as we are not backslashing a %
r'\\(?!%)',
# Now that we've handled regex and javadoc comments
# it's safe to let / through.
r'/',
)) + r')+', Other),
(r'(\\)(%)', bygroups(Punctuation, Other)),
(r'(\$[a-zA-Z]+)(\.?)(text|value)?',
bygroups(Name.Variable, Punctuation, Name.Property)),
(r'{', Punctuation, '#push'),
(r'}', Punctuation, '#pop'),
],
'nested-arg-action': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks.
r'[^\$\[\]\'"/]+', # exclude unsafe characters
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# Now that we've handled regex and javadoc comments
# it's safe to let / through.
r'/',
)) + r')+', Other),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
(r'(\$[a-zA-Z]+)(\.?)(text|value)?',
bygroups(Name.Variable, Punctuation, Name.Property)),
(r'(\\\\|\\\]|\\\[|[^\[\]])+', Other),
]
}
def analyse_text(text):
return re.search(r'^\s*grammar\s+[a-zA-Z0-9]+\s*;', text, re.M)
# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
# TH: I'm not aware of any language features of C++ that will cause
# incorrect lexing of C files. Antlr doesn't appear to make a distinction,
# so just assume they're C++. No idea how to make Objective C work in the
# future.
#class AntlrCLexer(DelegatingLexer):
# """
# ANTLR with C Target
#
# *New in Pygments 1.1*
# """
#
# name = 'ANTLR With C Target'
# aliases = ['antlr-c']
# filenames = ['*.G', '*.g']
#
# def __init__(self, **options):
# super(AntlrCLexer, self).__init__(CLexer, AntlrLexer, **options)
#
# def analyse_text(text):
# return re.match(r'^\s*language\s*=\s*C\s*;', text)
class AntlrCppLexer(DelegatingLexer):
"""
`ANTLR`_ with CPP Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With CPP Target'
aliases = ['antlr-cpp']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrCppLexer, self).__init__(CppLexer, AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*C\s*;', text, re.M)
class AntlrObjectiveCLexer(DelegatingLexer):
"""
`ANTLR`_ with Objective-C Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With ObjectiveC Target'
aliases = ['antlr-objc']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrObjectiveCLexer, self).__init__(ObjectiveCLexer,
AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
            re.search(r'^\s*language\s*=\s*ObjC\s*;', text, re.M)
class AntlrCSharpLexer(DelegatingLexer):
"""
`ANTLR`_ with C# Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With C# Target'
aliases = ['antlr-csharp', 'antlr-c#']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrCSharpLexer, self).__init__(CSharpLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M)
class AntlrPythonLexer(DelegatingLexer):
"""
`ANTLR`_ with Python Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With Python Target'
aliases = ['antlr-python']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrPythonLexer, self).__init__(PythonLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M)
class AntlrJavaLexer(DelegatingLexer):
"""
`ANTLR`_ with Java Target
*New in Pygments 1.1*
"""
name = 'ANTLR With Java Target'
aliases = ['antlr-java']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrJavaLexer, self).__init__(JavaLexer, AntlrLexer,
**options)
def analyse_text(text):
# Antlr language is Java by default
return AntlrLexer.analyse_text(text) and 0.9
class AntlrRubyLexer(DelegatingLexer):
"""
`ANTLR`_ with Ruby Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With Ruby Target'
aliases = ['antlr-ruby', 'antlr-rb']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrRubyLexer, self).__init__(RubyLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M)
class AntlrPerlLexer(DelegatingLexer):
"""
`ANTLR`_ with Perl Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With Perl Target'
aliases = ['antlr-perl']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrPerlLexer, self).__init__(PerlLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M)
class AntlrActionScriptLexer(DelegatingLexer):
"""
`ANTLR`_ with ActionScript Target
*New in Pygments 1.1.*
"""
name = 'ANTLR With ActionScript Target'
aliases = ['antlr-as', 'antlr-actionscript']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrActionScriptLexer, self).__init__(ActionScriptLexer,
AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M)
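# Illustrative sketch (not part of the original module): the analyse_text()
# hooks above let Pygments pick a target-specific lexer automatically. The
# grammar text is an assumption for illustration:
#
#     from pygments.lexers import guess_lexer
#     grammar = "grammar T;\noptions {\n  language = Ruby;\n}\nrule : 'x' ;"
#     lexer = guess_lexer(grammar)   # should pick AntlrRubyLexer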
|
bsd-2-clause
|
florianholzapfel/home-assistant
|
homeassistant/components/media_player/yamaha.py
|
6
|
9875
|
"""
Support for Yamaha Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.yamaha/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA, SUPPORT_PAUSE, SUPPORT_STOP,
SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK,
MEDIA_TYPE_MUSIC,
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_NAME, CONF_HOST, STATE_OFF, STATE_ON,
STATE_PLAYING, STATE_IDLE)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['rxv==0.4.0']
_LOGGER = logging.getLogger(__name__)
SUPPORT_YAMAHA = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
CONF_SOURCE_NAMES = 'source_names'
CONF_SOURCE_IGNORE = 'source_ignore'
CONF_ZONE_IGNORE = 'zone_ignore'
DEFAULT_NAME = 'Yamaha Receiver'
KNOWN = 'yamaha_known_receivers'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_SOURCE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_ZONE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SOURCE_NAMES, default={}): {cv.string: cv.string},
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Yamaha platform."""
import rxv
# keep track of configured receivers so that we don't end up
# discovering a receiver dynamically that we have static config
# for.
if hass.data.get(KNOWN, None) is None:
hass.data[KNOWN] = set()
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
source_ignore = config.get(CONF_SOURCE_IGNORE)
source_names = config.get(CONF_SOURCE_NAMES)
zone_ignore = config.get(CONF_ZONE_IGNORE)
if discovery_info is not None:
name = discovery_info[0]
model = discovery_info[1]
ctrl_url = discovery_info[2]
desc_url = discovery_info[3]
if ctrl_url in hass.data[KNOWN]:
_LOGGER.info("%s already manually configured", ctrl_url)
return
receivers = rxv.RXV(
ctrl_url,
model_name=model,
friendly_name=name,
unit_desc_url=desc_url).zone_controllers()
_LOGGER.info("Receivers: %s", receivers)
# when we are dynamically discovered config is empty
zone_ignore = []
elif host is None:
receivers = []
for recv in rxv.find():
receivers.extend(recv.zone_controllers())
else:
ctrl_url = "http://{}:80/YamahaRemoteControl/ctrl".format(host)
receivers = rxv.RXV(ctrl_url, name).zone_controllers()
for receiver in receivers:
if receiver.zone not in zone_ignore:
hass.data[KNOWN].add(receiver.ctrl_url)
add_devices([
YamahaDevice(name, receiver, source_ignore, source_names)])
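# Illustrative configuration sketch (not part of the original module); the
# keys mirror PLATFORM_SCHEMA above, while the host and aliases are
# assumptions:
#
#     media_player:
#       - platform: yamaha
#         host: 192.168.1.20
#         source_ignore:
#           - "AUX"
#         source_names:
#           HDMI1: "Chromecast"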
class YamahaDevice(MediaPlayerDevice):
"""Representation of a Yamaha device."""
def __init__(self, name, receiver, source_ignore, source_names):
"""Initialize the Yamaha Receiver."""
self._receiver = receiver
self._muted = False
self._volume = 0
self._pwstate = STATE_OFF
self._current_source = None
self._source_list = None
self._source_ignore = source_ignore or []
self._source_names = source_names or {}
self._reverse_mapping = None
self._is_playback_supported = False
self._play_status = None
self.update()
self._name = name
self._zone = receiver.zone
def update(self):
"""Get the latest details from the device."""
self._play_status = self._receiver.play_status()
if self._receiver.on:
if self._play_status is None:
self._pwstate = STATE_ON
elif self._play_status.playing:
self._pwstate = STATE_PLAYING
else:
self._pwstate = STATE_IDLE
else:
self._pwstate = STATE_OFF
self._muted = self._receiver.mute
self._volume = (self._receiver.volume / 100) + 1
if self.source_list is None:
self.build_source_list()
current_source = self._receiver.input
self._current_source = self._source_names.get(
current_source, current_source)
self._is_playback_supported = self._receiver.is_playback_supported(
self._current_source)
def build_source_list(self):
"""Build the source list."""
self._reverse_mapping = {alias: source for source, alias in
self._source_names.items()}
self._source_list = sorted(
self._source_names.get(source, source) for source in
self._receiver.inputs()
if source not in self._source_ignore)
@property
def name(self):
"""Return the name of the device."""
name = self._name
if self._zone != "Main_Zone":
# Zone will be one of Main_Zone, Zone_2, Zone_3
name += " " + self._zone.replace('_', ' ')
return name
@property
def state(self):
"""Return the state of the device."""
return self._pwstate
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
supported_commands = SUPPORT_YAMAHA
supports = self._receiver.get_playback_support()
mapping = {'play': SUPPORT_PLAY_MEDIA,
'pause': SUPPORT_PAUSE,
'stop': SUPPORT_STOP,
'skip_f': SUPPORT_NEXT_TRACK,
'skip_r': SUPPORT_PREVIOUS_TRACK}
for attr, feature in mapping.items():
if getattr(supports, attr, False):
supported_commands |= feature
return supported_commands
def turn_off(self):
"""Turn off media player."""
self._receiver.on = False
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
receiver_vol = 100 - (volume * 100)
negative_receiver_vol = -receiver_vol
self._receiver.volume = negative_receiver_vol
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._receiver.mute = mute
def turn_on(self):
"""Turn the media player on."""
self._receiver.on = True
self._volume = (self._receiver.volume / 100) + 1
def media_play(self):
"""Send play commmand."""
self._call_playback_function(self._receiver.play, "play")
def media_pause(self):
"""Send pause command."""
self._call_playback_function(self._receiver.pause, "pause")
def media_stop(self):
"""Send stop command."""
self._call_playback_function(self._receiver.stop, "stop")
def media_previous_track(self):
"""Send previous track command."""
self._call_playback_function(self._receiver.previous, "previous track")
def media_next_track(self):
"""Send next track command."""
self._call_playback_function(self._receiver.next, "next track")
def _call_playback_function(self, function, function_text):
import rxv
try:
function()
except rxv.exceptions.ResponseException:
_LOGGER.warning(
'Failed to execute %s on %s', function_text, self._name)
def select_source(self, source):
"""Select input source."""
self._receiver.input = self._reverse_mapping.get(source, source)
def play_media(self, media_type, media_id, **kwargs):
"""Play media from an ID.
This exposes a pass through for various input sources in the
Yamaha to direct play certain kinds of media. media_type is
treated as the input type that we are setting, and media id is
specific to it.
"""
if media_type == "NET RADIO":
self._receiver.net_radio(media_id)
@property
def media_artist(self):
"""Artist of current playing media."""
if self._play_status is not None:
return self._play_status.artist
@property
def media_album_name(self):
"""Album of current playing media."""
if self._play_status is not None:
return self._play_status.album
@property
def media_content_type(self):
"""Content type of current playing media."""
# Loose assumption that if playback is supported, we are playing music
if self._is_playback_supported:
return MEDIA_TYPE_MUSIC
return None
@property
def media_title(self):
"""Artist of current playing media."""
if self._play_status is not None:
song = self._play_status.song
station = self._play_status.station
            # If both song and station are available, show both; otherwise
            # just the one we have.
if song and station:
return '{}: {}'.format(station, song)
else:
return song or station
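# Illustrative note (not part of the original module): rxv reports volume in
# dB (commonly around -80.0 .. 0.0, an assumption about the receiver), so
# update() maps e.g. -50.0 to (-50.0 / 100) + 1 = 0.5, and set_volume_level()
# inverts it: volume 0.5 -> receiver_vol = 100 - 50 = 50 -> -50.0 dB written
# back to the receiver.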
|
mit
|
edwardsamuel/py-mysql2pgsql
|
mysql2pgsql/lib/postgres_db_writer.py
|
1
|
7769
|
from __future__ import with_statement, absolute_import
import time
from contextlib import closing
import psycopg2
from . import print_row_progress, status_logger
from .postgres_writer import PostgresWriter
class PostgresDbWriter(PostgresWriter):
"""Class used to stream DDL and/or data
    from a MySQL server to a PostgreSQL server.
:Parameters:
- `db_options`: :py:obj:`dict` containing connection specific variables
- `verbose`: whether or not to log progress to :py:obj:`stdout`
"""
class FileObjFaker(object):
"""A file-like class to support streaming
        table data directly to :py:meth:`psycopg2.copy_from`.
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
        - `data`: an iterable of rows read from the source table
        - `processor`: a callable invoked as processor(table, row) on each row before it is written
- `verbose`: whether or not to log progress to :py:obj:`stdout`
"""
def __init__(self, table, data, processor, verbose=False):
self.data = iter(data)
self.table = table
self.processor = processor
self.verbose = verbose
if verbose:
self.idx = 1
self.start_time = time.time()
self.prev_val_len = 0
self.prev_idx = 0
def readline(self, *args, **kwargs):
try:
                row = list(next(self.data))
except StopIteration:
if self.verbose:
print('')
return ''
else:
self.processor(self.table, row)
try:
return '%s\n' % ('\t'.join(row))
except UnicodeDecodeError:
return '%s\n' % ('\t'.join(r.decode('utf8') for r in row))
finally:
if self.verbose:
if (self.idx % 20000) == 0:
now = time.time()
elapsed = now - self.start_time
val = '%.2f rows/sec [%s] ' % ((self.idx - self.prev_idx) / elapsed, self.idx)
                        print_row_progress('%s%s' % (("\b" * self.prev_val_len), val))
self.prev_val_len = len(val) + 3
self.start_time = now
self.prev_idx = self.idx + 0
self.idx += 1
def read(self, *args, **kwargs):
return self.readline(*args, **kwargs)
def __init__(self, db_options, verbose=False, *args, **kwargs):
super(PostgresDbWriter, self).__init__(*args, **kwargs)
self.verbose = verbose
self.db_options = {
'host': str(db_options['hostname']),
'port': db_options.get('port', 5432),
'database': str(db_options['database']),
            'password': str(db_options.get('password') or ''),  # str(None) would yield the literal 'None'
'user': str(db_options['username']),
}
if ':' in str(db_options['database']):
self.db_options['database'], self.schema = self.db_options['database'].split(':')
else:
self.schema = None
self.open()
def open(self):
self.conn = psycopg2.connect(**self.db_options)
with closing(self.conn.cursor()) as cur:
if self.schema:
cur.execute('SET search_path TO %s' % self.schema)
cur.execute('SET client_encoding = \'UTF8\'')
if self.conn.server_version >= 80200:
cur.execute('SET standard_conforming_strings = off')
cur.execute('SET check_function_bodies = false')
cur.execute('SET client_min_messages = warning')
def query(self, sql, args=(), one=False):
with closing(self.conn.cursor()) as cur:
cur.execute(sql, args)
return cur.fetchone() if one else cur
def execute(self, sql, args=(), many=False):
with closing(self.conn.cursor()) as cur:
if many:
cur.executemany(sql, args)
else:
cur.execute(sql, args)
self.conn.commit()
def copy_from(self, file_obj, table_name, columns):
with closing(self.conn.cursor()) as cur:
cur.copy_from(file_obj,
table=table_name,
columns=columns
)
self.conn.commit()
def close(self):
"""Closes connection to the PostgreSQL server"""
self.conn.close()
def exists(self, relname):
        rc = self.query('SELECT COUNT(*) FROM pg_class WHERE relname = %s', (relname, ), one=True)
return rc and int(rc[0]) == 1
@status_logger
def truncate(self, table):
"""Send DDL to truncate the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
truncate_sql, serial_key_sql = super(PostgresDbWriter, self).truncate(table)
self.execute(truncate_sql)
if serial_key_sql:
self.execute(serial_key_sql)
@status_logger
def write_table(self, table):
"""Send DDL to create the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
table_sql, serial_key_sql = super(PostgresDbWriter, self).write_table(table)
for sql in serial_key_sql + table_sql:
if sql != "":
self.execute(sql)
@status_logger
def write_indexes(self, table):
"""Send DDL to create the specified `table` indexes
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
index_sql = super(PostgresDbWriter, self).write_indexes(table)
for sql in index_sql:
self.execute(sql)
@status_logger
def write_triggers(self, table):
"""Send DDL to create the specified `table` triggers
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
index_sql = super(PostgresDbWriter, self).write_triggers(table)
for sql in index_sql:
self.execute(sql)
@status_logger
def write_constraints(self, table):
"""Send DDL to create the specified `table` constraints
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
constraint_sql = super(PostgresDbWriter, self).write_constraints(table)
for sql in constraint_sql:
self.execute(sql)
@status_logger
def write_contents(self, table, reader):
"""Write the contents of `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.
Returns None
"""
f = self.FileObjFaker(table, reader.read(table), self.process_row, self.verbose)
self.copy_from(f, '"%s"' % super(PostgresDbWriter, self).convert_case(table.name), ['"%s"' % super(PostgresDbWriter, self).convert_case(c['name']) for c in table.columns])
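# Illustrative usage sketch (not part of the original module). The db_options
# keys match __init__ above; "table" and "reader" come from mysql2pgsql's
# MysqlReader and are assumed to exist, as is the base-class argument list:
#
#     writer = PostgresDbWriter({'hostname': 'localhost', 'port': 5432,
#                                'database': 'target', 'username': 'pg',
#                                'password': 'secret'}, verbose=True)
#     writer.write_table(table)
#     writer.write_contents(table, reader)
#     writer.close()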
|
mit
|
nomnombtc/bitcoin
|
qa/rpc-tests/pruning.py
|
4
|
15031
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space.
# This test takes 30 mins or more (up to 2 hours)
# ********
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
import os
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
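# Illustrative note (not part of the original test): calc_usage() sums regular
# file sizes and converts bytes to MiB, so a blocks dir holding two
# hypothetical files of 100 MiB and 28 MiB would report 128.0.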
class PruneTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
def setup_network(self):
self.nodes = []
self.is_network_split = False
# Create nodes 0 and 1 to mine
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
# Create node 2 to test pruning
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:3])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
print("Success")
print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
print("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk; 20 full blocks past the height cutoff will ensure this
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 30:
raise AssertionError("blk00000.dat not pruned when it should be")
print("Success")
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
# Mine 24 blocks in node 1
for i in range(24):
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
stop_node(self.nodes[1],1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
height = self.nodes[1].getblockcount()
print("Current block height:", height)
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
print("Invalidating block at height:",invalidheight,badhash)
self.nodes[1].invalidateblock(badhash)
        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want,
        # so invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
print("New best height", self.nodes[1].getblockcount())
# Reboot node1 to clear those giant tx's from mempool
stop_node(self.nodes[1],1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
print("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
print("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120)
print("Verify height on node 2:",self.nodes[2].getblockcount())
print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))
print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
for i in range(22):
# This can be slow, so do this in multiple RPC calls to avoid
# RPC timeouts.
self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
sync_blocks(self.nodes[0:3], timeout=300)
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
return invalidheight,badhash
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
try:
self.nodes[2].getblock(self.forkhash)
raise AssertionError("Old block wasn't pruned so can't test redownload")
except JSONRPCException as e:
print("Will need to redownload block",self.forkheight)
# Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it's expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
assert(self.nodes[0].getblockcount() == self.mainchainheight)
assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
print("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
waitstart = time.time()
while self.nodes[2].getblockcount() < goalbestheight:
time.sleep(0.1)
if time.time() - waitstart > 900:
raise AssertionError("Node 2 didn't reorg to proper height")
assert(self.nodes[2].getbestblockhash() == goalbesthash)
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def run_test(self):
print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
print("Mining a big blockchain of 995 blocks")
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
print("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
print("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
print("Check that we can survive a 288 block reorg still")
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
print("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
print("Done")
if __name__ == '__main__':
PruneTest().main()
|
mit
|
walkon302/CDIPS_Recommender
|
lib/vgg16.py
|
4
|
7225
|
# -*- coding: utf-8 -*-
'''VGG16 model for Keras.
# Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
'''
from __future__ import print_function
import numpy as np
import warnings
from keras.models import Model
from keras.layers import Flatten, Dense, Input
from keras.layers import Convolution2D, MaxPooling2D
from keras.preprocessing import image
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras import backend as K
from imagenet_utils import decode_predictions, preprocess_input
TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5'
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
TH_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
def VGG16(include_top=True, weights='imagenet',
input_tensor=None):
'''Instantiate the VGG16 architecture,
optionally loading weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_dim_ordering="tf"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The dimension ordering
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
# Returns
A Keras model instance.
'''
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
# Determine proper input shape
if K.image_dim_ordering() == 'th':
if include_top:
input_shape = (3, 224, 224)
else:
input_shape = (3, None, None)
else:
if include_top:
input_shape = (224, 224, 3)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor)
else:
img_input = input_tensor
# Block 1
x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv1')(img_input)
x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv1')(x)
x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv1')(x)
x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv2')(x)
x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv1')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv2')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv1')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv2')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(1000, activation='softmax', name='predictions')(x)
# Create model
model = Model(img_input, x)
# load weights
if weights == 'imagenet':
print('K.image_dim_ordering:', K.image_dim_ordering())
if K.image_dim_ordering() == 'th':
if include_top:
weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels.h5',
TH_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5',
TH_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image dimension ordering convention '
'(`image_dim_ordering="th"`). '
'For best performance, set '
'`image_dim_ordering="tf"` in '
'your Keras config '
'at ~/.keras/keras.json.')
convert_all_kernels_in_model(model)
else:
if include_top:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
return model
if __name__ == '__main__':
model = VGG16(include_top=True, weights='imagenet')
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print('Input image shape:', x.shape)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds))
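    # Hedged extra demo: the convolutional base alone (include_top=False)
    # can also serve as a feature extractor. For a 224x224 input under the
    # 'tf' dim ordering, the `block5_pool` output has shape (1, 7, 7, 512).
    base_model = VGG16(include_top=False, weights='imagenet')
    features = base_model.predict(x)
    print('block5_pool feature shape:', features.shape)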
|
apache-2.0
|
vismartltd/edx-platform
|
common/lib/xmodule/xmodule/modulestore/tests/django_utils.py
|
4
|
16715
|
# encoding: utf-8
"""
Modulestore configuration for test cases.
"""
import datetime
import pytz
from tempfile import mkdtemp
from uuid import uuid4
from mock import patch
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from request_cache.middleware import RequestCache
from courseware.field_overrides import OverrideFieldData # pylint: disable=import-error
from xmodule.contentstore.django import _CONTENTSTORE
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore, clear_existing_modulestores
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.tests.sample_courses import default_block_info_tree, TOY_BLOCK_INFO_TREE
from xmodule.modulestore.tests.factories import XMODULE_FACTORY_LOCK
from xmodule.tabs import CoursewareTab, CourseInfoTab, StaticTab, DiscussionTab, ProgressTab, WikiTab
class StoreConstructors(object):
"""Enumeration of store constructor types."""
draft, split, xml = range(3)
def mixed_store_config(data_dir, mappings, include_xml=False, xml_source_dirs=None, store_order=None):
"""
Return a `MixedModuleStore` configuration, which provides
access to both Mongo- and XML-backed courses.
Args:
data_dir (string): the directory from which to load XML-backed courses.
mappings (dict): a dictionary mapping course IDs to modulestore names, for example:
{
'MITx/2.01x/2013_Spring': 'xml',
'edx/999/2013_Spring': 'default'
}
where 'xml' and 'default' are the two options provided by this configuration,
mapping (respectively) to XML-backed and Mongo-backed modulestores.
Keyword Args:
include_xml (boolean): If True, include an XML modulestore in the configuration.
xml_source_dirs (list): The directories containing XML courses to load from disk.
Note: for the courses to be loaded into the XML modulestore and made accessible, do the following:
* include_xml should be True
* xml_source_dirs should be the list of directories (relative to data_dir)
containing the courses you want to load
* mappings should be configured, pointing the xml courses to the xml modulestore
"""
if store_order is None:
store_order = [StoreConstructors.draft, StoreConstructors.split]
if include_xml and StoreConstructors.xml not in store_order:
store_order.append(StoreConstructors.xml)
store_constructors = {
StoreConstructors.split: split_mongo_store_config(data_dir)['default'],
StoreConstructors.draft: draft_mongo_store_config(data_dir)['default'],
StoreConstructors.xml: xml_store_config(data_dir, source_dirs=xml_source_dirs)['default'],
}
store = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': mappings,
'stores': [store_constructors[store] for store in store_order],
}
}
}
return store
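# Illustrative sketch of the returned structure (default store_order, empty
# mappings); the `stores` entries are the draft and split configs built above:
#
# {'default': {'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
#              'OPTIONS': {'mappings': {},
#                          'stores': [<draft config>, <split config>]}}}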
def draft_mongo_store_config(data_dir):
"""
Defines default module store using DraftMongoModuleStore.
"""
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': data_dir,
'render_template': 'edxmako.shortcuts.render_to_string'
}
store = {
'default': {
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.draft.DraftModuleStore',
'DOC_STORE_CONFIG': {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': 'test_xmodule',
'collection': 'modulestore_{0}'.format(uuid4().hex[:5]),
},
'OPTIONS': modulestore_options
}
}
return store
def split_mongo_store_config(data_dir):
"""
Defines split module store.
"""
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': data_dir,
'render_template': 'edxmako.shortcuts.render_to_string',
}
store = {
'default': {
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': 'test_xmodule',
'collection': 'modulestore_{0}'.format(uuid4().hex[:5]),
},
'OPTIONS': modulestore_options
}
}
return store
def xml_store_config(data_dir, source_dirs=None):
"""
Defines default module store using XMLModuleStore.
Note: you should pass in a list of source_dirs that you care about,
otherwise all courses in the data_dir will be processed.
"""
store = {
'default': {
'NAME': 'xml',
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': data_dir,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'source_dirs': source_dirs,
}
}
}
return store
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
# This is an XML only modulestore with only the toy course loaded
TEST_DATA_XML_MODULESTORE = xml_store_config(TEST_DATA_DIR, source_dirs=['toy'])
# This modulestore will provide both a mixed mongo editable modulestore, and
# an XML store with just the toy course loaded.
TEST_DATA_MIXED_TOY_MODULESTORE = mixed_store_config(
TEST_DATA_DIR, {'edX/toy/2012_Fall': 'xml', }, include_xml=True, xml_source_dirs=['toy']
)
# This modulestore will provide both a mixed mongo editable modulestore, and
# an XML store with common/test/data/2014 loaded, which is a course that is closed.
TEST_DATA_MIXED_CLOSED_MODULESTORE = mixed_store_config(
TEST_DATA_DIR, {'edX/detached_pages/2014': 'xml', }, include_xml=True, xml_source_dirs=['2014']
)
# This modulestore will provide both a mixed mongo editable modulestore, and
# an XML store with common/test/data/graded loaded, which is a course that is graded.
TEST_DATA_MIXED_GRADED_MODULESTORE = mixed_store_config(
TEST_DATA_DIR, {'edX/graded/2012_Fall': 'xml', }, include_xml=True, xml_source_dirs=['graded']
)
# All store requests now go through mixed
# Use this modulestore if you specifically want to test mongo and not a mocked modulestore.
# This modulestore definition below will not load any xml courses.
TEST_DATA_MONGO_MODULESTORE = mixed_store_config(mkdtemp(), {}, include_xml=False)
# All store requests now go through mixed
# Use this modulestore if you specifically want to test split-mongo and not a mocked modulestore.
# This modulestore definition below will not load any xml courses.
TEST_DATA_SPLIT_MODULESTORE = mixed_store_config(
mkdtemp(),
{},
include_xml=False,
store_order=[StoreConstructors.split, StoreConstructors.draft]
)
class ModuleStoreTestCase(TestCase):
"""
Subclass for any test case that uses a ModuleStore.
Ensures that the ModuleStore is cleaned before/after each test.
Usage:
1. Create a subclass of `ModuleStoreTestCase`
2. (optional) If you need a specific variety of modulestore, or particular ModuleStore
options, set the MODULESTORE class attribute of your test class to the
appropriate modulestore config.
For example:
class FooTest(ModuleStoreTestCase):
MODULESTORE = mixed_store_config(data_dir, mappings)
# ...
3. Use factories (e.g. `CourseFactory`, `ItemFactory`) to populate
the modulestore with test data.
NOTE:
* For Mongo-backed courses (created with `CourseFactory`),
the state of the course will be reset before/after each
test method executes.
* For XML-backed courses, the course state will NOT
reset between test methods (although it will reset
between test classes)
The reason is: XML courses are not editable, so to reset
a course you have to reload it from disk, which is slow.
If you do need to reset an XML course, use
`clear_existing_modulestores()` directly in
your `setUp()` method.
"""
MODULESTORE = mixed_store_config(mkdtemp(), {}, include_xml=False)
def setUp(self, **kwargs):
"""
Creates a test User if `create_user` is True.
Returns the password for the test User.
Args:
create_user - specifies whether or not to create a test User. Default is True.
"""
settings_override = override_settings(MODULESTORE=self.MODULESTORE)
settings_override.__enter__()
self.addCleanup(settings_override.__exit__, None, None, None)
# Clear out any existing modulestores,
# which will cause them to be re-created
clear_existing_modulestores()
self.addCleanup(self.drop_mongo_collections)
self.addCleanup(RequestCache().clear_request_cache)
# Enable XModuleFactories for the space of this test (and its setUp).
self.addCleanup(XMODULE_FACTORY_LOCK.disable)
XMODULE_FACTORY_LOCK.enable()
# When testing CCX, we should make sure that
# OverrideFieldData.provider_classes is always reset to `None` so
# that they're recalculated for every test
OverrideFieldData.provider_classes = None
super(ModuleStoreTestCase, self).setUp()
self.store = modulestore()
uname = 'testuser'
email = '[email protected]'
password = 'foo'
if kwargs.pop('create_user', True):
# Create the user so we can log them in.
self.user = User.objects.create_user(uname, email, password)
# Note that we do not actually need to do anything
# for registration if we directly mark them active.
self.user.is_active = True
# Staff has access to view all courses
self.user.is_staff = True
self.user.save()
return password
def create_non_staff_user(self):
"""
Creates a non-staff test user.
Returns the non-staff test user and its password.
"""
uname = 'teststudent'
password = 'foo'
nonstaff_user = User.objects.create_user(uname, '[email protected]', password)
# Note that we do not actually need to do anything
# for registration if we directly mark them active.
nonstaff_user.is_active = True
nonstaff_user.is_staff = False
nonstaff_user.save()
return nonstaff_user, password
def update_course(self, course, user_id):
"""
Updates the version of course in the modulestore
'course' is an instance of CourseDescriptor for which we want
to update metadata.
"""
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
self.store.update_item(course, user_id)
updated_course = self.store.get_course(course.id)
return updated_course
@staticmethod
@patch('xmodule.modulestore.django.create_modulestore_instance')
def drop_mongo_collections(mock_create):
"""
If using a Mongo-backed modulestore & contentstore, drop the collections.
"""
# Do not create the modulestore if it does not exist.
mock_create.return_value = None
module_store = modulestore()
if hasattr(module_store, '_drop_database'):
module_store._drop_database() # pylint: disable=protected-access
_CONTENTSTORE.clear()
if hasattr(module_store, 'close_connections'):
module_store.close_connections()
def create_sample_course(self, org, course, run, block_info_tree=None, course_fields=None):
"""
create a course in the default modulestore from the collection of BlockInfo
records defining the course tree
Returns:
course_loc: the CourseKey for the created course
"""
if block_info_tree is None:
block_info_tree = default_block_info_tree
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, None):
course = self.store.create_course(org, course, run, self.user.id, fields=course_fields)
self.course_loc = course.location # pylint: disable=attribute-defined-outside-init
def create_sub_tree(parent_loc, block_info):
"""Recursively creates a sub_tree on this parent_loc with this block."""
block = self.store.create_child(
self.user.id,
# TODO remove version_agnostic() when we impl the single transaction
parent_loc.version_agnostic(),
block_info.category, block_id=block_info.block_id,
fields=block_info.fields,
)
for tree in block_info.sub_tree:
create_sub_tree(block.location, tree)
setattr(self, block_info.block_id, block.location.version_agnostic())
for tree in block_info_tree:
create_sub_tree(self.course_loc, tree)
# remove version_agnostic when bulk write works
self.store.publish(self.course_loc.version_agnostic(), self.user.id)
return self.course_loc.course_key.version_agnostic()
def create_toy_course(self, org='edX', course='toy', run='2012_Fall'):
"""
Create an equivalent to the toy xml course
"""
# with self.store.bulk_operations(self.store.make_course_key(org, course, run)):
self.toy_loc = self.create_sample_course( # pylint: disable=attribute-defined-outside-init
org, course, run, TOY_BLOCK_INFO_TREE,
{
"textbooks": [["Textbook", "https://s3.amazonaws.com/edx-textbooks/guttag_computation_v3/"]],
"wiki_slug": "toy",
"display_name": "Toy Course",
"graded": True,
"tabs": [
CoursewareTab(),
CourseInfoTab(),
StaticTab(name="Syllabus", url_slug="syllabus"),
StaticTab(name="Resources", url_slug="resources"),
DiscussionTab(),
WikiTab(),
ProgressTab(),
],
"discussion_topics": {"General": {"id": "i4x-edX-toy-course-2012_Fall"}},
"graceperiod": datetime.timedelta(days=2, seconds=21599),
"start": datetime.datetime(2015, 07, 17, 12, tzinfo=pytz.utc),
"xml_attributes": {"filename": ["course/2012_Fall.xml", "course/2012_Fall.xml"]},
"pdf_textbooks": [
{
"tab_title": "Sample Multi Chapter Textbook",
"id": "MyTextbook",
"chapters": [
{"url": "/static/Chapter1.pdf", "title": "Chapter 1"},
{"url": "/static/Chapter2.pdf", "title": "Chapter 2"}
]
}
],
"course_image": "just_a_test.jpg",
}
)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, self.toy_loc):
self.store.create_item(
self.user.id, self.toy_loc, "about", block_id="short_description",
fields={"data": "A course about toys."}
)
self.store.create_item(
self.user.id, self.toy_loc, "about", block_id="effort",
fields={"data": "6 hours"}
)
self.store.create_item(
self.user.id, self.toy_loc, "about", block_id="end_date",
fields={"data": "TBD"}
)
self.store.create_item(
self.user.id, self.toy_loc, "course_info", "handouts",
fields={"data": "<a href='/static/handouts/sample_handout.txt'>Sample</a>"}
)
self.store.create_item(
self.user.id, self.toy_loc, "static_tab", "resources",
fields={"display_name": "Resources"},
)
self.store.create_item(
self.user.id, self.toy_loc, "static_tab", "syllabus",
fields={"display_name": "Syllabus"},
)
return self.toy_loc
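# Illustrative usage sketch (hypothetical test case, not part of this module):
#
# class ToyCourseTest(ModuleStoreTestCase):
#     MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
#
#     def test_toy_course_loads(self):
#         toy_key = self.create_toy_course()
#         self.assertIsNotNone(self.store.get_course(toy_key))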
|
agpl-3.0
|
nlholdem/icodoom
|
.venv/lib/python2.7/site-packages/tensorflow/contrib/distributions/python/ops/multinomial.py
|
11
|
10471
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Multinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
_multinomial_prob_note = """
For each batch of counts `[n_1,...,n_k]`, `P[counts]` is the probability
that after sampling `n` draws from this Multinomial distribution, the
number of draws falling in class `j` is `n_j`. Note that different
sequences of draws can result in the same counts, thus the probability
includes a combinatorial coefficient.
Note that input "counts" must be a non-negative tensor with dtype `dtype`
and whose shape can be broadcast with `self.p` and `self.n`. For fixed
leading dimensions, the last dimension represents counts for the
corresponding Multinomial distribution in `self.p`. `counts` is only legal
if it sums up to `n` and its components are equal to integer values.
"""
class Multinomial(distribution.Distribution):
"""Multinomial distribution.
This distribution is parameterized by a vector `p` of probability
parameters for `k` classes and `n`, the total number of draws (trials).
#### Mathematical details
The Multinomial is a distribution over k-class count data, meaning
for each k-tuple of non-negative integer `counts = [n_1,...,n_k]`, we have a
probability of these draws being made from the distribution. The distribution
has hyperparameters `p = (p_1,...,p_k)`, and probability mass
function (pmf):
```pmf(counts) = n! / (n_1!...n_k!) * (p_1)^n_1*(p_2)^n_2*...(p_k)^n_k```
where `n = sum_j n_j` and `n!` denotes the factorial of `n`.
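For example, with `n = 2` and `p = [0.5, 0.5]`,
`pmf([1, 1]) = 2! / (1! 1!) * 0.5 * 0.5 = 0.5`.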
#### Examples
Create a 3-class distribution where the 3rd class is the most likely to be
drawn, using logits.
```python
logits = [-50., -43, 0]
dist = Multinomial(n=4., logits=logits)
```
Create a 3-class distribution where the 3rd class is the most likely to be drawn.
```python
p = [.2, .3, .5]
dist = Multinomial(n=4., p=p)
```
The distribution functions can be evaluated on counts.
```python
# counts same shape as p.
counts = [1., 0, 3]
dist.prob(counts) # Shape []
# p will be broadcast to [[.2, .3, .5], [.2, .3, .5]] to match counts.
counts = [[1., 2, 1], [2, 2, 0]]
dist.prob(counts) # Shape [2]
# p will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7]
```
Create a 2-batch of 3-class distributions.
```python
p = [[.1, .2, .7], [.3, .3, .4]] # Shape [2, 3]
dist = Multinomial(n=[4., 5], p=p)
counts = [[2., 1, 1], [3, 1, 1]]
dist.prob(counts) # Shape [2]
```
"""
def __init__(self,
n,
logits=None,
p=None,
validate_args=False,
allow_nan_stats=True,
name="Multinomial"):
"""Initialize a batch of Multinomial distributions.
Args:
n: Non-negative floating point tensor with shape broadcastable to
`[N1,..., Nm]` with `m >= 0`. Defines this as a batch of
`N1 x ... x Nm` different Multinomial distributions. Its components
should be equal to integer values.
logits: Floating point tensor representing the log-odds of a
positive event with shape broadcastable to `[N1,..., Nm, k], m >= 0`,
and the same dtype as `n`. Defines this as a batch of `N1 x ... x Nm`
different `k` class Multinomial distributions. Only one of `logits` or
`p` should be passed in.
p: Positive floating point tensor with shape broadcastable to
`[N1,..., Nm, k]`, `m >= 0`, and the same dtype as `n`. Defines this as
a batch of `N1 x ... x Nm` different `k` class Multinomial
distributions. `p`'s components in the last portion of its shape should
sum up to 1. Only one of `logits` or `p` should be passed in.
validate_args: `Boolean`, default `False`. Whether to assert valid
values for parameters `n` and `p`, and `x` in `prob` and `log_prob`.
If `False`, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to prefix Ops created by this distribution class.
Examples:
```python
# Define 1-batch of 2-class multinomial distribution,
# also known as a Binomial distribution.
dist = Multinomial(n=2., p=[.1, .9])
# Define a 2-batch of 3-class distributions.
dist = Multinomial(n=[4., 5], p=[[.1, .3, .6], [.4, .05, .55]])
```
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[n, p]) as ns:
with ops.control_dependencies([
check_ops.assert_non_negative(
n, message="n has negative components."),
distribution_util.assert_integer_form(
n, message="n has non-integer components.")
] if validate_args else []):
self._logits, self._p = distribution_util.get_logits_and_prob(
name=name, logits=logits, p=p, validate_args=validate_args,
multidimensional=True)
self._n = array_ops.identity(n, name="convert_n")
self._mean_val = array_ops.expand_dims(n, -1) * self._p
self._broadcast_shape = math_ops.reduce_sum(
self._mean_val, reduction_indices=[-1], keep_dims=False)
super(Multinomial, self).__init__(
dtype=self._p.dtype,
is_continuous=False,
is_reparameterized=False,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._p, self._n, self._mean_val,
self._logits, self._broadcast_shape],
name=ns)
@property
def n(self):
"""Number of trials."""
return self._n
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def p(self):
"""Vector of probabilities summing to one.
Each element is the probability of drawing that coordinate."""
return self._p
def _batch_shape(self):
return array_ops.shape(self._broadcast_shape)
def _get_batch_shape(self):
return self._broadcast_shape.get_shape()
def _event_shape(self):
return array_ops.gather(array_ops.shape(self._mean_val),
[array_ops.rank(self._mean_val) - 1])
def _get_event_shape(self):
return self._mean_val.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
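    # Strategy: draw `n * n_draws` categorical samples per batch member,
    # one-hot encode each draw, then sum groups of `n_draws` draws to get
    # multinomial count vectors.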
n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
if self.n.get_shape().ndims is not None:
if self.n.get_shape().ndims != 0:
raise NotImplementedError(
"Sample only supported for scalar number of draws.")
elif self.validate_args:
is_scalar = check_ops.assert_rank(
n_draws, 0,
message="Sample only supported for scalar number of draws.")
n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
k = self.event_shape()[0]
# Flatten batch dims so logits has shape [B, k],
# where B = reduce_prod(self.batch_shape()).
logits = array_ops.reshape(self.logits, [-1, k])
draws = random_ops.multinomial(logits=logits,
num_samples=n * n_draws,
seed=seed)
draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
reduction_indices=-2) # shape: [B, n, k]
x = array_ops.transpose(x, perm=[1, 0, 2])
final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
return array_ops.reshape(x, final_shape)
@distribution_util.AppendDocstring(_multinomial_prob_note)
def _log_prob(self, counts):
counts = self._assert_valid_sample(counts)
log_unnormalized_prob = math_ops.reduce_sum(
counts * math_ops.log(self.p),
reduction_indices=[-1])
log_normalizer = -distribution_util.log_combinations(self.n, counts)
return log_unnormalized_prob - log_normalizer
@distribution_util.AppendDocstring(_multinomial_prob_note)
def _prob(self, counts):
return math_ops.exp(self._log_prob(counts))
def _mean(self):
return array_ops.identity(self._mean_val)
def _variance(self):
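    # Multinomial covariance: diagonal entries are n * p_i * (1 - p_i);
    # off-diagonal entries are -n * p_i * p_j (classes compete for draws).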
p = self.p * array_ops.expand_dims(array_ops.ones_like(self.n), -1)
outer_prod = math_ops.matmul(
array_ops.expand_dims(self._mean_val, -1), array_ops.expand_dims(p, -2))
return array_ops.matrix_set_diag(-outer_prod,
self._mean_val - self._mean_val * p)
def _assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args: return counts
return control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
counts, message="counts has negative components."),
check_ops.assert_equal(
self.n, math_ops.reduce_sum(counts, reduction_indices=[-1]),
message="counts do not sum to n."),
distribution_util.assert_integer_form(
counts, message="counts have non-integer components.")
], counts)
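# Minimal usage sketch (assumes the TF1-era graph/session API of this module):
#
# import tensorflow as tf
# dist = Multinomial(n=4., p=[.2, .3, .5])
# with tf.Session() as sess:
#     print(sess.run(dist.prob([1., 0., 3.])))   # scalar probability
#     print(sess.run(dist.mean()))               # [0.8, 1.2, 2.0]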
|
gpl-3.0
|
zenoss/Community-Zenpacks
|
ZenPacks.community.HPMon/ZenPacks/community/HPMon/modeler/plugins/community/snmp/HPIdeControllerMap.py
|
2
|
2298
|
################################################################################
#
# This program is part of the HPMon Zenpack for Zenoss.
# Copyright (C) 2008 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""HPIdeControllerMap
HPIdeControllerMap maps the cpqIdeControllerTable table to cpqIdeController objects
$Id: HPIdeControllerMap.py,v 1.1 2009/08/18 16:50:53 egor Exp $"""
__version__ = '$Revision: 1.1 $'[11:-2]
from Products.DataCollector.plugins.CollectorPlugin import GetTableMap
from HPExpansionCardMap import HPExpansionCardMap
class HPIdeControllerMap(HPExpansionCardMap):
"""Map HP/Compaq insight manager cpqIdeControllerTable table to model."""
maptype = "cpqIdeController"
modname = "ZenPacks.community.HPMon.cpqIdeController"
snmpGetTableMaps = (
GetTableMap('cpqIdeControllerTable',
'.1.3.6.1.4.1.232.14.2.3.1.1',
{
'.3': 'model',
'.4': 'FWRev',
'.5': 'slot',
'.6': 'status',
'.8': 'serialNumber',
}
),
)
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
cardtable = tabledata.get('cpqIdeControllerTable')
if device.id not in HPExpansionCardMap.oms:
HPExpansionCardMap.oms[device.id] = []
for oid, card in cardtable.iteritems():
try:
om = self.objectMap(card)
om.snmpindex = oid.strip('.')
om.id = self.prepId("cpqIdeController%s" % om.snmpindex)
om.slot = getattr(om, 'slot', 0)
if om.slot == -1: om.slot = 0
om.model = getattr(om, 'model', 'Unknown IDE Controller')
om.setProductKey = "%s" % om.model
except AttributeError:
continue
HPExpansionCardMap.oms[device.id].append(om)
return
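# Illustrative only (hypothetical values): a table row at OID index '0.1'
# with model 'Embedded IDE Controller' would yield an ObjectMap with
# id 'cpqIdeController0.1' and setProductKey 'Embedded IDE Controller'.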
|
gpl-2.0
|