#!/usr/bin/env python
import os
import site
site.addsitedir(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
from django.core.management import execute_manager
import settings
if __name__ == "__main__":
execute_manager(settings)
# (source file above: boar/manage.py from repo boar/boar, Python, bsd-3-clause)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import math
import uuid
import warnings
from base64 import b64decode, b64encode
from itertools import tee
from django.apps import apps
from django.db import connection
from django.db.models.lookups import default_lookups, RegisterLookupMixin
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators, checks
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time, parse_duration
from django.utils.duration import duration_string
from django.utils.functional import cached_property, curry, total_ordering, Promise
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import (smart_text, force_text, force_bytes,
python_2_unicode_compatible)
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
from django.utils.itercompat import is_iterable
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist # NOQA
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField',
'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField',
'DateTimeField', 'DecimalField', 'DurationField', 'EmailField', 'Empty',
'Field', 'FieldDoesNotExist', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
'UUIDField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be prepended to most
# "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
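#
# A hedged illustration (hypothetical model fields, not defined in this
# module): for a ForeignKey declared as ``author = ForeignKey(Author)`` with
# no db_column override, the attributes would typically be
#
#     field.name    -> 'author'
#     field.attname -> 'author_id'
#     field.column  -> 'author_id'
#
# whereas a plain ``title = CharField(max_length=100)`` has
# name == attname == column == 'title'.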
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates; creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
class_lookups = default_lookups.copy()
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.is_relation = self.rel is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
""" Return "app_label.model_label.field_name". """
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
errors.extend(self._check_deprecation_details())
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
elif '__' in self.name:
return [
checks.Error(
'Field names must not contain "__".',
hint=None,
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
hint=None,
obj=self,
id='fields.E003',
)
]
else:
return []
def _check_choices(self):
if self.choices:
if (isinstance(self.choices, six.string_types) or
not is_iterable(self.choices)):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
hint=None,
obj=self,
id='fields.E004',
)
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in self.choices):
return [
checks.Error(
("'choices' must be an iterable containing "
"(actual value, human readable name) tuples."),
hint=None,
obj=self,
id='fields.E005',
)
]
else:
return []
else:
return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
hint=None,
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
return connection.validation.check_field(self, **kwargs)
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, source=None):
if source is None:
source = self
if alias != self.model._meta.db_table or source != self:
from django.db.models.expressions import Col
return Col(alias, self, source)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be used
by Django.
"""
return sql, params
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"choices": "_choices",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = {"choices", "validators", "db_tablespace"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
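# A minimal sketch of deconstruct() output for a simple, unbound field (names
# and values here are illustrative, not taken from this module):
#
#     >>> f = CharField(max_length=100, blank=True)
#     >>> f.set_attributes_from_name('title')
#     >>> f.deconstruct()
#     ('title', 'django.db.models.CharField', [], {'blank': True, 'max_length': 100})
#
# Only keyword arguments that differ from their defaults survive, and the
# import path is shortened to the public django.db.models namespace.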
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
if hasattr(self.rel, 'field') and self.rel.field is self:
obj.rel.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app registry. This
# could be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
# Some validators can't be created at field initialization time.
# This method provides a way to delay their creation until required.
return self.default_validators + self._validators
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.data_types[self.get_internal_type()] % data
except KeyError:
return None
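# A hedged sketch of the escape hatch described above (hypothetical custom
# field, not part of Django):
#
#     class MySetField(Field):
#         def db_type(self, connection):
#             # Column type with no built-in Django mapping.
#             return "SET('a', 'b', 'c')"
#
# Fields that map cleanly onto an existing column type should instead
# implement get_internal_type() and reuse the backend's data_types entry.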
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return
values (type, checks).
This will look at db_type(), allowing custom model fields to override it.
"""
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
type_string = self.db_type(connection)
try:
check_string = connection.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
check_string = None
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, virtual_only=False):
self.set_attributes_from_name(name)
self.model = cls
if virtual_only:
cls._meta.add_field(self, virtual=True)
else:
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save`` and
``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
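# Sketch of the usual save-time pipeline for a value ``v`` (hypothetical
# value, shown for orientation only):
#
#     get_db_prep_save(v, connection)
#         -> get_db_prep_value(v, connection, prepared=False)
#             -> get_prep_value(v)   # Python-level coercion
#             -> backend-specific casting (overridden by date/time fields etc.)
#
# Subclasses usually override get_prep_value() for Python-side coercion and
# get_db_prep_value() only when the backend needs a special representation.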
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in {
'iexact', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith',
'month', 'day', 'week_day', 'hour', 'minute', 'second',
'isnull', 'search', 'regex', 'iregex',
}:
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
return self.get_prep_value(value)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
prepared = True
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabeled_clone method it means the
# value will be handled later on.
if hasattr(value, 'relabeled_clone'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute',
'second', 'search', 'regex', 'iregex', 'contains',
'icontains', 'iexact', 'startswith', 'endswith',
'istartswith', 'iendswith'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if isinstance(self, DateTimeField):
return connection.ops.year_lookup_bounds_for_datetime_field(value)
elif isinstance(self, DateField):
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return [value] # this isn't supposed to happen
else:
return [value]
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return self.default
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
blank_defined = False
choices = list(self.choices) if self.choices else []
named_groups = choices and isinstance(choices[0][1], (list, tuple))
if not named_groups:
for choice, __ in choices:
if choice in ('', None):
blank_defined = True
break
first_choice = (blank_choice if include_blank and
not blank_defined else [])
if self.choices:
return first_choice + choices
rel_model = self.rel.to
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
return first_choice + lst
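# Illustrative only (hypothetical choices, not defined here):
#
#     >>> f = CharField(max_length=1, choices=[('S', 'Small'), ('L', 'Large')])
#     >>> f.get_choices()
#     [('', '---------'), ('S', 'Small'), ('L', 'Large')]
#     >>> f.get_choices(include_blank=False)
#     [('S', 'Small'), ('L', 'Large')]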
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = blank_choice if include_blank else []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self._get_val_from_obj(obj))
def _get_choices(self):
if isinstance(self._choices, collections.Iterator):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
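# Roughly how the choices branch above plays out (illustrative, hypothetical
# fields):
#
#     >>> CharField(max_length=1, choices=[('S', 'Small')]).formfield()
#     <django.forms.fields.TypedChoiceField object at ...>
#     >>> CharField(max_length=10).formfield()
#     <django.forms.fields.CharField object at ...>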
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(AutoField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(AutoField, self).check(**kwargs)
errors.extend(self._check_primary_key())
return errors
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
hint=None,
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(AutoField, self).deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
value = super(AutoField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name, **kwargs)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(BooleanField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(BooleanField, self).check(**kwargs)
errors.extend(self._check_null(**kwargs))
return errors
def _check_null(self, **kwargs):
if getattr(self, 'null', False):
return [
checks.Error(
'BooleanFields do not accept null values.',
hint='Use a NullBooleanField instead.',
obj=self,
id='fields.E110',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(BooleanField, self).deconstruct()
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
# if value is 1 or 0, then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
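# Illustrative conversions, for orientation:
#
#     >>> BooleanField().to_python('1')
#     True
#     >>> BooleanField().to_python('False')
#     False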
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
value = super(BooleanField, self).get_prep_value(value)
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
errors = super(CharField, self).check(**kwargs)
errors.extend(self._check_max_length_attribute(**kwargs))
return errors
def _check_max_length_attribute(self, **kwargs):
try:
max_length = int(self.max_length)
if max_length <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
hint=None,
obj=self,
id='fields.E120',
)
]
except ValueError:
return [
checks.Error(
"'max_length' must be a positive integer.",
hint=None,
obj=self,
id='fields.E121',
)
]
else:
return []
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
value = super(CharField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateTimeCheckMixin(object):
def check(self, **kwargs):
errors = super(DateTimeCheckMixin, self).check(**kwargs)
errors.extend(self._check_mutually_exclusive_options())
errors.extend(self._check_fix_default_value())
return errors
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now,
self.has_default()]
enabled_options = [option not in (None, False)
for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
hint=None,
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(DateField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Adds a warning to the checks framework stating that using an actual
date or datetime value is probably wrong; it's only evaluated once,
on server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
offset = datetime.timedelta(days=1)
lower = (now - offset).date()
upper = (now + offset).date()
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
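# The pattern this check warns about, sketched with hypothetical fields:
#
#     pub_date = DateField(default=datetime.date.today())   # evaluated once, at import time
#     pub_date = DateField(default=datetime.date.today)     # evaluated for each new instance
#
# Passing a callable (e.g. django.utils.timezone.now) is what the hint above
# recommends.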
def deconstruct(self):
name, path, args, kwargs = super(DateField, self).deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super(DateField, self).contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For date lookups, convert the value to an int
# so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
value = super(DateField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
Adds a warning to the checks framework stating that using an actual
date or datetime value is probably wrong; it's only evaluated once,
on server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.date):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
lower = datetime.datetime(lower.year, lower.month, lower.day)
upper = now + second_offset
upper = datetime.datetime(upper.year, upper.month, upper.day)
value = datetime.datetime(value.year, value.month, value.day)
else:
# No explicit date / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
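# Illustrative conversions (no time zone is attached at this stage; when
# USE_TZ is active that happens later, in get_prep_value()):
#
#     >>> DateTimeField().to_python('2015-03-14 09:26:53')
#     datetime.datetime(2015, 3, 14, 9, 26, 53)
#     >>> DateTimeField().to_python('2015-03-14')
#     datetime.datetime(2015, 3, 14, 0, 0)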
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField; it registers
# get_next_by_FOO and get_previous_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = super(DateTimeField, self).get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn("DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(DecimalField, self).check(**kwargs)
digits_errors = self._check_decimal_places()
digits_errors.extend(self._check_max_digits())
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
hint=None,
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
hint=None,
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
hint=None,
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
hint=None,
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
hint=None,
obj=self,
id='fields.E134',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(DecimalField, self).deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def _format(self, value):
if isinstance(value, six.string_types):
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.utils.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import utils
return utils.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super(DecimalField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class DurationField(Field):
"""Stores timedelta objects.
Uses interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and a bigint of
microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"[DD] [HH:[MM:]]ss[.uuuuuu] format.")
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.features.has_native_duration_field:
return value
if value is None:
return None
return value.total_seconds() * 1000000
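# On backends without a native interval type the stored value is the total
# number of microseconds, e.g. (illustrative):
#
#     >>> datetime.timedelta(days=1, seconds=1).total_seconds() * 1000000
#     86401000000.0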
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super(DurationField, self).get_db_converters(connection)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else duration_string(val)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 254)
super(EmailField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(EmailField, self).deconstruct()
# We do not exclude max_length if it matches the default, as we want to
# be able to change the default in the future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FilePathField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FilePathField, self).check(**kwargs)
errors.extend(self._check_allowing_files_or_folders(**kwargs))
return errors
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
hint=None,
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(FilePathField, self).deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length", None) == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super(FloatField, self).get_prep_value(value)
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
def check(self, **kwargs):
errors = super(IntegerField, self).check(**kwargs)
errors.extend(self._check_max_length_warning())
return errors
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with IntegerField",
hint="Remove 'max_length' from field",
obj=self,
id='fields.W122',
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
range_validators = []
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None:
range_validators.append(validators.MinValueValidator(min_value))
if max_value is not None:
range_validators.append(validators.MaxValueValidator(max_value))
return super(IntegerField, self).validators + range_validators
def get_prep_value(self, value):
value = super(IntegerField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
system_check_deprecated_details = {
'msg': (
'IPAddressField has been deprecated. Support for it (except in '
'historical migrations) will be removed in Django 1.9.'
),
'hint': 'Use GenericIPAddressField instead.',
'id': 'fields.W900',
}
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
super(IPAddressField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(IPAddressField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
**kwargs)
def check(self, **kwargs):
errors = super(GenericIPAddressField, self).check(**kwargs)
errors.extend(self._check_blank_and_null_values(**kwargs))
return errors
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
('GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.'),
hint=None,
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length", None) == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_ipaddress(value)
def get_prep_value(self, value):
value = super(GenericIPAddressField, self).get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super(NullBooleanField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
value = super(NullBooleanField, self).get_prep_value(value)
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(SlugField, self).deconstruct()
if kwargs.get("max_length", None) == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
value = super(TextField, self).get_prep_value(value)
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length, 'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(TimeField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Adds a warning to the checks framework stating, that using an actual
time or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.time):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
value = datetime.datetime.combine(now.date(), value)
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc).time()
else:
# No explicit time / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
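    # Illustrative note: with a hypothetical field declared as
    # ``opened_at = TimeField(default=timezone.now())`` the *called* default is
    # computed once at import time, falls inside the +/- 10 second window
    # checked above and triggers fields.W161; passing the callable itself,
    # ``default=timezone.now``, does not.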
def deconstruct(self):
name, path, args, kwargs = super(TimeField, self).deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
value = super(TimeField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
super(URLField, self).__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(URLField, self).deconstruct()
if kwargs.get("max_length", None) == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(BinaryField, self).__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def deconstruct(self):
name, path, args, kwargs = super(BinaryField, self).deconstruct()
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super(BinaryField, self).get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, six.text_type):
return six.memoryview(b64decode(force_bytes(value)))
return value
class UUIDField(Field):
default_error_messages = {
'invalid': _("'%(value)s' is not a valid UUID."),
}
description = 'Universally unique identifier'
empty_strings_allowed = False
def __init__(self, **kwargs):
kwargs['max_length'] = 32
super(UUIDField, self).__init__(**kwargs)
def get_internal_type(self):
return "UUIDField"
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, uuid.UUID):
if connection.features.has_native_uuid_field:
return value
return value.hex
if isinstance(value, six.string_types):
return value.replace('-', '')
return value
def to_python(self, value):
if value and not isinstance(value, uuid.UUID):
try:
return uuid.UUID(value)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
return value
def formfield(self, **kwargs):
defaults = {
'form_class': forms.UUIDField,
}
defaults.update(kwargs)
return super(UUIDField, self).formfield(**defaults)
| runekaagaard/django-contrib-locking | django/db/models/fields/__init__.py | Python | bsd-3-clause | 88,217 |
"""
Most Descriptive Compound (MDC) selection
"""
# Author: Giuseppe Marco Randazzo [email protected]
# License: BSD 3 clause
from numpy import zeros, array
class MDC(object):
"""Perform Most-Descriptor-Compound object selection
Parameters
----------
dmx : array, shape(row,row)
A square distance matrix.
To build a distance matrix see scipy at:
http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
nobjects : int, optional, default: 0
        Number of objects to select. 0 means the auto-stop
        criterion is used.
Attributes
----------
info_ : array, shape (row_,)
Information Vector to select the mdc
Returns
------
mdcids: list
        The list of ids selected by the algorithm.
Notes
-----
See examples/plot_mdc_example.py for an example.
References
----------
Brian D. Hudson, Richard M. Hyde, Elizabeth Rahr and John Wood,
Parameter Based Methods for Compound Selection from Chemical Databases,
Quant. Struct. Act. Relat. j. 185-289 1996
"""
def __init__(self, dmx, nobjects=0):
try:
self.dmx_ = dmx.tolist() #convert to list to be faster
except AttributeError:
self.dmx_ = dmx
self.nobjects = nobjects
self.info_ = None
self._build_infovector()
self.mdcids = []
def mdclist(self):
""" Return the list of most descriptor compounds """
return self.mdcids
def getnext(self):
""" Get the next most descriptor compound """
self._appendnext()
return self.mdcids[-1]
def select(self):
""" Run the Most Descriptive Compound Selection """
stopcondition = True
while stopcondition:
self._appendnext()
self._rm_mdc_contrib()
# Check Stop Condition
if self.nobjects > 0:
if len(self.mdcids) == len(self.dmx_):
stopcondition = False
else:
if len(self.mdcids) < self.nobjects:
continue
else:
stopcondition = False
else:
ncheck = 0
for item in self.info_:
if item < 1:
ncheck += 1
else:
continue
if ncheck > len(self.mdcids):
stopcondition = False
return self.mdcids
def _build_infovector(self):
""" build the information vector """
row = len(self.dmx_)
self.info_ = zeros(row)
tmp = zeros((row, 2))
for i in range(row):
for j in range(row):
tmp[j][0] = self.dmx_[i][j]
tmp[j][1] = j
tmp = array(sorted(tmp, key=lambda item: item[0]))
# Reciprocal of the rank
div = 2.0
for j in range(row):
if j == i:
self.info_[j] += 1
else:
k = int(tmp[j][1])
self.info_[k] += 1/div
div += 1.0
def _appendnext(self):
""" Append the next most descriptive compound to list """
dist = self.info_[0]
mdc = 0
# Select the MDC with the major information
for i in range(1, len(self.info_)):
if self.info_[i] > dist:
dist = self.info_[i]
mdc = i
else:
continue
self.mdcids.append(mdc)
def _rm_mdc_contrib(self):
""" remove the most descriptive compound contribution """
mdc = self.mdcids[-1]
row = len(self.dmx_)
tmp = zeros((row, 2))
rank = zeros(row)
for j in range(row):
tmp[j][0] = self.dmx_[mdc][j]
tmp[j][1] = j
tmp = array(sorted(tmp, key=lambda item: item[0]))
div = 2.0
for i in range(row):
j = int(tmp[i][1])
if j == mdc:
rank[j] = 0.0
else:
rank[j] = 1.0 - (1.0/div)
div += 1.0
for i in range(row):
self.info_[i] *= rank[i]
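if __name__ == '__main__':
    # Minimal usage sketch (not part of the library): assumes numpy and scipy
    # are installed; the random descriptor matrix below is purely illustrative.
    from numpy.random import rand
    from scipy.spatial.distance import pdist, squareform
    descriptors = rand(10, 4)              # 10 compounds, 4 descriptors each
    dmx = squareform(pdist(descriptors))   # square distance matrix
    mdc = MDC(dmx, nobjects=3)
    print(mdc.select())                    # ids of the 3 most descriptive compounds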
| zeld/scikit-optobj | optobj/mdc.py | Python | bsd-3-clause | 4,248 |
import logging
import emission.analysis.modelling.tour_model.tour_model_matrix as tm
import emission.analysis.modelling.tour_model.cluster_pipeline as eamtcp
from uuid import UUID
import random, datetime, sys
def create_tour_model(user, list_of_cluster_data):
# Highest level function, create tour model from the cluster data that nami gives me
our_tm = set_up(list_of_cluster_data, user) ## Adds nodes to graph
make_graph_edges(list_of_cluster_data, our_tm)
populate_prob_field_for_locatons(list_of_cluster_data, our_tm)
return our_tm
## Second level functions that are part of main
def set_up(list_of_cluster_data, user_name):
time0 = datetime.datetime(1900, 1, 1, hour=0)
our_tour_model = tm.TourModel(user_name, 0, time0)
for dct in list_of_cluster_data:
start_name = dct['start']
end_name = dct['end']
start_coords = dct['start_coords']
end_coords = dct['end_coords']
for sec in dct['sections']:
start_loc = tm.Location(start_name, our_tour_model)
end_loc = tm.Location(end_name, our_tour_model)
our_tour_model.add_location(start_loc, start_coords)
our_tour_model.add_location(end_loc, end_coords)
return our_tour_model
def make_graph_edges(list_of_cluster_data, tour_model):
for cd in list_of_cluster_data:
start_loc = cd['start']
end_loc = cd['end']
start_loc_temp = tm.Location(start_loc, tour_model)
start_loc_temp = tour_model.get_location(start_loc_temp)
end_loc_temp = tm.Location(end_loc, tour_model)
end_loc_temp = tour_model.get_location(end_loc_temp)
e = make_graph_edge(start_loc_temp, end_loc_temp, tour_model)
logging.debug("making edge %s" % e)
for trip_entry in cd["sections"]:
e.add_trip(trip_entry)
def populate_prob_field_for_locatons(list_of_cluster_data, tour_model):
for cd in list_of_cluster_data:
start_loc = cd['start']
end_loc = cd['end']
for model_trip in cd["sections"]:
start_loc_temp = tm.Location(start_loc, tour_model)
start_loc_temp = tour_model.get_location(start_loc_temp)
end_loc_temp = tm.Location(end_loc, tour_model)
end_loc_temp = tour_model.get_location(end_loc_temp)
com = tm.Commute(start_loc_temp, end_loc_temp)
tour_model.add_start_hour(start_loc_temp, model_trip.start_time)
start_loc_temp.increment_successor(end_loc_temp,
get_start_hour(model_trip),
get_day(model_trip))
## Utility functions
def make_graph_edge(start_point, end_point, tour_model):
sp = tour_model.get_location(start_point)
ep = tour_model.get_location(end_point)
comm = tm.Commute(sp, ep)
tour_model.add_edge(comm)
return comm
def get_start_hour(section_info):
return section_info.start_time.hour
def get_end_hour(section_info):
return section_info.start_time.hour
def get_day(section_info):
return section_info.start_time.weekday()
def get_mode_num(section_info):
map_modes_to_numbers = {"walking" : 0, "car" : 1, "train" : 2, "bart" : 3, "bike" : 4}
return random.randint(0, 4)
final_tour_model = None
if __name__ == "__main__":
if len(sys.argv) > 1:
user = UUID(sys.argv[1])
else:
user = None
list_of_cluster_data = eamtcp.main(user)
final_tour_model = create_tour_model("shankari", list_of_cluster_data)
| yw374cornell/e-mission-server | emission/analysis/modelling/tour_model/create_tour_model_matrix.py | Python | bsd-3-clause | 3,536 |
# coding: utf-8
import re
from copy import copy
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.db import IntegrityError
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
class CSVFilterForm(forms.Form):
"""
filter the data of a queryset.
"""
def __init__(self, *args, **kwargs):
self.model = kwargs.pop('model')
super(CSVFilterForm, self).__init__(*args, **kwargs)
if not self.model:
            raise ImproperlyConfigured('Seems like there is no model defined. Check your urlpatterns (add model to kwargs).')
self.csv_filter_definition = settings.CSV_EXPORTER_FILTER_DEFINITION[self.model._meta.module_name]
self.create_fields(filter_def=self.csv_filter_definition)
def create_fields(self, filter_def={}, prefix=""):
for key in filter_def:
if type(filter_def[key]) == dict:
self.create_fields(filter_def=filter_def[key], prefix=prefix + key + "__")
elif type(filter_def[key]) == list:
for filter_type in filter_def[key]:
self.fields[prefix + key + "__" + filter_type] = forms.CharField(required=False)
else:
self.fields[prefix + key + "__" + filter_def[key]] = forms.CharField(required=False)
def clean(self):
filters = {}
for item in self.cleaned_data:
if self.cleaned_data[item]:
filters[item] = self.cleaned_data[item]
if len(filters) == 0:
raise forms.ValidationError("no filters selected!")
self.filters = filters
return super(CSVFilterForm, self).clean()
def save(self):
return self.model.objects.filter(**self.filters)
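# Illustrative only: a hypothetical CSV_EXPORTER_FILTER_DEFINITION entry for a
# model whose module_name is ``order``. Nested dicts become related-field
# lookups and lists yield one form field per filter type, so the form below
# would expose ``status__exact``, ``created__gte``, ``created__lte`` and
# ``customer__name__icontains``.
#
# CSV_EXPORTER_FILTER_DEFINITION = {
#     'order': {
#         'status': 'exact',
#         'created': ['gte', 'lte'],
#         'customer': {'name': 'icontains'},
#     },
# }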
| fetzig/django-csv-exporter | csvexporter/forms.py | Python | bsd-3-clause | 2,038 |
import md5
def _generate_verbose_key(prefix, klass, properties):
return "%s:%s.%s(%s)" % (prefix, klass.__module__, klass.__name__, properties)
def _generate_terse_key(prefix, klass, properties):
compressed_hash = md5.new("%s.%s(%s)" % (klass.__module__, klass.__name__, properties)).hexdigest()
return "%s:%s(%s)" % (prefix, klass.__name__, compressed_hash)
class KeyValueHelper(object):
"""Internal helper object that can generate unique keys for a store that
stores objects in key/value pairs. Given a class/instance and a property
dictionary, this helper creates a unique lookup key (e.g. 'mymodule.MyClass(foo=abc;bar=123)')"""
def __init__(self, verbose=False, polymorphic=False, prefix="stockpyle"):
self.__stockpyle_bases_lookup = {}
self.__polymorphic = polymorphic
self.__prefix = prefix
# TODO: create cython callbacks to speed up this operation
if verbose:
self.__generate_key_cb = _generate_verbose_key
else:
self.__generate_key_cb = _generate_terse_key
def generate_lookup_key(self, target_klass, property_dict):
return self.__generate_key_cb(self.__prefix, target_klass, sorted([kv for kv in property_dict.iteritems()]))
def generate_all_lookup_keys(self, obj):
lookup_keys = []
klasses = [obj.__class__]
if self.__polymorphic:
klasses += self.__get_stockpyle_base_classes(obj.__class__)
for klass in klasses:
for stockpyle_key in klass.__stockpyle_keys__:
if isinstance(stockpyle_key, basestring):
property_list = [(stockpyle_key, getattr(obj, stockpyle_key))]
else:
property_list = [(pn, getattr(obj, pn)) for pn in sorted(stockpyle_key)]
lookup_keys.append(self.__generate_key_cb(self.__prefix, klass, property_list))
return lookup_keys
def __get_stockpyle_base_classes(self, klass):
"""returns an ordered list of stockpyle-managed base classes by recursing
up the inheritance tree of the given class and collecting any base classes
that have __stockpyle_keys__ defined"""
if klass not in self.__stockpyle_bases_lookup:
# we haven't calculated the stockpyle bases for this class yet
# calculate them
bases = []
def collect(current_klass):
for b in current_klass.__bases__:
if hasattr(b, "__stockpyle_keys__"):
bases.append(b)
collect(b)
collect(klass)
            # and then save it for faster lookup later
self.__stockpyle_bases_lookup[klass] = bases
# return those bases
return self.__stockpyle_bases_lookup[klass]
# if __name__ == "__main__":
#
# # performance testing
# import time
# import cProfile
# # import psyco
# # psyco.full()
#
#
# class Foo(object):
# __stockpyle_keys__ = [("foo", "bar")]
# foo = 1
# bar = "x"
#
#
# kvh_terse = KeyValueHelper()
# kvh_verbose = KeyValueHelper(verbose=True)
#
# def perform_verbose_keygen():
# start = time.time()
# for i in range(0, 50000):
# kvh_verbose.generate_lookup_key(Foo, {"foo": 1, "bar": "x"})
# delta = time.time() - start
# return delta
#
# def perform_terse_keygen():
# start = time.time()
# for i in range(0, 50000):
# kvh_terse.generate_lookup_key(Foo, {"foo": 1, "bar": "x"})
# delta = time.time() - start
# return delta
#
# def perform_verbose_objkeygen():
# start = time.time()
# obj = Foo()
# for i in range(0, 50000):
# kvh_verbose.generate_all_lookup_keys(obj)
# delta = time.time() - start
# return delta
#
# def perform_terse_objkeygen():
# start = time.time()
# obj = Foo()
# for i in range(0, 50000):
# kvh_terse.generate_all_lookup_keys(obj)
# delta = time.time() - start
# return delta
#
#
# print ">>> verbose keygen"
# print perform_verbose_keygen()
# print perform_verbose_keygen()
# print perform_verbose_keygen()
# print ">>> terse keygen"
# print perform_terse_keygen()
# print perform_terse_keygen()
# print perform_terse_keygen()
# print ">>> verbose objkeygen"
# print perform_verbose_objkeygen()
# print perform_verbose_objkeygen()
# print perform_verbose_objkeygen()
# print ">>> terse objkeygen"
# print perform_terse_objkeygen()
# print perform_terse_objkeygen()
# print perform_terse_objkeygen()
# print
# print
# print ">>> verbose keygen"
# cProfile.run("perform_verbose_keygen()")
# print ">>> terse keygen"
# cProfile.run("perform_terse_keygen()")
# print ">>> verbose objkeygen"
# cProfile.run("perform_verbose_objkeygen()")
# print ">>> terse objkeygen"
# cProfile.run("perform_terse_objkeygen()")
# | mjpizz/stockpyle | stockpyle/_helpers.py | Python | bsd-3-clause | 5,201 |
from __future__ import with_statement
import sys
from optparse import OptionParser, make_option as Option
from pprint import pformat
from textwrap import wrap
from anyjson import deserialize
from celery import __version__
from celery.app import app_or_default, current_app
from celery.bin.base import Command as CeleryCommand
from celery.utils import term
commands = {}
class Error(Exception):
pass
def command(fun, name=None):
commands[name or fun.__name__] = fun
return fun
class Command(object):
help = ""
args = ""
version = __version__
option_list = CeleryCommand.preload_options + (
Option("--quiet", "-q", action="store_true", dest="quiet",
default=False),
Option("--no-color", "-C", dest="no_color", action="store_true",
help="Don't colorize output."),
)
def __init__(self, app=None, no_color=False):
self.app = app_or_default(app)
self.colored = term.colored(enabled=not no_color)
def __call__(self, *args, **kwargs):
try:
self.run(*args, **kwargs)
except Error, exc:
self.error(self.colored.red("Error: %s" % exc))
def error(self, s):
return self.out(s, fh=sys.stderr)
def out(self, s, fh=sys.stdout):
s = str(s)
if not s.endswith("\n"):
s += "\n"
sys.stdout.write(s)
def create_parser(self, prog_name, command):
return OptionParser(prog=prog_name,
usage=self.usage(command),
version=self.version,
option_list=self.option_list)
def run_from_argv(self, prog_name, argv):
self.prog_name = prog_name
self.command = argv[0]
self.arglist = argv[1:]
self.parser = self.create_parser(self.prog_name, self.command)
options, args = self.parser.parse_args(self.arglist)
self.colored = term.colored(enabled=not options.no_color)
self(*args, **options.__dict__)
def run(self, *args, **kwargs):
raise NotImplementedError()
def usage(self, command):
return "%%prog %s [options] %s" % (command, self.args)
def prettify_list(self, n):
c = self.colored
if not n:
return "- empty -"
return "\n".join(str(c.reset(c.white("*"), " %s" % (item, )))
for item in n)
def prettify_dict_ok_error(self, n):
c = self.colored
if "ok" in n:
return (c.green("OK"),
indent(self.prettify(n["ok"])[1]))
elif "error" in n:
return (c.red("ERROR"),
indent(self.prettify(n["error"])[1]))
def prettify(self, n):
OK = str(self.colored.green("OK"))
if isinstance(n, list):
return OK, self.prettify_list(n)
if isinstance(n, dict):
if "ok" in n or "error" in n:
return self.prettify_dict_ok_error(n)
if isinstance(n, basestring):
return OK, unicode(n)
return OK, pformat(n)
class list_(Command):
args = "<bindings>"
def list_bindings(self, channel):
fmt = lambda q, e, r: self.out("%s %s %s" % (q.ljust(28),
e.ljust(28), r))
fmt("Queue", "Exchange", "Routing Key")
fmt("-" * 16, "-" * 16, "-" * 16)
for binding in channel.list_bindings():
fmt(*binding)
def run(self, what, *_, **kw):
topics = {"bindings": self.list_bindings}
if what not in topics:
raise ValueError("%r not in %r" % (what, topics.keys()))
with self.app.broker_connection() as conn:
self.app.amqp.get_task_consumer(conn).declare()
with conn.channel() as channel:
return topics[what](channel)
list_ = command(list_, "list")
class apply(Command):
args = "<task_name>"
option_list = Command.option_list + (
Option("--args", "-a", dest="args"),
Option("--kwargs", "-k", dest="kwargs"),
Option("--eta", dest="eta"),
Option("--countdown", dest="countdown", type="int"),
Option("--expires", dest="expires"),
Option("--serializer", dest="serializer", default="json"),
Option("--queue", dest="queue"),
Option("--exchange", dest="exchange"),
Option("--routing-key", dest="routing_key"),
)
def run(self, name, *_, **kw):
# Positional args.
args = kw.get("args") or ()
if isinstance(args, basestring):
args = deserialize(args)
# Keyword args.
kwargs = kw.get("kwargs") or {}
if isinstance(kwargs, basestring):
kwargs = deserialize(kwargs)
# Expires can be int.
expires = kw.get("expires") or None
try:
expires = int(expires)
except (TypeError, ValueError):
pass
res = self.app.send_task(name, args=args, kwargs=kwargs,
countdown=kw.get("countdown"),
serializer=kw.get("serializer"),
queue=kw.get("queue"),
exchange=kw.get("exchange"),
routing_key=kw.get("routing_key"),
eta=kw.get("eta"),
expires=expires)
self.out(res.task_id)
apply = command(apply)
def pluralize(n, text, suffix='s'):
if n > 1:
return text + suffix
return text
class purge(Command):
def run(self, *args, **kwargs):
app = current_app()
queues = len(app.amqp.queues.keys())
messages_removed = app.control.discard_all()
if messages_removed:
self.out("Purged %s %s from %s known task %s." % (
messages_removed, pluralize(messages_removed, "message"),
queues, pluralize(queues, "queue")))
else:
self.out("No messages purged from %s known %s" % (
queues, pluralize(queues, "queue")))
purge = command(purge)
class result(Command):
args = "<task_id>"
option_list = Command.option_list + (
Option("--task", "-t", dest="task"),
)
def run(self, task_id, *args, **kwargs):
from celery import registry
result_cls = self.app.AsyncResult
task = kwargs.get("task")
if task:
result_cls = registry.tasks[task].AsyncResult
result = result_cls(task_id)
self.out(self.prettify(result.get())[1])
result = command(result)
class inspect(Command):
choices = {"active": 1.0,
"active_queues": 1.0,
"scheduled": 1.0,
"reserved": 1.0,
"stats": 1.0,
"revoked": 1.0,
"registered_tasks": 1.0,
"enable_events": 1.0,
"disable_events": 1.0,
"ping": 0.2,
"add_consumer": 1.0,
"cancel_consumer": 1.0}
option_list = Command.option_list + (
Option("--timeout", "-t", type="float", dest="timeout",
default=None,
help="Timeout in seconds (float) waiting for reply"),
Option("--destination", "-d", dest="destination",
help="Comma separated list of destination node names."))
def usage(self, command):
return "%%prog %s [options] %s [%s]" % (
command, self.args, "|".join(self.choices.keys()))
def run(self, *args, **kwargs):
self.quiet = kwargs.get("quiet", False)
if not args:
raise Error("Missing inspect command. See --help")
command = args[0]
if command == "help":
raise Error("Did you mean 'inspect --help'?")
if command not in self.choices:
raise Error("Unknown inspect command: %s" % command)
destination = kwargs.get("destination")
timeout = kwargs.get("timeout") or self.choices[command]
if destination and isinstance(destination, basestring):
destination = map(str.strip, destination.split(","))
def on_reply(body):
c = self.colored
node = body.keys()[0]
reply = body[node]
status, preply = self.prettify(reply)
self.say("->", c.cyan(node, ": ") + status, indent(preply))
self.say("<-", command)
i = self.app.control.inspect(destination=destination,
timeout=timeout,
callback=on_reply)
replies = getattr(i, command)(*args[1:])
if not replies:
raise Error("No nodes replied within time constraint.")
return replies
def say(self, direction, title, body=""):
c = self.colored
if direction == "<-" and self.quiet:
return
dirstr = not self.quiet and c.bold(c.white(direction), " ") or ""
self.out(c.reset(dirstr, title))
if body and not self.quiet:
self.out(body)
inspect = command(inspect)
def indent(s, n=4):
i = [" " * n + l for l in s.split("\n")]
return "\n".join("\n".join(wrap(j)) for j in i)
class status(Command):
option_list = inspect.option_list
def run(self, *args, **kwargs):
replies = inspect(app=self.app,
no_color=kwargs.get("no_color", False)) \
.run("ping", **dict(kwargs, quiet=True))
if not replies:
raise Error("No nodes replied within time constraint")
nodecount = len(replies)
if not kwargs.get("quiet", False):
self.out("\n%s %s online." % (nodecount,
nodecount > 1 and "nodes" or "node"))
status = command(status)
class help(Command):
def usage(self, command):
return "%%prog <command> [options] %s" % (self.args, )
def run(self, *args, **kwargs):
self.parser.print_help()
usage = ["",
"Type '%s <command> --help' for help on a "
"specific command." % (self.prog_name, ),
"",
"Available commands:"]
for command in list(sorted(commands.keys())):
usage.append(" %s" % command)
self.out("\n".join(usage))
help = command(help)
class celeryctl(CeleryCommand):
commands = commands
def execute(self, command, argv=None):
try:
cls = self.commands[command]
except KeyError:
cls, argv = self.commands["help"], ["help"]
cls = self.commands.get(command) or self.commands["help"]
try:
cls(app=self.app).run_from_argv(self.prog_name, argv)
except Error:
return self.execute("help", argv)
def handle_argv(self, prog_name, argv):
self.prog_name = prog_name
try:
command = argv[0]
except IndexError:
command, argv = "help", ["help"]
return self.execute(command, argv)
def main():
try:
celeryctl().execute_from_commandline()
except KeyboardInterrupt:
pass
if __name__ == "__main__": # pragma: no cover
main()
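# Example invocations (illustrative; task and worker names are hypothetical):
#
#   celeryctl status
#   celeryctl inspect ping -d celeryd@example.com
#   celeryctl inspect active --timeout=5
#   celeryctl apply tasks.add --args='[2, 2]' --countdown=10
#   celeryctl result -t tasks.add <task_id>
#   celeryctl purge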
| WoLpH/celery | celery/bin/celeryctl.py | Python | bsd-3-clause | 11,318 |
import sys
sys.path.append("..")
from sympy import sqrt, symbols, eye
w, x, y, z = symbols("wxyz")
L = [x,y,z]
V = eye(len(L))
for i in range(len(L)):
for j in range(len(L)):
V[i,j] = L[i]**j
det = 1
for i in range(len(L)):
det *= L[i]-L[i-1]
print "matrix"
print V
print "det:"
print V.det().expand()
print "correct result"
print det
print det.expand()
| certik/sympy-oldcore | examples/vandermonde.py | Python | bsd-3-clause | 372 |
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
"""
Auto-generated class for UpdateOrganizationApiKeyReqBody
"""
from .OrganizationAPIKey import OrganizationAPIKey
from . import client_support
class UpdateOrganizationApiKeyReqBody(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(**kwargs):
"""
:type type: OrganizationAPIKey
:rtype: UpdateOrganizationApiKeyReqBody
"""
return UpdateOrganizationApiKeyReqBody(**kwargs)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'UpdateOrganizationApiKeyReqBody'
data = json or kwargs
# set attributes
data_types = [OrganizationAPIKey]
self.type = client_support.set_property('type', data, data_types, False, [], False, True, class_name)
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| itsyouonline/identityserver | clients/python/itsyouonline/UpdateOrganizationApiKeyReqBody.py | Python | bsd-3-clause | 1,170 |
# ~*~ coding: utf-8 ~*~
"""
fleaker.config
~~~~~~~~~~~~~~
This module implements various utilities for configuring your Fleaker
:class:`App`.
:copyright: (c) 2016 by Croscon Consulting, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import copy
import importlib
import os
import types
from os.path import splitext
from werkzeug.datastructures import ImmutableDict
from ._compat import string_types
from .base import BaseApplication
class MultiStageConfigurableApp(BaseApplication):
"""The :class:`MultiStageConfigurableApp` is a mixin used to provide the
primary :meth:`configure` method used to configure a ``Fleaker``
:class:`~fleaker.App`.
.. versionadded:: 0.1.0
The :class:`MultiStageConfigurableApp` class has existed since Fleaker
was conceived.
"""
def __init__(self, import_name, **settings):
"""Construct the app.
Adds a list for storing our post configure callbacks.
All args and kwargs are the same as the
:class:`fleaker.base.BaseApplication`.
"""
# A dict of all callbacks we should run after configure finishes. These
# are then separated by those that should run once, or run multiple
# times
# @TODO (QoL): There has to be a cleaner way to do this, do that
self._post_configure_callbacks = {
'multiple': [],
'single': [],
}
super(MultiStageConfigurableApp, self).__init__(import_name,
**settings)
def configure(self, *args, **kwargs):
"""Configure the Application through a varied number of sources of
different types.
This function chains multiple possible configuration methods together
in order to just "make it work". You can pass multiple configuration
sources in to the method and each one will be tried in a sane fashion.
Later sources will override earlier sources if keys collide. For
example:
.. code:: python
from application import default_config
app.configure(default_config, os.environ, '.secrets')
In the above example, values stored in ``default_config`` will be
loaded first, then overwritten by those in ``os.environ``, and so on.
An endless number of configuration sources may be passed.
Configuration sources are type checked and processed according to the
following rules:
* ``string`` - if the source is a ``str``, we will assume it is a file
or module that should be loaded. If the file ends in ``.json``, then
:meth:`flask.Config.from_json` is used; if the file ends in ``.py``
or ``.cfg``, then :meth:`flask.Config.from_pyfile` is used; if the
module has any other extension we assume it is an import path, import
the module and pass that to :meth:`flask.Config.from_object`. See
below for a few more semantics on module loading.
* ``dict-like`` - if the source is ``dict-like``, then
:meth:`flask.Config.from_mapping` will be used. ``dict-like`` is
defined as anything implementing an ``items`` method that returns
a tuple of ``key``, ``val``.
* ``class`` or ``module`` - if the source is an uninstantiated
``class`` or ``module``, then :meth:`flask.Config.from_object` will
be used.
Just like Flask's standard configuration, only uppercased keys will be
loaded into the config.
If the item we are passed is a ``string`` and it is determined to be
a possible Python module, then a leading ``.`` is relevant. If
a leading ``.`` is provided, we assume that the module to import is
located in the current package and operate as such; if it begins with
anything else we assume the import path provided is absolute. This
allows you to source configuration stored in a module in your package,
or in another package.
Args:
*args (object):
Any object you want us to try to configure from.
Keyword Args:
whitelist_keys_from_mappings (bool):
Should we whitelist the keys we pull from mappings? Very useful
if you're passing in an entire OS ``environ`` and you want to
omit things like ``LESSPIPE``. If no whitelist is provided, we
use the pre-existing config keys as a whitelist.
whitelist (list[str]):
An explicit list of keys that should be allowed. If provided
and ``whitelist_keys`` is ``True``, we will use that as our
whitelist instead of pre-existing app config keys.
"""
whitelist_keys_from_mappings = kwargs.get(
'whitelist_keys_from_mappings', False
)
whitelist = kwargs.get('whitelist')
for item in args:
if isinstance(item, string_types):
_, ext = splitext(item)
if ext == '.json':
self._configure_from_json(item)
elif ext in ('.cfg', '.py'):
self._configure_from_pyfile(item)
else:
self._configure_from_module(item)
elif isinstance(item, (types.ModuleType, type)):
self._configure_from_object(item)
elif hasattr(item, 'items'):
# assume everything else is a mapping like object; ``.items()``
# is what Flask uses under the hood for this method
# @TODO: This doesn't handle the edge case of using a tuple of
# two element tuples to config; but Flask does that. IMO, if
# you do that, you're a monster.
self._configure_from_mapping(
item,
whitelist_keys=whitelist_keys_from_mappings,
whitelist=whitelist
)
else:
raise TypeError("Could not determine a valid type for this"
" configuration object: `{}`!".format(item))
# we just finished here, run the post configure callbacks
self._run_post_configure_callbacks(args)
def _configure_from_json(self, item):
"""Load configuration from a JSON file.
This method will essentially just ``json.load`` the file, grab the
resulting object and pass that to ``_configure_from_object``.
Args:
items (str):
The path to the JSON file to load.
Returns:
fleaker.App:
Returns itself.
"""
self.config.from_json(item)
return self
def _configure_from_pyfile(self, item):
"""Load configuration from a Python file. Python files include Python
source files (``.py``) and ConfigParser files (``.cfg``).
This behaves as if the file was imported and passed to
``_configure_from_object``.
Args:
items (str):
The path to the Python file to load.
Returns:
fleaker.App:
Returns itself.
"""
self.config.from_pyfile(item)
return self
def _configure_from_module(self, item):
"""Configure from a module by import path.
Effectively, you give this an absolute or relative import path, it will
import it, and then pass the resulting object to
``_configure_from_object``.
Args:
item (str):
A string pointing to a valid import path.
Returns:
fleaker.App:
Returns itself.
"""
package = None
if item[0] == '.':
package = self.import_name
obj = importlib.import_module(item, package=package)
self.config.from_object(obj)
return self
def _configure_from_mapping(self, item, whitelist_keys=False,
whitelist=None):
"""Configure from a mapping, or dict, like object.
Args:
item (dict):
A dict-like object that we can pluck values from.
Keyword Args:
whitelist_keys (bool):
Should we whitelist the keys before adding them to the
configuration? If no whitelist is provided, we use the
pre-existing config keys as a whitelist.
whitelist (list[str]):
An explicit list of keys that should be allowed. If provided
and ``whitelist_keys`` is true, we will use that as our
whitelist instead of pre-existing app config keys.
Returns:
fleaker.App:
Returns itself.
"""
if whitelist is None:
whitelist = self.config.keys()
if whitelist_keys:
item = {k: v for k, v in item.items() if k in whitelist}
self.config.from_mapping(item)
return self
def _configure_from_object(self, item):
"""Configure from any Python object based on it's attributes.
Args:
item (object):
Any other Python object that has attributes.
Returns:
fleaker.App:
Returns itself.
"""
self.config.from_object(item)
return self
def configure_from_environment(self, whitelist_keys=False, whitelist=None):
"""Configure from the entire set of available environment variables.
This is really a shorthand for grabbing ``os.environ`` and passing to
:meth:`_configure_from_mapping`.
As always, only uppercase keys are loaded.
Keyword Args:
whitelist_keys (bool):
Should we whitelist the keys by only pulling those that are
already present in the config? Useful for avoiding adding
things like ``LESSPIPE`` to your app config. If no whitelist is
provided, we use the current config keys as our whitelist.
whitelist (list[str]):
An explicit list of keys that should be allowed. If provided
and ``whitelist_keys`` is true, we will use that as our
whitelist instead of pre-existing app config keys.
Returns:
fleaker.base.BaseApplication:
Returns itself.
"""
self._configure_from_mapping(os.environ, whitelist_keys=whitelist_keys,
whitelist=whitelist)
return self
def add_post_configure_callback(self, callback, run_once=False):
"""Add a new callback to be run after every call to :meth:`configure`.
Functions run at the end of :meth:`configure` are given the
application's resulting configuration and the arguments passed to
:meth:`configure`, in that order. As a note, this first argument will
be an immutable dictionary.
The return value of all registered callbacks is entirely ignored.
Callbacks are run in the order they are registered, but you should
never depend on another callback.
.. admonition:: The "Resulting" Configuration
The first argument to the callback is always the "resulting"
configuration from the call to :meth:`configure`. What this means
is you will get the Application's FROZEN configuration after the
call to :meth:`configure` finished. Moreover, this resulting
configuration will be an
:class:`~werkzeug.datastructures.ImmutableDict`.
            The purpose of a Post Configure callback is not to further alter the
configuration, but rather to do lazy initialization for anything
that absolutely requires the configuration, so any attempt to alter
the configuration of the app has been made intentionally difficult!
Args:
callback (function):
The function you wish to run after :meth:`configure`. Will
receive the application's current configuration as the first
                argument, and the same arguments passed to :meth:`configure` as
the second.
Keyword Args:
run_once (bool):
Should this callback run every time configure is called? Or
just once and be deregistered? Pass ``True`` to only run it
once.
Returns:
fleaker.base.BaseApplication:
Returns itself for a fluent interface.
"""
if run_once:
self._post_configure_callbacks['single'].append(callback)
else:
self._post_configure_callbacks['multiple'].append(callback)
return self
def _run_post_configure_callbacks(self, configure_args):
"""Run all post configure callbacks we have stored.
Functions are passed the configuration that resulted from the call to
:meth:`configure` as the first argument, in an immutable form; and are
given the arguments passed to :meth:`configure` for the second
argument.
Returns from callbacks are ignored in all fashion.
Args:
configure_args (list[object]):
The full list of arguments passed to :meth:`configure`.
Returns:
None:
Does not return anything.
"""
resulting_configuration = ImmutableDict(self.config)
# copy callbacks in case people edit them while running
multiple_callbacks = copy.copy(
self._post_configure_callbacks['multiple']
)
single_callbacks = copy.copy(self._post_configure_callbacks['single'])
# clear out the singles
self._post_configure_callbacks['single'] = []
for callback in multiple_callbacks:
callback(resulting_configuration, configure_args)
# now do the single run callbacks
for callback in single_callbacks:
callback(resulting_configuration, configure_args)
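# Rough usage sketch (illustrative only): the settings module, config key and
# ``connect`` helper below are hypothetical; ``App`` is assumed to be a Fleaker
# application class built on MultiStageConfigurableApp.
#
#   from fleaker import App
#
#   app = App(__name__)
#
#   def init_db(config, configure_args):
#       # ``config`` is the frozen (ImmutableDict) result of configure()
#       connect(config['DATABASE_URI'])
#
#   app.add_post_configure_callback(init_db, run_once=True)
#   # later sources override earlier ones; os.environ keys are whitelisted
#   # against the keys already present in the config
#   app.configure('.settings', os.environ, whitelist_keys_from_mappings=True)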
| croscon/fleaker | fleaker/config.py | Python | bsd-3-clause | 14,203 |
# Copyright (c) 2011-2014 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
from django.conf.urls import patterns, url
urlpatterns = patterns('huxley.www.views',
# Match any URL and let the client take care of routing.
url(r'', 'index', name='index'),
)
| jmosky12/huxley | huxley/www/urls.py | Python | bsd-3-clause | 336 |
import font
import screenshot
import sys
if __name__ == '__main__':
f = font.Font()
s = screenshot.Screenshot(sys.argv[1], f)
print s.text
#print s.colours
#print s.get_coords_positions()
| barneygale/mcocr | mcocr/__init__.py | Python | bsd-3-clause | 209 |
"""
PlexAPI Utils
"""
from datetime import datetime
try:
from urllib import quote # Python2
except ImportError:
from urllib.parse import quote # Python3
NA = '__NA__' # Value not available
class PlexPartialObject(object):
""" Not all objects in the Plex listings return the complete list of
elements for the object. This object will allow you to assume each
object is complete, and if the specified value you request is None
it will fetch the full object automatically and update itself.
"""
def __init__(self, server, data, initpath):
self.server = server
self.initpath = initpath
self._loadData(data)
def __getattr__(self, attr):
if self.isPartialObject():
self.reload()
return self.__dict__[attr]
def __setattr__(self, attr, value):
if value != NA:
super(PlexPartialObject, self).__setattr__(attr, value)
def _loadData(self, data):
raise Exception('Abstract method not implemented.')
def isFullObject(self):
return self.initpath == self.key
def isPartialObject(self):
return self.initpath != self.key
def reload(self):
data = self.server.query(self.key)
self.initpath = self.key
self._loadData(data[0])
def cast(func, value):
if value not in [None, NA]:
if func == bool:
value = int(value)
value = func(value)
return value
def joinArgs(args):
if not args: return ''
arglist = []
for key in sorted(args, key=lambda x:x.lower()):
value = str(args[key])
arglist.append('%s=%s' % (key, quote(value)))
return '?%s' % '&'.join(arglist)
def toDatetime(value, format=None):
if value and value != NA:
if format: value = datetime.strptime(value, format)
else: value = datetime.fromtimestamp(int(value))
return value
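if __name__ == '__main__':
    # Tiny self-checks for the helpers above (illustrative only).
    assert cast(int, '42') == 42
    assert cast(bool, '0') is False
    assert joinArgs({'type': 'movie', 'title': 'Up'}) == '?title=Up&type=movie'
    assert toDatetime('2015-06-01', format='%Y-%m-%d') == datetime(2015, 6, 1)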
| dodegy/python-plexapi | plexapi/utils.py | Python | bsd-3-clause | 1,909 |
#!/usr/bin/env python
import warnings
import pandas as pd
from pandas_ml.core.accessor import _AccessorMethods
class CrossValidationMethods(_AccessorMethods):
"""
Deprecated. Accessor to ``sklearn.cross_validation``.
"""
_module_name = 'sklearn.cross_validation'
def StratifiedShuffleSplit(self, *args, **kwargs):
"""
        Instantiate ``sklearn.cross_validation.StratifiedShuffleSplit`` using automatic mapping.
- ``y``: ``ModelFrame.target``
"""
target = self._target
return self._module.StratifiedShuffleSplit(target.values, *args, **kwargs)
def iterate(self, cv, reset_index=False):
"""
Generate ``ModelFrame`` using iterators for cross validation
Parameters
----------
cv : cross validation iterator
reset_index : bool
logical value whether to reset index, default False
Returns
-------
generated : generator of ``ModelFrame``
"""
if not(isinstance(cv, self._module._PartitionIterator)):
msg = "{0} is not a subclass of PartitionIterator"
warnings.warn(msg.format(cv.__class__.__name__))
for train_index, test_index in cv:
train_df = self._df.iloc[train_index, :]
test_df = self._df.iloc[test_index, :]
if reset_index:
train_df = train_df.reset_index(drop=True)
test_df = test_df.reset_index(drop=True)
yield train_df, test_df
def train_test_split(self, reset_index=False, *args, **kwargs):
"""
Call ``sklearn.cross_validation.train_test_split`` using automatic mapping.
Parameters
----------
reset_index : bool
logical value whether to reset index, default False
kwargs : keywords passed to ``cross_validation.train_test_split``
Returns
-------
train, test : tuple of ``ModelFrame``
"""
func = self._module.train_test_split
def _init(klass, data, index, **kwargs):
if reset_index:
return klass(data, **kwargs)
else:
return klass(data, index=index, **kwargs)
data = self._data
idx = self._df.index
if self._df.has_target():
target = self._target
tr_d, te_d, tr_l, te_l, tr_i, te_i = func(data.values, target.values, idx.values,
*args, **kwargs)
# Create DataFrame here to retain data and target names
tr_d = _init(pd.DataFrame, tr_d, tr_i, columns=data.columns)
te_d = _init(pd.DataFrame, te_d, te_i, columns=data.columns)
tr_l = _init(pd.Series, tr_l, tr_i, name=target.name)
te_l = _init(pd.Series, te_l, te_i, name=target.name)
train_df = self._constructor(data=tr_d, target=tr_l)
test_df = self._constructor(data=te_d, target=te_l)
return train_df, test_df
else:
tr_d, te_d, tr_i, te_i = func(data.values, idx.values, *args, **kwargs)
# Create DataFrame here to retain data and target names
tr_d = _init(pd.DataFrame, tr_d, tr_i, columns=data.columns)
te_d = _init(pd.DataFrame, te_d, te_i, columns=data.columns)
train_df = self._constructor(data=tr_d)
train_df.target_name = self._df.target_name
test_df = self._constructor(data=te_d)
test_df.target_name = self._df.target_name
return train_df, test_df
def cross_val_score(self, estimator, *args, **kwargs):
"""
Call ``sklearn.cross_validation.cross_val_score`` using automatic mapping.
- ``X``: ``ModelFrame.data``
- ``y``: ``ModelFrame.target``
"""
func = self._module.cross_val_score
return func(estimator, X=self._data.values, y=self._target.values, *args, **kwargs)
def permutation_test_score(self, estimator, *args, **kwargs):
"""
Call ``sklearn.cross_validation.permutation_test_score`` using automatic mapping.
- ``X``: ``ModelFrame.data``
- ``y``: ``ModelFrame.target``
"""
func = self._module.permutation_test_score
score, pscores, pvalue = func(estimator, X=self._data.values, y=self._target.values,
*args, **kwargs)
return score, pscores, pvalue
def check_cv(self, cv, *args, **kwargs):
"""
Call ``sklearn.cross_validation.check_cv`` using automatic mapping.
- ``X``: ``ModelFrame.data``
- ``y``: ``ModelFrame.target``
"""
func = self._module.check_cv
return func(cv, X=self._data, y=self._target, *args, **kwargs)
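# Rough usage sketch (illustrative; the accessor attribute name and ModelFrame
# constructor signature are assumptions, not verified against pandas_ml):
#
#   import pandas_ml as pdml
#   df = pdml.ModelFrame(data, target=target)
#   train_df, test_df = df.cross_validation.train_test_split(test_size=0.2)
#   scores = df.cross_validation.cross_val_score(estimator, cv=5)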
| sinhrks/pandas-ml | pandas_ml/skaccessors/cross_validation.py | Python | bsd-3-clause | 4,933 |
'''
Copyright (c) OS-Networks, http://os-networks.net
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the HWIOS Project nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.'''
import re
class Signal(object):
def __init__(self, name):
self.name = name
self.callees = []
def register_callee(self, callee_function, filters = None):
if filters != None:
_filter = []
for filter in filters:
_filter.append((re.compile(filter[0]), filter[1]))
filters = _filter
self.callees.append({'callee':callee_function,'filters':filters})
def execute(self, filters = None, **kwargs):
"""Matches available filters against callee filter options. Uses registered filter order """
for callee in self.callees:
if filters != None:
match = 0
for idx,compiled_filter in enumerate(callee['filters']):
rp = compiled_filter[0].match(filters[idx])
if rp != None and compiled_filter[1] == True:
match += 1
if rp == None and compiled_filter[1] == False:
match += 1
if match == len(filters):
callee['callee'](**kwargs)
else:
callee['callee'](**kwargs)
class SignalPool(object):
def __init__(self, signals = None):
self.signals = []
if signals != None:
self.signals = signals
def append(self, signal):
'''General signal adding'''
self.signals.append(signal)
def remove(self, signal):
        if signal in self.signals:
            self.signals.remove(signal)
def send(self, signal_name, filters = None, **kwargs):
for _signal in self.signals:
if _signal.name == signal_name:
_signal.execute(filters = filters,**kwargs)
def subscribe(self, signal_name, function, filters = None):
'''signal registration, with option filter'''
for _signal in self.signals:
if _signal.name == signal_name:
_signal.register_callee(function, filters)
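if __name__ == '__main__':
    # Minimal usage sketch; the signal name, filter pattern and handler are
    # purely illustrative.
    def on_page_saved(**kwargs):
        print(kwargs)
    pool = SignalPool()
    pool.append(Signal('page_saved'))
    # only fire the handler for uris matching ^wiki/
    pool.subscribe('page_saved', on_page_saved, filters=[('^wiki/', True)])
    pool.send('page_saved', filters=['wiki/home'], uri='wiki/home', user='demo')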
| Knygar/hwios | services/web_ui/models/signal.py | Python | bsd-3-clause | 3,723 |
import mock
from twisted.internet import defer
from twisted.trial import unittest
from OpenSSL import crypto
import oppy.connection.connectionbuildtask as connectionbuildtask
from oppy.connection.definitions import (
LINK_CERT_TYPE,
ID_CERT_TYPE,
OPENSSL_RSA_KEY_TYPE,
)
from oppy.cell.fixedlen import NetInfoCell
from oppy.cell.varlen import AuthChallengeCell, CertsCell, VersionsCell
from oppy.cell.util import CertsCellPayloadItem
from cert_der import test_cert_der
class ConnectionBuildTaskTest(unittest.TestCase):
@mock.patch('oppy.connection.connectionmanager.ConnectionManager', autospec=True)
def setUp(self, cm):
self.cm = cm
self.cbt = connectionbuildtask.ConnectionBuildTask(cm, mock.Mock())
@mock.patch('twisted.internet.defer.Deferred', autospec=True)
def test_connectionMade(self, mock_deferred):
mock_sendVersionsCell = mock.Mock()
mock_sendVersionsCell.return_value = mock_deferred
self.cbt._sendVersionsCell = mock_sendVersionsCell
self.cbt.connectionMade()
self.assertEqual(mock_sendVersionsCell.call_count, 1)
self.assertEqual(self.cbt._tasks.addCallback.call_count, 6)
self.assertTrue(mock.call(self.cbt._processVersionsCell) in
self.cbt._tasks.addCallback.call_args_list)
self.assertTrue(mock.call(self.cbt._processCertsCell) in
self.cbt._tasks.addCallback.call_args_list)
self.assertTrue(mock.call(self.cbt._processAuthChallengeCell) in
self.cbt._tasks.addCallback.call_args_list)
self.assertTrue(mock.call(self.cbt._processNetInfoCell) in
self.cbt._tasks.addCallback.call_args_list)
self.assertTrue(mock.call(self.cbt._sendNetInfoCell) in
self.cbt._tasks.addCallback.call_args_list)
self.assertTrue(mock.call(self.cbt._connectionSucceeded) in
self.cbt._tasks.addCallback.call_args_list)
self.cbt._tasks.addErrback.assert_called_once_with(
self.cbt._connectionFailed)
def test_connectionMade_send_versions_fail(self):
self.cbt._sendVersionsCell = mock.Mock()
self.cbt._sendVersionsCell.side_effect = Exception
self.cbt._connectionFailed = mock.Mock()
self.cbt._connectionSucceeded = mock.Mock()
self.cbt.connectionMade()
self.assertEqual(self.cbt._connectionFailed.call_count, 1)
self.assertEqual(self.cbt._connectionSucceeded.call_count, 0)
def test_connectionMade_callback_fail(self):
d = defer.Deferred()
self.cbt._sendVersionsCell = mock.Mock()
self.cbt._sendVersionsCell.return_value = d
self.cbt._processVersionsCell = mock.Mock()
self.cbt._processVersionsCell.return_value = 'test'
self.cbt._processCertsCell = mock.Mock()
self.cbt._processCertsCell.return_value = 'test'
self.cbt._processAuthChallengeCell = mock.Mock()
self.cbt._processAuthChallengeCell.side_effect = Exception
self.cbt._processNetInfoCell = mock.Mock()
self.cbt._sendNetInfoCell = mock.Mock()
self.cbt._connectionSucceeded = mock.Mock()
self.cbt._connectionFailed = mock.Mock()
self.cbt.connectionMade()
d.callback('test')
self.assertEqual(self.cbt._connectionFailed.call_count, 1)
self.assertEqual(self.cbt._connectionSucceeded.call_count, 0)
def test_connectionLost_not_failed_with_current_task(self):
self.cbt._current_task = mock.Mock()
self.cbt._current_task.errback = mock.Mock()
self.cbt._connectionFailed = mock.Mock()
self.cbt.connectionLost(mock.Mock())
self.assertTrue(self.cbt._failed)
self.assertEqual(self.cbt._connectionFailed.call_count, 0)
self.assertEqual(self.cbt._current_task.errback.call_count, 1)
def test_connectionLost_not_failed_no_current_task(self):
self.cbt._current_task = None
self.cbt._connectionFailed = mock.Mock()
self.cbt.connectionLost(mock.Mock())
self.assertTrue(self.cbt._failed)
self.assertEqual(self.cbt._connectionFailed.call_count, 1)
def test_connectionLost_failed(self):
self.cbt._failed = True
self.cbt._current_task = mock.Mock()
self.cbt._current_task.errback = mock.Mock()
self.cbt._connectionFailed = mock.Mock()
self.cbt.connectionLost(mock.Mock())
self.assertTrue(self.cbt._failed)
self.assertEqual(self.cbt._connectionFailed.call_count, 0)
self.assertEqual(self.cbt._current_task.errback.call_count, 0)
# TODO: test dataReceived(). blocked by fixing cell parsing code.
def test_recvCell(self):
self.cbt._read_queue = mock.Mock()
self.cbt._read_queue.get = mock.Mock()
ret = mock.Mock()
self.cbt._read_queue.get.return_value = ret
r = self.cbt._recvCell()
self.assertEqual(r, ret)
self.assertEqual(ret, self.cbt._current_task)
@mock.patch('oppy.connection.connectionbuildtask.VersionsCell',
autospec=True)
def test_sendVersionsCell(self, mock_versions):
mock_cell = mock.Mock()
mock_bytes = mock.Mock()
mock_cell.getBytes.return_value = mock_bytes
mock_versions.make.return_value = mock_cell
self.cbt.transport = mock.Mock()
self.cbt.transport.write = mock.Mock()
self.cbt._recvCell = mock.Mock()
ret = mock.Mock()
self.cbt._recvCell.return_value = ret
r = self.cbt._sendVersionsCell()
self.cbt.transport.write.assert_called_once_with(mock_bytes)
self.assertEqual(r, ret)
@mock.patch('oppy.connection.connectionbuildtask._connectionSupportsHandshake')
def test_processVersionsCell(self, csh):
csh.return_value = True
cell = VersionsCell.make([3])
self.cbt.transport = mock.Mock()
self.cbt.transport.getPeerCertificate = mock.Mock()
self.cbt.transport.getPeerCertificate.return_value = 'test'
self.cbt._recvCell = mock.Mock()
self.cbt._recvCell.return_value = 't'
self.assertEqual(self.cbt._processVersionsCell(cell), 't')
self.assertEqual(self.cbt._connection_cert, 'test')
self.assertEqual(self.cbt._link_protocol, 3)
self.assertEqual(self.cbt._recvCell.call_count, 1)
def test_processVersionsCell_wrong_cell_type(self):
cell = NetInfoCell.make(0, '127.0.0.1', ['127.0.0.1'])
self.assertRaises(TypeError,
self.cbt._processVersionsCell,
cell)
@mock.patch('oppy.connection.connectionbuildtask._connectionSupportsHandshake')
def test_processVersionsCell_unsupported_handshake(self, csh):
self.cbt.transport = mock.Mock()
self.cbt.transport.getPeerCertificate = mock.Mock()
self.cbt.transport.getPeerCertificate.return_value = 'test'
csh.return_value = False
cell = VersionsCell.make([3])
self.assertRaises(ValueError,
self.cbt._processVersionsCell,
cell)
@mock.patch('oppy.connection.connectionbuildtask._connectionSupportsHandshake')
def test_processVersionsCell_no_versions_in_common(self, csh):
self.cbt.transport = mock.Mock()
self.cbt.transport.getPeerCertificate = mock.Mock()
self.cbt.transport.getPeerCertificate.return_value = 'test'
csh.return_value = True
cell = VersionsCell(None, [2])
self.assertRaises(ValueError,
self.cbt._processVersionsCell,
cell)
@mock.patch('oppy.connection.connectionbuildtask._getCertsFromCell',
return_value=(mock.Mock(), mock.Mock()))
@mock.patch('oppy.connection.connectionbuildtask._certsHaveValidTime',
return_value=True)
@mock.patch('oppy.connection.connectionbuildtask._ASN1KeysEqual',
return_value=True)
@mock.patch('oppy.connection.connectionbuildtask._isRSA1024BitKey',
return_value=True)
@mock.patch('oppy.crypto.util.verifyCertSig', return_value=True)
def test_processCertsCell(self, gcfc, chvt, ake, irbk, crypto):
self.cbt._connection_cert = mock.Mock()
self.cbt._recvCell = mock.Mock()
self.cbt._recvCell.return_value = 'test'
cell = CertsCell(None)
self.assertEqual(self.cbt._processCertsCell(cell), 'test')
self.assertEqual(self.cbt._recvCell.call_count, 1)
def test_processCertsCell_wrong_cell_type(self):
cell = NetInfoCell.make(0, '127.0.0.1', ['127.0.0.1'])
self.assertRaises(TypeError,
self.cbt._processCertsCell,
cell)
@mock.patch('oppy.connection.connectionbuildtask._getCertsFromCell',
return_value=(mock.Mock(), mock.Mock()))
@mock.patch('oppy.connection.connectionbuildtask._certsHaveValidTime',
return_value=False)
def test_processCertsCell_invalid_cert_time(self, gcfc, chvt):
cell = CertsCell(None)
self.assertRaises(ValueError,
self.cbt._processCertsCell,
cell)
@mock.patch('oppy.connection.connectionbuildtask._getCertsFromCell',
return_value=(mock.Mock(), mock.Mock()))
@mock.patch('oppy.connection.connectionbuildtask._certsHaveValidTime',
return_value=True)
@mock.patch('oppy.connection.connectionbuildtask._ASN1KeysEqual',
return_value=False)
def test_processCertsCell_keys_neq(self, gcfc, chvt, ake):
self.cbt._connection_cert = mock.Mock()
cell = CertsCell(None)
self.assertRaises(ValueError,
self.cbt._processCertsCell,
cell)
@mock.patch('oppy.connection.connectionbuildtask._getCertsFromCell',
return_value=(mock.Mock(), mock.Mock()))
@mock.patch('oppy.connection.connectionbuildtask._certsHaveValidTime',
return_value=True)
@mock.patch('oppy.connection.connectionbuildtask._ASN1KeysEqual',
return_value=True)
@mock.patch('oppy.connection.connectionbuildtask._isRSA1024BitKey',
return_value=False)
def test_processCertsCell_not_RSA_1024(self, gcfc, chvt, ake, irbk):
self.cbt._connection_cert = mock.Mock()
cell = CertsCell(None)
self.assertRaises(ValueError,
self.cbt._processCertsCell,
cell)
@mock.patch('oppy.connection.connectionbuildtask._getCertsFromCell',
return_value=(mock.Mock(), mock.Mock()))
@mock.patch('oppy.connection.connectionbuildtask._certsHaveValidTime',
return_value=True)
@mock.patch('oppy.connection.connectionbuildtask._ASN1KeysEqual',
return_value=True)
@mock.patch('oppy.connection.connectionbuildtask._isRSA1024BitKey',
return_value=True)
@mock.patch('oppy.crypto.util.verifyCertSig', return_value=False)
def test_processCertsCell_cert_not_signed(self, gcfc, chvt, ake, irbk, c):
self.cbt._connection_cert = mock.Mock()
cell = CertsCell(None)
self.assertRaises(ValueError,
self.cbt._processCertsCell,
cell)
def test_processAuthChallengeCell(self):
cell = AuthChallengeCell(None)
self.cbt._recvCell = mock.Mock()
self.cbt._recvCell.return_value = 'test'
self.assertEqual(self.cbt._processAuthChallengeCell(cell), 'test')
self.assertEqual(self.cbt._recvCell.call_count, 1)
def test_processAuthChallengeCell_wrong_cell_type(self):
cell = CertsCell(None)
self.assertRaises(TypeError,
self.cbt._processAuthChallengeCell,
cell)
def test_processNetInfoCell(self):
cell = NetInfoCell.make(0, '127.0.0.1', ['127.0.0.2'])
self.assertEqual(self.cbt._processNetInfoCell(cell),
('127.0.0.1', '127.0.0.2'))
def test_processNetInfoCell_wrong_type(self):
self.assertRaises(TypeError,
self.cbt._processNetInfoCell,
CertsCell(None))
@mock.patch('oppy.connection.connectionbuildtask.NetInfoCell.getBytes',
return_value='test')
def test_sendNetInfoCell(self, cell):
self.cbt.transport = mock.Mock()
self.cbt.transport.write = mock.Mock()
self.cbt._sendNetInfoCell(('127.0.0.1', '127.0.0.2'))
self.cbt.transport.write.assert_called_once_with('test')
def test_connectionSucceeded(self):
self.cbt._connectionSucceeded(None)
self.cm.connectionTaskSucceeded.assert_called_once_with(self.cbt)
def test_connectionFailed_not_failed_yet(self):
self.cbt._failed = False
self.cbt.transport = mock.Mock()
self.cbt.transport.abortConnection = mock.Mock()
self.cbt._connectionFailed(None)
self.assertTrue(self.cbt._failed)
self.assertEqual(self.cbt.transport.abortConnection.call_count, 1)
self.assertEqual(self.cm.connectionTaskFailed.call_count, 1)
def test_connectionFailed_already_failed(self):
self.cbt._failed = True
self.cbt.transport = mock.Mock()
self.cbt.transport.abortConnection = mock.Mock()
self.cbt._connectionFailed(None)
self.assertTrue(self.cbt._failed)
self.assertEqual(self.cbt.transport.abortConnection.call_count, 0)
self.assertEqual(self.cm.connectionTaskFailed.call_count, 1)
@mock.patch('oppy.crypto.util.verifyCertSig', return_value=True)
def test_connectionSupportsHandshake_self_signed(self, _):
c = mock.Mock()
mock_issuer = mock.Mock()
mock_issuer.get_components.return_value = [('CN', None)]
mock_issuer.commonName = 'foo.net'
c.get_issuer.return_value = mock_issuer
mock_subject = mock.Mock()
mock_subject.get_components.return_value = [('CN', None)]
mock_subject.commonName = 'bar.net'
c.get_subject.return_value = mock_subject
b = mock.Mock()
c.get_pubkey.bits = mock.Mock(return_value=b)
b.bits.return_value = 1024
self.assertTrue(connectionbuildtask._connectionSupportsHandshake(c))
@mock.patch('oppy.crypto.util.verifyCertSig', return_value=False)
def test_connectionSupportsHandshake_issuer_CN(self, _):
c = mock.Mock()
mock_issuer = mock.Mock()
mock_issuer.get_components.return_value = [('XX', None)]
mock_issuer.commonName = 'foo.net'
c.get_issuer.return_value = mock_issuer
mock_subject = mock.Mock()
mock_subject.get_components.return_value = [('CN', None)]
mock_subject.commonName = 'bar.net'
c.get_subject.return_value = mock_subject
b = mock.Mock()
c.get_pubkey.bits = mock.Mock(return_value=b)
b.bits.return_value = 1024
self.assertTrue(connectionbuildtask._connectionSupportsHandshake(c))
@mock.patch('oppy.crypto.util.verifyCertSig', return_value=False)
def test_connectionSupportsHandshake_subject_CN(self, c):
c = mock.Mock()
mock_issuer = mock.Mock()
mock_issuer.get_components.return_value = [('CN', None)]
mock_issuer.commonName = 'foo.net'
c.get_issuer.return_value = mock_issuer
mock_subject = mock.Mock()
mock_subject.get_components.return_value = [('XX', None)]
mock_subject.commonName = 'bar.net'
c.get_subject.return_value = mock_subject
b = mock.Mock()
c.get_pubkey.bits = mock.Mock(return_value=b)
b.bits.return_value = 1024
self.assertTrue(connectionbuildtask._connectionSupportsHandshake(c))
@mock.patch('oppy.crypto.util.verifyCertSig', return_value=False)
def test_connectionSupportsHandshake_issuer_net(self, c):
c = mock.Mock()
mock_issuer = mock.Mock()
mock_issuer.get_components.return_value = [('CN', None)]
mock_issuer.commonName = 'foo.com'
c.get_issuer.return_value = mock_issuer
mock_subject = mock.Mock()
mock_subject.get_components.return_value = [('CN', None)]
mock_subject.commonName = 'bar.net'
c.get_subject.return_value = mock_subject
b = mock.Mock()
c.get_pubkey.bits = mock.Mock(return_value=b)
b.bits.return_value = 1024
self.assertTrue(connectionbuildtask._connectionSupportsHandshake(c))
@mock.patch('oppy.crypto.util.verifyCertSig', return_value=False)
def test_connectionSupportsHandshake_subject_net(self, c):
c = mock.Mock()
mock_issuer = mock.Mock()
mock_issuer.get_components.return_value = [('CN', None)]
mock_issuer.commonName = 'foo.net'
c.get_issuer.return_value = mock_issuer
mock_subject = mock.Mock()
mock_subject.get_components.return_value = [('CN', None)]
mock_subject.commonName = 'bar.com'
c.get_subject.return_value = mock_subject
b = mock.Mock()
c.get_pubkey.bits = mock.Mock(return_value=b)
b.bits.return_value = 1024
self.assertTrue(connectionbuildtask._connectionSupportsHandshake(c))
@mock.patch('oppy.crypto.util.verifyCertSig', return_value=False)
def test_connectionSupportsHandshake_longer_1024(self, c):
c = mock.Mock()
mock_issuer = mock.Mock()
mock_issuer.get_components.return_value = [('CN', None)]
mock_issuer.commonName = 'foo.net'
c.get_issuer.return_value = mock_issuer
mock_subject = mock.Mock()
mock_subject.get_components.return_value = [('CN', None)]
mock_subject.commonName = 'bar.net'
c.get_subject.return_value = mock_subject
b = mock.Mock()
c.get_pubkey.bits = mock.Mock(return_value=b)
b.bits.return_value = 2048
self.assertTrue(connectionbuildtask._connectionSupportsHandshake(c))
@mock.patch('oppy.crypto.util.verifyCertSig', return_value=False)
def test_connectionSupportsHandshake_all_fail(self, c):
c = mock.Mock()
mock_issuer = mock.Mock()
mock_issuer.get_components.return_value = [('CN', None)]
mock_issuer.commonName = 'foo.net'
c.get_issuer.return_value = mock_issuer
mock_subject = mock.Mock()
mock_subject.get_components.return_value = [('CN', None)]
mock_subject.commonName = 'bar.net'
c.get_subject.return_value = mock_subject
b = mock.Mock()
c.get_pubkey.bits = mock.Mock(return_value=b)
b.bits.return_value = 1024
self.assertTrue(connectionbuildtask._connectionSupportsHandshake(c))
def test_getCertsFromCell(self):
lc = test_cert_der
link_cert = CertsCellPayloadItem(LINK_CERT_TYPE, len(lc), lc)
ic = test_cert_der
id_cert = CertsCellPayloadItem(ID_CERT_TYPE, len(ic), ic)
cell = CertsCell.make(0, [link_cert, id_cert])
res1 = crypto.load_certificate(crypto.FILETYPE_ASN1, lc)
res2 = crypto.load_certificate(crypto.FILETYPE_ASN1, ic)
l, i = connectionbuildtask._getCertsFromCell(cell)
self.assertEqual(crypto.dump_certificate(crypto.FILETYPE_ASN1, l), lc)
self.assertEqual(crypto.dump_certificate(crypto.FILETYPE_ASN1, i), ic)
def test_getCertsFromCell_invalid_count(self):
lc = test_cert_der
link_cert = CertsCellPayloadItem(LINK_CERT_TYPE, len(lc), lc)
cell = CertsCell.make(0, [link_cert])
self.assertRaises(ValueError,
connectionbuildtask._getCertsFromCell,
cell)
def test_getCertsFromCell_malformed_cert(self):
lc = test_cert_der
link_cert = CertsCellPayloadItem(LINK_CERT_TYPE, len(lc), lc)
ic = test_cert_der[:len(test_cert_der)-1]
id_cert = CertsCellPayloadItem(ID_CERT_TYPE, len(ic), ic)
cell = CertsCell.make(0, [link_cert, id_cert])
self.assertRaises(ValueError,
connectionbuildtask._getCertsFromCell,
cell)
def test_getCertsFromCell_invalid_cert_type(self):
lc = test_cert_der
link_cert = CertsCellPayloadItem(LINK_CERT_TYPE, len(lc), lc)
ic = test_cert_der
id_cert = CertsCellPayloadItem(LINK_CERT_TYPE, len(ic), ic)
cell = CertsCell.make(0, [link_cert, id_cert])
self.assertRaises(ValueError,
connectionbuildtask._getCertsFromCell,
cell)
@mock.patch('oppy.crypto.util.validCertTime', return_value=True)
    def test_certsHaveValidTime(self, vct):
mock_cert = mock.Mock()
certs = [mock_cert]
self.assertTrue(connectionbuildtask._certsHaveValidTime(certs))
vct.assert_called_once_with(mock_cert)
@mock.patch('oppy.crypto.util.validCertTime', return_value=False)
def test_certsHaveValidTime_fail(self, vct):
mock_cert = mock.Mock()
certs = [mock_cert]
self.assertFalse(connectionbuildtask._certsHaveValidTime(certs))
vct.assert_called_once_with(mock_cert)
@mock.patch('oppy.crypto.util.constantStrEqual', return_value=True)
@mock.patch('OpenSSL.crypto.dump_privatekey', autospec=True)
def test_ASN1KeysEqual(self, dpk, cse):
mock_asn1_key = mock.Mock()
self.assertTrue(connectionbuildtask._ASN1KeysEqual(mock_asn1_key,
mock_asn1_key))
self.assertEqual(cse.call_count, 1)
@mock.patch('oppy.crypto.util.constantStrEqual', return_value=False)
@mock.patch('OpenSSL.crypto.dump_privatekey', autospec=True)
def test_ASN1KeysEqual_neq(self, dpk, cse):
mock_asn1_key = mock.Mock()
self.assertFalse(connectionbuildtask._ASN1KeysEqual(mock_asn1_key,
mock_asn1_key))
self.assertEqual(cse.call_count, 1)
def test_isRSA1024BitKey(self):
key = mock.Mock()
key.type.return_value = OPENSSL_RSA_KEY_TYPE
key.bits.return_value = 1024
self.assertTrue(connectionbuildtask._isRSA1024BitKey(key))
def test_isRSA1024BitKey_not_RSA(self):
key = mock.Mock()
key.type.return_value = OPENSSL_RSA_KEY_TYPE - 1
key.bits.return_value = 1024
self.assertFalse(connectionbuildtask._isRSA1024BitKey(key))
def test_isRSA1024BitKey_not_1024(self):
key = mock.Mock()
key.type.return_value = OPENSSL_RSA_KEY_TYPE
key.bits.return_value = 2048
self.assertFalse(connectionbuildtask._isRSA1024BitKey(key))
| nskinkel/oppy | oppy/tests/unit/connection/test_connectionbuildtask.py | Python | bsd-3-clause | 22,953 |
from django.contrib.localflavor.in_.forms import (INZipCodeField,
INStateField, INStateSelect, INPhoneNumberField)
from utils import LocalFlavorTestCase
class INLocalFlavorTests(LocalFlavorTestCase):
def test_INPhoneNumberField(self):
error_format = [u'Phone numbers must be in 02X-8X or 03X-7X or 04X-6X format.']
valid = {
'0423-2443667': '0423-2443667',
'0423 2443667': '0423 2443667',
'04236-244366': '04236-244366',
'040-24436678': '040-24436678',
}
invalid = {
'04-2443667': error_format,
'423-2443667': error_format,
'0423-9442667': error_format,
'0423-0443667': error_format,
'0423-244366': error_format,
'04232442667': error_format,
'0423DJANGO': error_format,
}
self.assertFieldOutput(INPhoneNumberField, valid, invalid)
    def test_INStateSelect(self):
f = INStateSelect()
out = u'''<select name="state">
<option value="KA">Karnataka</option>
<option value="AP" selected="selected">Andhra Pradesh</option>
<option value="KL">Kerala</option>
<option value="TN">Tamil Nadu</option>
<option value="MH">Maharashtra</option>
<option value="UP">Uttar Pradesh</option>
<option value="GA">Goa</option>
<option value="GJ">Gujarat</option>
<option value="RJ">Rajasthan</option>
<option value="HP">Himachal Pradesh</option>
<option value="JK">Jammu and Kashmir</option>
<option value="AR">Arunachal Pradesh</option>
<option value="AS">Assam</option>
<option value="BR">Bihar</option>
<option value="CG">Chattisgarh</option>
<option value="HR">Haryana</option>
<option value="JH">Jharkhand</option>
<option value="MP">Madhya Pradesh</option>
<option value="MN">Manipur</option>
<option value="ML">Meghalaya</option>
<option value="MZ">Mizoram</option>
<option value="NL">Nagaland</option>
<option value="OR">Orissa</option>
<option value="PB">Punjab</option>
<option value="SK">Sikkim</option>
<option value="TR">Tripura</option>
<option value="UA">Uttarakhand</option>
<option value="WB">West Bengal</option>
<option value="AN">Andaman and Nicobar</option>
<option value="CH">Chandigarh</option>
<option value="DN">Dadra and Nagar Haveli</option>
<option value="DD">Daman and Diu</option>
<option value="DL">Delhi</option>
<option value="LD">Lakshadweep</option>
<option value="PY">Pondicherry</option>
</select>'''
self.assertEqual(f.render('state', 'AP'), out)
def test_INZipCodeField(self):
error_format = [u'Enter a zip code in the format XXXXXX or XXX XXX.']
valid = {
'360311': '360311',
'360 311': '360311',
}
invalid = {
'36 0311': error_format,
'3603111': error_format,
'360 31': error_format,
'36031': error_format,
'O2B 2R3': error_format
}
self.assertFieldOutput(INZipCodeField, valid, invalid)
def test_INStateField(self):
error_format = [u'Enter an Indian state or territory.']
valid = {
'an': 'AN',
'AN': 'AN',
'andaman and nicobar': 'AN',
'andra pradesh': 'AP',
'andrapradesh': 'AP',
'andhrapradesh': 'AP',
'ap': 'AP',
'andhra pradesh': 'AP',
'ar': 'AR',
'arunachal pradesh': 'AR',
'assam': 'AS',
'as': 'AS',
'bihar': 'BR',
'br': 'BR',
'cg': 'CG',
'chattisgarh': 'CG',
'ch': 'CH',
'chandigarh': 'CH',
'daman and diu': 'DD',
'dd': 'DD',
'dl': 'DL',
'delhi': 'DL',
'dn': 'DN',
'dadra and nagar haveli': 'DN',
'ga': 'GA',
'goa': 'GA',
'gj': 'GJ',
'gujarat': 'GJ',
'himachal pradesh': 'HP',
'hp': 'HP',
'hr': 'HR',
'haryana': 'HR',
'jharkhand': 'JH',
'jh': 'JH',
'jammu and kashmir': 'JK',
'jk': 'JK',
'karnataka': 'KA',
'karnatka': 'KA',
'ka': 'KA',
'kerala': 'KL',
'kl': 'KL',
'ld': 'LD',
'lakshadweep': 'LD',
'maharastra': 'MH',
'mh': 'MH',
'maharashtra': 'MH',
'meghalaya': 'ML',
'ml': 'ML',
'mn': 'MN',
'manipur': 'MN',
'madhya pradesh': 'MP',
'mp': 'MP',
'mizoram': 'MZ',
'mizo': 'MZ',
'mz': 'MZ',
'nl': 'NL',
'nagaland': 'NL',
'orissa': 'OR',
'odisa': 'OR',
'orisa': 'OR',
'or': 'OR',
'pb': 'PB',
'punjab': 'PB',
'py': 'PY',
'pondicherry': 'PY',
'rajasthan': 'RJ',
'rajastan': 'RJ',
'rj': 'RJ',
'sikkim': 'SK',
'sk': 'SK',
'tamil nadu': 'TN',
'tn': 'TN',
'tamilnadu': 'TN',
'tamilnad': 'TN',
'tr': 'TR',
'tripura': 'TR',
'ua': 'UA',
'uttarakhand': 'UA',
'up': 'UP',
'uttar pradesh': 'UP',
'westbengal': 'WB',
'bengal': 'WB',
'wb': 'WB',
'west bengal': 'WB'
}
invalid = {
'florida': error_format,
'FL': error_format,
}
self.assertFieldOutput(INStateField, valid, invalid)
| disqus/django-old | tests/regressiontests/forms/localflavor/in_.py | Python | bsd-3-clause | 5,631 |
"""Tests for the object class AntiSpam.
Uses the pyweet AntiSpam with a 1 second timeout instead of the usual 5 minutes
that you would experience if you actually ran the program.
"""
import time
import pytest
from pyweet.spam import AntiSpam
@pytest.fixture(autouse=True)
def set_test_timeout(request):
request.addfinalizer(clear_store)
AntiSpam.timeout = 1
@pytest.fixture
def clear_store():
AntiSpam.tweet_store = {}
AntiSpam.timeout = 600
def test_duplicates_are_spam():
"""Identical messages without the timout should be marked as spam."""
message = "a generic message to test with"
assert not AntiSpam.is_spam(message)
assert AntiSpam.is_spam(message)
assert AntiSpam.is_spam(message)
assert AntiSpam.is_spam(message)
def test_timeouts():
""""After the timeout period, a message should get through."""
message = "another generic message to test with"
assert not AntiSpam.is_spam(message)
assert AntiSpam.is_spam(message)
assert AntiSpam.timeout == 1, "pytest isn't picking up the fixture"
time.sleep(2) # wait a bit
AntiSpam.clear()
assert not AntiSpam.is_spam(message)
assert AntiSpam.is_spam(message)
| a-tal/pyweet | test/test_anti_spam.py | Python | bsd-3-clause | 1,200 |
# This file is part of the Edison Project.
# Please refer to the LICENSE document that was supplied with this software for information on how it can be used.
# ensure that we include all the models required to administer this app
from cmdb.models import *
from django.contrib import admin
admin.site.register(Country)
admin.site.register(County)
admin.site.register(Address)
admin.site.register(Company)
admin.site.register(Contact)
admin.site.register(DataCentre)
admin.site.register(DataCentreRoom)
admin.site.register(DataCentreSuite)
admin.site.register(DataCentreRack)
admin.site.register(Repo)
admin.site.register(ConfigurationItemClass)
admin.site.register(NetworkInterface)
admin.site.register(PackageProvider)
admin.site.register(PackageFormat)
admin.site.register(OperatingSystemBreed)
admin.site.register(OperatingSystemName)
admin.site.register(OperatingSystemVersion)
admin.site.register(VirtualisationType)
admin.site.register(VirtualServerDefinition)
admin.site.register(ConfigurationItemProfile)
admin.site.register(ConfigurationItem)
| proffalken/edison | cmdb/admin.py | Python | bsd-3-clause | 1,052 |
# vim:ts=4:sw=4:expandtab
"""A mongodb client library for Diesel"""
# needed to make diesel work with python 2.5
from __future__ import with_statement
import itertools
import struct
from collections import deque
from diesel import Client, call, sleep, send, receive, first, Loop, Application, ConnectionClosed
from bson import BSON, _make_c_string, decode_all
from bson.son import SON
_ZERO = "\x00\x00\x00\x00"
HEADER_SIZE = 16
class MongoOperationalError(Exception): pass
def _full_name(parent, child):
return "%s.%s" % (parent, child)
class TraversesCollections(object):
def __init__(self, name, client):
self.name = name
self.client = client
def __getattr__(self, name):
return self[name]
def __getitem__(self, name):
cls = self.client.collection_class or Collection
return cls(_full_name(self.name, name), self.client)
class Db(TraversesCollections):
pass
class Collection(TraversesCollections):
def find(self, spec=None, fields=None, skip=0, limit=0):
return MongoCursor(self.name, self.client, spec, fields, skip, limit)
def update(self, spec, doc, upsert=False, multi=False, safe=True):
return self.client.update(self.name, spec, doc, upsert, multi, safe)
def insert(self, doc_or_docs, safe=True):
return self.client.insert(self.name, doc_or_docs, safe)
def delete(self, spec, safe=True):
return self.client.delete(self.name, spec, safe)
class MongoClient(Client):
collection_class = None
def __init__(self, host='localhost', port=27017, **kw):
Client.__init__(self, host, port, **kw)
self._msg_id_counter = itertools.count(1)
@property
def _msg_id(self):
return self._msg_id_counter.next()
def _put_request_get_response(self, op, data):
self._put_request(op, data)
header = receive(HEADER_SIZE)
length, id, to, code = struct.unpack('<4i', header)
message = receive(length - HEADER_SIZE)
cutoff = struct.calcsize('<iqii')
flag, cid, start, numret = struct.unpack('<iqii', message[:cutoff])
body = decode_all(message[cutoff:])
return (cid, start, numret, body)
def _put_request(self, op, data):
req = struct.pack('<4i', HEADER_SIZE + len(data), self._msg_id, 0, op)
send("%s%s" % (req, data))
def _handle_response(self, cursor, resp):
cid, start, numret, result = resp
cursor.retrieved += numret
cursor.id = cid
if not cid or (cursor.retrieved == cursor.limit):
cursor.finished = True
return result
@call
def query(self, cursor):
op = Ops.OP_QUERY
c = cursor
msg = Ops.query(c.col, c.spec, c.fields, c.skip, c.limit)
resp = self._put_request_get_response(op, msg)
return self._handle_response(cursor, resp)
@call
def get_more(self, cursor):
limit = 0
if cursor.limit:
if cursor.limit > cursor.retrieved:
limit = cursor.limit - cursor.retrieved
else:
cursor.finished = True
if not cursor.finished:
op = Ops.OP_GET_MORE
msg = Ops.get_more(cursor.col, limit, cursor.id)
resp = self._put_request_get_response(op, msg)
return self._handle_response(cursor, resp)
else:
return []
def _put_gle_command(self):
msg = Ops.query('admin.$cmd', {'getlasterror' : 1}, 0, 0, -1)
res = self._put_request_get_response(Ops.OP_QUERY, msg)
_, _, _, r = res
doc = r[0]
if doc.get('err'):
raise MongoOperationalError(doc['err'])
return doc
@call
def update(self, col, spec, doc, upsert=False, multi=False, safe=True):
data = Ops.update(col, spec, doc, upsert, multi)
self._put_request(Ops.OP_UPDATE, data)
if safe:
return self._put_gle_command()
@call
def insert(self, col, doc_or_docs, safe=True):
data = Ops.insert(col, doc_or_docs)
self._put_request(Ops.OP_INSERT, data)
if safe:
return self._put_gle_command()
@call
def delete(self, col, spec, safe=True):
data = Ops.delete(col, spec)
self._put_request(Ops.OP_DELETE, data)
if safe:
return self._put_gle_command()
@call
def drop_database(self, name):
return self._command(name, {'dropDatabase':1})
@call
def list_databases(self):
result = self._command('admin', {'listDatabases':1})
return [(d['name'], d['sizeOnDisk']) for d in result['databases']]
@call
def _command(self, dbname, command):
msg = Ops.query('%s.$cmd' % dbname, command, None, 0, 1)
resp = self._put_request_get_response(Ops.OP_QUERY, msg)
cid, start, numret, result = resp
if result:
return result[0]
else:
return []
def __getattr__(self, name):
return Db(name, self)
class Ops(object):
ASCENDING = 1
DESCENDING = -1
OP_UPDATE = 2001
OP_INSERT = 2002
OP_GET_BY_OID = 2003
OP_QUERY = 2004
OP_GET_MORE = 2005
OP_DELETE = 2006
OP_KILL_CURSORS = 2007
@staticmethod
def query(col, spec, fields, skip, limit):
data = [
_ZERO,
_make_c_string(col),
struct.pack('<ii', skip, limit),
BSON.encode(spec or {}),
]
if fields:
if type(fields) == dict:
data.append(BSON.encode(fields))
else:
data.append(BSON.encode(dict.fromkeys(fields, 1)))
return "".join(data)
@staticmethod
def get_more(col, limit, id):
data = _ZERO
data += _make_c_string(col)
data += struct.pack('<iq', limit, id)
return data
@staticmethod
def update(col, spec, doc, upsert, multi):
colname = _make_c_string(col)
flags = 0
if upsert:
flags |= 1 << 0
if multi:
flags |= 1 << 1
fmt = '<i%dsi' % len(colname)
part = struct.pack(fmt, 0, colname, flags)
return "%s%s%s" % (part, BSON.encode(spec), BSON.encode(doc))
@staticmethod
def insert(col, doc_or_docs):
try:
doc_or_docs.fromkeys
doc_or_docs = [doc_or_docs]
except AttributeError:
pass
doc_data = "".join(BSON.encode(doc) for doc in doc_or_docs)
colname = _make_c_string(col)
return "%s%s%s" % (_ZERO, colname, doc_data)
@staticmethod
def delete(col, spec):
colname = _make_c_string(col)
return "%s%s%s%s" % (_ZERO, colname, _ZERO, BSON.encode(spec))
class MongoIter(object):
def __init__(self, cursor):
self.cursor = cursor
self.cache = deque()
def next(self):
try:
return self.cache.popleft()
except IndexError:
more = self.cursor.more()
if not more:
raise StopIteration()
else:
self.cache.extend(more)
return self.next()
class MongoCursor(object):
def __init__(self, col, client, spec, fields, skip, limit):
self.col = col
self.client = client
self.spec = spec
self.fields = fields
self.skip = skip
self.limit = limit
self.id = None
self.retrieved = 0
self.finished = False
self._query_additions = []
def more(self):
if not self.retrieved:
self._touch_query()
if not self.id and not self.finished:
return self.client.query(self)
elif not self.finished:
return self.client.get_more(self)
def all(self):
return list(self)
def __iter__(self):
return MongoIter(self)
def one(self):
all = self.all()
la = len(all)
if la == 1:
res = all[0]
elif la == 0:
res = None
else:
raise ValueError("Cursor returned more than 1 record")
return res
def count(self):
if self.retrieved:
raise ValueError("can't count an already started cursor")
db, col = self.col.split('.', 1)
l = [('count', col), ('query', self.spec)]
if self.skip:
l.append(('skip', self.skip))
if self.limit:
l.append(('limit', self.limit))
command = SON(l)
result = self.client._command(db, command)
return int(result.get('n', 0))
def sort(self, name, direction):
if self.retrieved:
raise ValueError("can't sort an already started cursor")
key = SON()
key[name] = direction
self._query_additions.append(('sort', key))
return self
def _touch_query(self):
if self._query_additions:
spec = SON({'$query': self.spec or {}})
for k, v in self._query_additions:
if k == 'sort':
ordering = spec.setdefault('$orderby', SON())
ordering.update(v)
self.spec = spec
def __enter__(self):
return self
def __exit__(self, *args, **params):
if self.id and not self.finished:
raise RuntimeError("need to cleanup cursor!")
class RawMongoClient(Client):
"A mongodb client that does the bare minimum to push bits over the wire."
@call
def send(self, data, respond=False):
"""Send raw mongodb data and optionally yield the server's response."""
send(data)
if not respond:
return ''
else:
header = receive(HEADER_SIZE)
length, id, to, opcode = struct.unpack('<4i', header)
body = receive(length - HEADER_SIZE)
return header + body
class MongoProxy(object):
ClientClass = RawMongoClient
def __init__(self, backend_host, backend_port):
self.backend_host = backend_host
self.backend_port = backend_port
def __call__(self, addr):
"""A persistent client<--proxy-->backend connection handler."""
try:
backend = None
while True:
header = receive(HEADER_SIZE)
info = struct.unpack('<4i', header)
length, id, to, opcode = info
body = receive(length - HEADER_SIZE)
resp, info, body = self.handle_request(info, body)
if resp is not None:
# our proxy will respond without talking to the backend
send(resp)
else:
# pass the (maybe modified) request on to the backend
length, id, to, opcode = info
is_query = opcode in [Ops.OP_QUERY, Ops.OP_GET_MORE]
payload = header + body
(backend, resp) = self.from_backend(payload, is_query, backend)
self.handle_response(resp)
except ConnectionClosed:
if backend:
backend.close()
def handle_request(self, info, body):
length, id, to, opcode = info
print "saw request with opcode", opcode
return None, info, body
def handle_response(self, response):
send(response)
def from_backend(self, data, respond, backend=None):
if not backend:
backend = self.ClientClass()
backend.connect(self.backend_host, self.backend_port)
resp = backend.send(data, respond)
return (backend, resp)
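# Minimal usage sketch (not part of the library; the exact connection setup is an
# assumption based on the diesel Client API used above), showing how the pieces
# fit together from inside a diesel loop:
#
#   def fetch():
#       client = MongoClient(host='localhost', port=27017)
#       with client.test_db.users.find({'active': True}, limit=5) as cursor:
#           for doc in cursor.all():
#               print doc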
| dieseldev/diesel | diesel/protocols/mongodb.py | Python | bsd-3-clause | 11,619 |
from __future__ import division
import math
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, render
from django.views.generic.detail import DetailView
from pontoon.base.models import Locale, Project, ProjectLocale, TranslatedResource
from pontoon.base.utils import require_AJAX
from pontoon.contributors.views import ContributorsMixin
def localization(request, code, slug):
"""Locale-project overview."""
locale = get_object_or_404(Locale, code=code)
project = get_object_or_404(Project.objects.available(), slug=slug)
project_locale = get_object_or_404(ProjectLocale, locale=locale, project=project)
resource_count = len(locale.parts_stats(project)) - 1
return render(request, 'localizations/localization.html', {
'locale': locale,
'project': project,
'project_locale': project_locale,
'resource_count': resource_count,
})
@require_AJAX
def ajax_resources(request, code, slug):
"""Resources tab."""
locale = get_object_or_404(Locale, code=code)
project = get_object_or_404(
Project.objects.available().prefetch_related('subpage_set'),
slug=slug
)
# Amend the parts dict with latest activity info.
translatedresources_qs = (
TranslatedResource.objects
.filter(resource__project=project, locale=locale)
.prefetch_related('resource', 'latest_translation__user')
)
if not len(translatedresources_qs):
raise Http404
pages = {}
for page in project.subpage_set.all():
latest_page_translatedresource = None
page_translatedresources_qs = (
TranslatedResource.objects
.filter(resource__in=page.resources.all(), locale=locale)
.prefetch_related('resource', 'latest_translation__user')
)
for page_translatedresource in page_translatedresources_qs:
latest = (
latest_page_translatedresource.latest_translation
if latest_page_translatedresource
else None
)
if (
latest is None or
(
page_translatedresource.latest_translation.latest_activity['date'] >
latest.latest_activity['date']
)
):
latest_page_translatedresource = page_translatedresource
pages[page.name] = latest_page_translatedresource
translatedresources = {s.resource.path: s for s in translatedresources_qs}
translatedresources = dict(translatedresources.items() + pages.items())
parts = locale.parts_stats(project)
for part in parts:
translatedresource = translatedresources.get(part['title'], None)
if translatedresource and translatedresource.latest_translation:
part['latest_activity'] = translatedresource.latest_translation.latest_activity
else:
part['latest_activity'] = None
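        # The chart below reports percentages two ways: the *_share values use
        # round() (e.g. 7 approved out of 9 total strings gives 78), while
        # approved_percent uses math.floor(), so a resource that is not fully
        # approved stays below 100 (the same example gives 77).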
part['chart'] = {
'translated_strings': part['translated_strings'],
'fuzzy_strings': part['fuzzy_strings'],
'total_strings': part['resource__total_strings'],
'approved_strings': part['approved_strings'],
'approved_share': round(
part['approved_strings'] / part['resource__total_strings'] * 100
),
'translated_share': round(
part['translated_strings'] / part['resource__total_strings'] * 100
),
'fuzzy_share': round(part['fuzzy_strings'] / part['resource__total_strings'] * 100),
'approved_percent': int(
math.floor(part['approved_strings'] / part['resource__total_strings'] * 100)
),
}
return render(request, 'localizations/includes/resources.html', {
'locale': locale,
'project': project,
'resources': parts,
})
class LocalizationContributorsView(ContributorsMixin, DetailView):
"""
Renders view of contributors for the localization.
"""
template_name = 'localizations/includes/contributors.html'
def get_object(self):
return get_object_or_404(
ProjectLocale,
locale__code=self.kwargs['code'],
project__slug=self.kwargs['slug']
)
def get_context_object_name(self, obj):
return 'projectlocale'
def contributors_filter(self, **kwargs):
return Q(
entity__resource__project=self.object.project,
locale=self.object.locale
)
| mastizada/pontoon | pontoon/localizations/views.py | Python | bsd-3-clause | 4,580 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-24 16:13
from __future__ import unicode_literals
from django.db import migrations, models
def add_questions(apps, schema_editor):
current_database = schema_editor.connection.alias
QuestionSubmodels = [
apps.get_model('wizard_builder.SingleLineText'),
apps.get_model('wizard_builder.TextArea'),
apps.get_model('wizard_builder.RadioButton'),
apps.get_model('wizard_builder.Checkbox'),
]
for Model in QuestionSubmodels:
for question in Model.objects.using(current_database):
question_type = question._meta.model_name.lower()
question.type = question_type
question.save()
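# For example, a SingleLineText question saved by this loop ends up with
# type == 'singlelinetext', since model_name is the lowercased class name.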
class Migration(migrations.Migration):
dependencies = [
('wizard_builder', '0028_formquestion_type'),
]
operations = [
migrations.RunPython(
add_questions,
migrations.RunPython.noop,
),
]
| SexualHealthInnovations/django-wizard-builder | wizard_builder/migrations/0029_populate_type.py | Python | bsd-3-clause | 983 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from tests.apidocs.util import APIDocsTestCase
class OrganizationStatsDocs(APIDocsTestCase):
def setUp(self):
self.create_event("a", message="oh no")
self.create_event("b", message="oh no")
self.url = reverse(
"sentry-api-0-organization-stats", kwargs={"organization_slug": self.organization.slug},
)
self.login_as(user=self.user)
def test_get(self):
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
| beeftornado/sentry | tests/apidocs/endpoints/organizations/test-org-stats.py | Python | bsd-3-clause | 716 |
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD Style.
import sys
import warnings
import itertools
import operator
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel
from ..base import RegressorMixin
from .base import sparse_center_data, center_data
from ..utils import array2d, atleast2d_or_csc
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..utils.extmath import safe_sparse_dot
from . import cd_fast
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear Model trained with L1 and L2 prior as regularizer
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
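    For example, a penalty of 0.1 * L1 + 0.05 * L2 corresponds to
    alpha = 0.15 and l1_ratio = 0.1 / 0.15 (roughly 0.67).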
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter
alpha = 0 is equivalent to an ordinary least square, solved
by the LinearRegression object in the scikit. For numerical
        reasons, using alpha = 0 with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with 0 <= l1_ratio <= 1. For
l1_ratio = 0 the penalty is an L2 penalty. For l1_ratio = 1 it is an L1
penalty. For 0 < l1_ratio < 1, the penalty is a combination of L1 and
L2.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
normalize : boolean, optional
If True, the regressors X are normalized
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always True to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
copy_X : boolean, optional, default False
If True, X will be copied; else, it may be overwritten.
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than 'tol', the optimization code checks the
dual gap for optimality and continues until it is smaller
than tol.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive: bool, optional
When set to True, forces the coefficients to be positive.
Attributes
----------
`coef_` : array, shape = (n_features,)
parameter vector (w in the cost function formula)
`sparse_coef_` : scipy.sparse matrix, shape = (n_features, 1)
`sparse_coef_` is a readonly property derived from `coef_`
`intercept_` : float | array, shape = (n_targets,)
independent term in decision function.
`dual_gap_` : float
the current fit is guaranteed to be epsilon-suboptimal with
epsilon := `dual_gap_`
`eps_` : float
`eps_` is used to check if the fit converged to the requested
`tol`
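    Examples
    --------
    A minimal usage sketch (expected output omitted):
    >>> from sklearn import linear_model
    >>> clf = linear_model.ElasticNet(alpha=0.1, l1_ratio=0.7)
    >>> clf = clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])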
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a fortran contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
rho=None):
self.alpha = alpha
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
def fit(self, X, y, Xy=None, coef_init=None):
"""Fit model with coordinate descent
Parameters
-----------
X: ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y: ndarray, shape = (n_samples,) or (n_samples, n_targets)
Target
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
coef_init: ndarray of shape n_features or (n_targets, n_features)
            The initial coefficients to warm-start the optimization
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a fortran contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this aglorithm does not converge"
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
# From now on X can be touched inplace
y = np.asarray(y, dtype=np.float64)
# now all computation with X can be done inplace
fit = self._sparse_fit if sparse.isspmatrix(X) else self._dense_fit
fit(X, y, Xy, coef_init)
return self
def _dense_fit(self, X, y, Xy=None, coef_init=None):
# copy was done in fit if necessary
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
precompute = self.precompute
if hasattr(precompute, '__array__') \
and not np.allclose(X_mean, np.zeros(n_features)) \
and not np.allclose(X_std, np.ones(n_features)):
# recompute Gram
precompute = 'auto'
Xy = None
coef_ = self._init_coef(coef_init, n_features, n_targets)
dual_gap_ = np.empty(n_targets)
eps_ = np.empty(n_targets)
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
# precompute if n_samples > n_features
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute or (precompute == 'auto' and n_samples > n_features):
Gram = np.dot(X.T, X)
else:
Gram = None
for k in xrange(n_targets):
if Gram is None:
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.enet_coordinate_descent(
coef_[k, :], l1_reg, l2_reg, X, y[:, k], self.max_iter,
self.tol, self.positive)
else:
Gram = Gram.copy()
if Xy is None:
this_Xy = np.dot(X.T, y[:, k])
else:
this_Xy = Xy[:, k]
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.enet_coordinate_descent_gram(
coef_[k, :], l1_reg, l2_reg, Gram, this_Xy, y[:, k],
self.max_iter, self.tol, self.positive)
if dual_gap_[k] > eps_[k]:
warnings.warn('Objective did not converge for ' +
'target %d, you might want' % k +
' to increase the number of iterations')
self.coef_, self.dual_gap_, self.eps_ = (np.squeeze(a) for a in
(coef_, dual_gap_, eps_))
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
def _sparse_fit(self, X, y, Xy=None, coef_init=None):
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"Note: Sparse matrices cannot be indexed w/" +
"boolean masks (use `indices=True` in CV).")
# NOTE: we are explicitly not centering the data the naive way to
# avoid breaking the sparsity of X
X_data, y, X_mean, y_mean, X_std = sparse_center_data(
X, y, self.fit_intercept, self.normalize)
if y.ndim == 1:
y = y[:, np.newaxis]
n_samples, n_features = X.shape[0], X.shape[1]
n_targets = y.shape[1]
coef_ = self._init_coef(coef_init, n_features, n_targets)
dual_gap_ = np.empty(n_targets)
eps_ = np.empty(n_targets)
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
for k in xrange(n_targets):
coef_[k, :], dual_gap_[k], eps_[k] = \
cd_fast.sparse_enet_coordinate_descent(
coef_[k, :], l1_reg, l2_reg, X_data, X.indices,
X.indptr, y[:, k], X_mean / X_std,
self.max_iter, self.tol, self.positive)
if dual_gap_[k] > eps_[k]:
warnings.warn('Objective did not converge for ' +
'target %d, you might want' % k +
' to increase the number of iterations')
self.coef_, self.dual_gap_, self.eps_ = (np.squeeze(a) for a in
(coef_, dual_gap_, eps_))
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
def _init_coef(self, coef_init, n_features, n_targets):
if coef_init is None:
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64)
else:
coef_ = self.coef_
else:
coef_ = coef_init
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
if coef_.shape != (n_targets, n_features):
raise ValueError("X and coef_init have incompatible "
"shapes (%s != %s)."
% (coef_.shape, (n_targets, n_features)))
return coef_
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape = (n_samples,)
The predicted decision function
"""
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self).decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with l1_ratio=1.0 (no L2 penalty).
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0
alpha = 0 is equivalent to an ordinary least square, solved
by the LinearRegression object in the scikit. For numerical
        reasons, using alpha = 0 with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always True to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than 'tol', the optimization code checks the
dual gap for optimality and continues until it is smaller
than tol.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to True, forces the coefficients to be positive.
Attributes
----------
`coef_` : array, shape = (n_features,)
parameter vector (w in the cost function formula)
`sparse_coef_` : scipy.sparse matrix, shape = (n_features, 1)
`sparse_coef_` is a readonly property derived from `coef_`
`intercept_` : float
independent term in decision function.
`dual_gap_` : float
the current fit is guaranteed to be epsilon-suboptimal with
epsilon := `dual_gap_`
`eps_` : float
`eps_` is used to check if the fit converged to the requested
`tol`
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute='auto', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a fortran contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute='auto', copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive)
###############################################################################
# Classes to store linear models along a regularization path
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, fit_intercept=True,
normalize=False, copy_X=True, verbose=False,
**params):
"""Compute Lasso path with coordinate descent
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
X : ndarray, shape = (n_samples, n_features)
Training data. Pass directly as fortran contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
eps : float, optional
Length of the path. eps=1e-3 means that
alpha_min / alpha_max = 1e-3
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
fit_intercept : bool
        Whether to fit an intercept
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : bool or integer
Amount of verbosity
params : kwargs
keyword arguments passed to the Lasso objects
Returns
-------
models : a list of models along the regularization path
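    Examples
    --------
    A small illustrative call; three alphas are requested, so three fitted
    models come back:
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> X = np.array([[1., 1.], [2., 2.], [3., 3.]])
    >>> y = np.array([0., 1., 2.])
    >>> models = linear_model.lasso_path(X, y, n_alphas=3)
    >>> len(models)
    3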
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a fortran contiguous numpy array.
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, verbose=verbose, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, fit_intercept=True,
normalize=False, copy_X=True, verbose=False, rho=None,
**params):
"""Compute Elastic-Net path with coordinate descent
The Elastic Net optimization function is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
Parameters
----------
X : ndarray, shape = (n_samples, n_features)
Training data. Pass directly as fortran contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). l1_ratio=1 corresponds to the Lasso
eps : float
Length of the path. eps=1e-3 means that
alpha_min / alpha_max = 1e-3
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
fit_intercept : bool
        Whether to fit an intercept
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : bool or integer
Amount of verbosity
params : kwargs
keyword arguments passed to the Lasso objects
Returns
-------
models : a list of models along the regularization path
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
ElasticNet
ElasticNetCV
"""
if rho is not None:
l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=copy_X and fit_intercept)
# From now on X can be touched inplace
if not sparse.isspmatrix(X):
X, y, X_mean, y_mean, X_std = center_data(X, y, fit_intercept,
normalize, copy=False)
# XXX : in the sparse case the data will be centered
# at each fit...
n_samples, n_features = X.shape
if (hasattr(precompute, '__array__')
and not np.allclose(X_mean, np.zeros(n_features))
and not np.allclose(X_std, np.ones(n_features))):
# recompute Gram
precompute = 'auto'
Xy = None
if precompute or ((precompute == 'auto') and (n_samples > n_features)):
if sparse.isspmatrix(X):
warnings.warn("precompute is ignored for sparse data")
precompute = False
else:
precompute = np.dot(X.T, X)
if Xy is None:
Xy = safe_sparse_dot(X.T, y, dense_output=True)
n_samples = X.shape[0]
if alphas is None:
alpha_max = np.abs(Xy).max() / (n_samples * l1_ratio)
alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
coef_ = None # init coef_
models = []
n_alphas = len(alphas)
for i, alpha in enumerate(alphas):
model = ElasticNet(
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept if sparse.isspmatrix(X) else False,
precompute=precompute)
model.set_params(**params)
model.fit(X, y, coef_init=coef_, Xy=Xy)
if fit_intercept and not sparse.isspmatrix(X):
model.fit_intercept = True
model._set_intercept(X_mean, y_mean, X_std)
if verbose:
if verbose > 2:
print model
elif verbose > 1:
print 'Path: %03i out of %03i' % (i, n_alphas)
else:
sys.stderr.write('.')
coef_ = model.coef_.copy()
models.append(model)
return models
def _path_residuals(X, y, train, test, path, path_params, l1_ratio=1):
this_mses = list()
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
models_train = path(X[train], y[train], **path_params)
this_mses = np.empty(len(models_train))
for i_model, model in enumerate(models_train):
y_ = model.predict(X[test])
this_mses[i_model] = ((y_ - y[test]) ** 2).mean()
return this_mses, l1_ratio
class LinearModelCV(LinearModel):
"""Base class for iterative model fitting along a regularization path"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data. Pass directly as fortran contiguous data to avoid
unnecessary memory duplication
        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
# From now on X can be touched inplace
y = np.asarray(y, dtype=np.float64)
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
# Start to compute path on full data
# XXX: is this really useful: we are fitting models that we won't
# use later
models = self.path(X, y, **path_params)
# Update the alphas list
alphas = [model.alpha for model in models]
n_alphas = len(alphas)
path_params.update({'alphas': alphas, 'n_alphas': n_alphas})
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
all_mse_paths = list()
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
for l1_ratio, mse_alphas in itertools.groupby(
Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_path_residuals)(
X, y, train, test, self.path, path_params,
l1_ratio=l1_ratio)
for l1_ratio in l1_ratios for train, test in folds
), operator.itemgetter(1)):
mse_alphas = [m[0] for m in mse_alphas]
mse_alphas = np.array(mse_alphas)
mse = np.mean(mse_alphas, axis=0)
i_best_alpha = np.argmin(mse)
this_best_mse = mse[i_best_alpha]
all_mse_paths.append(mse_alphas.T)
            if this_best_mse < best_mse:
                best_mse = this_best_mse
                model = models[i_best_alpha]
                best_l1_ratio = l1_ratio
if hasattr(model, 'l1_ratio'):
if model.l1_ratio != best_l1_ratio:
# Need to refit the model
model.l1_ratio = best_l1_ratio
model.fit(X, y)
self.l1_ratio_ = model.l1_ratio
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.alpha_ = model.alpha
self.alphas_ = np.asarray(alphas)
self.coef_path_ = np.asarray([model.coef_ for model in models])
self.mse_path_ = np.squeeze(all_mse_paths)
return self
@property
def rho_(self):
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
return self.l1_ratio_
@property
def alpha(self):
warnings.warn("Use alpha_. Using alpha is deprecated "
"since version 0.12, and backward compatibility "
"won't be maintained from version 0.14 onward. ",
DeprecationWarning, stacklevel=1)
return self.alpha_
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
eps : float, optional
Length of the path. eps=1e-3 means that
alpha_min / alpha_max = 1e-3.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter: int, optional
The maximum number of iterations
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than 'tol', the optimization code checks the
dual gap for optimality and continues until it is smaller
than tol.
cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific crossvalidation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
verbose : bool or integer
amount of verbosity
Attributes
----------
`alpha_`: float
        The amount of penalization chosen by cross-validation
`coef_` : array, shape = (n_features,)
parameter vector (w in the cost function formula)
`intercept_` : float
independent term in decision function.
`mse_path_`: array, shape = (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
`alphas_`: numpy array
The grid of alphas used for fitting
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a fortran contiguous numpy array.
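
    A minimal, purely illustrative usage (X and y are assumed to be arrays of
    suitable shape; the output is not asserted)::

        >>> from sklearn import linear_model       # doctest: +SKIP
        >>> clf = linear_model.LassoCV(cv=3)       # doctest: +SKIP
        >>> clf.fit(X, y)                          # doctest: +SKIP
        >>> clf.alpha_                             # doctest: +SKIP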
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
n_jobs = 1
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For l1_ratio = 0
the penalty is an L2 penalty. For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in [.1, .5, .7,
.9, .95, .99, 1]
eps : float, optional
Length of the path. eps=1e-3 means that
alpha_min / alpha_max = 1e-3.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than 'tol', the optimization code checks the
dual gap for optimality and continues until it is smaller
than tol.
cv : integer or crossvalidation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific crossvalidation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
verbose : bool or integer
amount of verbosity
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If '-1', use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
Attributes
----------
`alpha_` : float
        The amount of penalization chosen by cross-validation
    `l1_ratio_` : float
        The compromise between l1 and l2 penalization chosen by
        cross-validation
`coef_` : array, shape = (n_features,)
Parameter vector (w in the cost function formula),
`intercept_` : float
Independent term in the decision function.
`mse_path_` : array, shape = (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a fortran contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
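    For example, with purely illustrative numbers, a = 1.0 and b = 0.5
    correspond to alpha = 1.5 and l1_ratio = 1.0 / 1.5 (about 0.67).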
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, rho=None):
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
    The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norms of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than 'tol', the optimization code checks the
dual gap for optimality and continues until it is smaller
than tol.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
`intercept_` : array, shape = (n_tasks,)
Independent term in decision function.
`coef_` : array, shape = (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), `coef_` is then a 1D array
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, rho=None, tol=0.0001,
warm_start=False)
>>> print clf.coef_
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print clf.intercept_
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a fortran contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, rho=None):
self.l1_ratio = l1_ratio
if rho is not None:
self.l1_ratio = rho
warnings.warn("rho was renamed to l1_ratio and will be removed "
"in 0.15", DeprecationWarning)
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
def fit(self, X, y, Xy=None, coef_init=None):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X: ndarray, shape = (n_samples, n_features)
Data
y: ndarray, shape = (n_samples, n_tasks)
Target
coef_init: ndarray of shape n_features
            The initial coefficients to warm-start the optimization
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a fortran contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = array2d(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
squeeze_me = False
if y.ndim == 1:
squeeze_me = True
y = y[:, np.newaxis]
n_samples, n_features = X.shape
_, n_tasks = y.shape
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if coef_init is None:
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
else:
self.coef_ = coef_init
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
self.coef_, self.dual_gap_, self.eps_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol)
self._set_intercept(X_mean, y_mean, X_std)
        # Make sure that coef_ has the same shape as the given 'y',
        # so that predictions have the same shape
if squeeze_me:
self.coef_ = self.coef_.squeeze()
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norms of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than 'tol', the optimization code checks the
dual gap for optimality and continues until it is smaller
than tol.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
`coef_` : array, shape = (n_tasks, n_features)
parameter vector (W in the cost function formula)
`intercept_` : array, shape = (n_tasks,)
independent term in decision function.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, tol=0.0001, warm_start=False)
>>> print clf.coef_
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print clf.intercept_
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a fortran contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
| mrshu/scikit-learn | sklearn/linear_model/coordinate_descent.py | Python | bsd-3-clause | 44,980 |
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
#get_ipython().magic('matplotlib inline')
lValues = [ k for k in range(2,24, 4)];
# lValues = lValues + [ k for k in range(24, 128, 8)];
for cyc in lValues:
print("TEST_CYCLES_START", cyc)
b1 = tsds.generate_random_TS(N = 3200 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = cyc, transform = "None", sigma = 0.1, exog_count = 0, ar_order=0);
df = b1.mPastData
# df.tail(10)
# df[:-10].tail()
# df[:-10:-1]
# df.describe()
lEngine = autof.cForecastEngine()
lEngine.mOptions.mCycleLengths = [ k for k in range(2,128) ];
lEngine
H = cyc * 2;
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
dfapp_in = df.copy();
dfapp_in.tail()
# H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H).values);
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(H).to_json(date_format='iso'))
print("</Forecast>\n\n")
print("TEST_CYCLES_END", cyc)
| antoinecarme/pyaf | tests/perf/test_cycles_full_long.py | Python | bsd-3-clause | 1,473 |
#coding:utf-8
from PyQt4 import QtGui
#from PyQt4 import QtCore
from libblah.consts import ABOUT_MSG, ABOUT_TITLE
from ui.ui_verification_dialog import Ui_VerificationDialog
def popup_confirm(parent, msg = None):
    reply = QtGui.QMessageBox.question(parent, u"提示",  # u"提示" means "Prompt"
msg,
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
return True
else:
return False
def popup_warning(parent, msg = None):
    QtGui.QMessageBox.warning(parent, u"警告", msg, QtGui.QMessageBox.Close)  # u"警告" means "Warning"
def popup_error(parent, msg = None):
    QtGui.QMessageBox.critical(parent, u"错误", msg, QtGui.QMessageBox.Close)  # u"错误" means "Error"
def popup_about(parent):
QtGui.QMessageBox.about(parent, ABOUT_TITLE, ABOUT_MSG)
class GetInputDialog(QtGui.QDialog, Ui_VerificationDialog):
def __init__(self, body = "Input: "):
QtGui.QDialog.__init__(self)
self.setupUi(self)
self.label.setText(body)
class GetVerificationDialog(GetInputDialog):
def __init__(self, body, path):
GetInputDialog.__init__(self, body)
pix = QtGui.QPixmap(path)
lab = QtGui.QLabel("verification", self)
lab.setPixmap(pix)
self.verticalLayout.addWidget(lab)
@staticmethod
def get_input(body = "Recognise and Input 4 characters: ", path = None):
dlg = GetVerificationDialog(body, path)
dlg.show()
result = dlg.exec_()
if result == QtGui.QDialog.Accepted:
btn_val = True
else:
btn_val = False
return (btn_val, str(dlg.lineEdit.text()))
| williamyangcn/iBlah_py | libiblah/popup_dlgs.py | Python | bsd-3-clause | 1,614 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import json
from os.path import abspath, basename, dirname, exists, join, normpath
from typing import Dict, List, NamedTuple, Optional
from warnings import warn
# Bokeh imports
from ..core.templates import CSS_RESOURCES, JS_RESOURCES
from ..document.document import Document
from ..model import Model
from ..resources import BaseResources, Resources
from ..settings import settings
from ..util.compiler import bundle_models
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Bundle',
'bundle_for_objs_and_resources',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class ScriptRef(object):
def __init__(self, url, type="text/javascript"):
self.url = url
self.type = type
class Script(object):
def __init__(self, content, type="text/javascript"):
self.content = content
self.type = type
class StyleRef(object):
def __init__(self, url):
self.url = url
class Style(object):
def __init__(self, content):
self.content = content
class Bundle(object):
@classmethod
def of(cls, js_files, js_raw, css_files, css_raw, hashes):
return cls(js_files=js_files, js_raw=js_raw, css_files=css_files, css_raw=css_raw, hashes=hashes)
def __init__(self, **kwargs):
self.js_files = kwargs.get("js_files", [])
self.js_raw = kwargs.get("js_raw", [])
self.css_files = kwargs.get("css_files", [])
self.css_raw = kwargs.get("css_raw", [])
self.hashes = kwargs.get("hashes", {})
def __iter__(self):
yield self._render_js()
yield self._render_css()
def _render_js(self):
return JS_RESOURCES.render(js_files=self.js_files, js_raw=self.js_raw, hashes=self.hashes)
def _render_css(self):
return CSS_RESOURCES.render(css_files=self.css_files, css_raw=self.css_raw)
def scripts(self, tag=True):
if tag:
return JS_RESOURCES.render(js_raw=self.js_raw, js_files=[])
else:
return "\n".join(self.js_raw)
@property
def js_urls(self):
return self.js_files
@property
def css_urls(self):
return self.css_files
def add(self, artifact):
if isinstance(artifact, ScriptRef):
self.js_files.append(artifact.url)
elif isinstance(artifact, Script):
self.js_raw.append(artifact.content)
elif isinstance(artifact, StyleRef):
self.css_files.append(artifact.url)
elif isinstance(artifact, Style):
self.css_raw.append(artifact.content)
def bundle_for_objs_and_resources(objs, resources):
''' Generate rendered CSS and JS resources suitable for the given
collection of Bokeh objects
Args:
objs (seq[Model or Document]) :
resources (BaseResources or tuple[BaseResources])
Returns:
Bundle
'''
    # Any env vars will override a local default passed in
resources = settings.resources(default=resources)
if isinstance(resources, str):
resources = Resources(mode=resources)
if resources is None or isinstance(resources, BaseResources):
js_resources = css_resources = resources
elif isinstance(resources, tuple) and len(resources) == 2 and all(r is None or isinstance(r, BaseResources) for r in resources):
js_resources, css_resources = resources
if js_resources and not css_resources:
warn('No Bokeh CSS Resources provided to template. If required you will need to provide them manually.')
if css_resources and not js_resources:
warn('No Bokeh JS Resources provided to template. If required you will need to provide them manually.')
else:
raise ValueError("expected Resources or a pair of optional Resources, got %r" % resources)
from copy import deepcopy
# XXX: force all components on server and in notebook, because we don't know in advance what will be used
use_widgets = _use_widgets(objs) if objs else True
use_tables = _use_tables(objs) if objs else True
use_gl = _use_gl(objs) if objs else True
js_files = []
js_raw = []
css_files = []
css_raw = []
if js_resources:
js_resources = deepcopy(js_resources)
if not use_widgets and "bokeh-widgets" in js_resources.js_components:
js_resources.js_components.remove("bokeh-widgets")
if not use_tables and "bokeh-tables" in js_resources.js_components:
js_resources.js_components.remove("bokeh-tables")
if not use_gl and "bokeh-gl" in js_resources.js_components:
js_resources.js_components.remove("bokeh-gl")
js_files.extend(js_resources.js_files)
js_raw.extend(js_resources.js_raw)
if css_resources:
css_resources = deepcopy(css_resources)
css_files.extend(css_resources.css_files)
css_raw.extend(css_resources.css_raw)
if js_resources:
extensions = _bundle_extensions(objs, js_resources)
mode = js_resources.mode if resources is not None else "inline"
if mode == "inline":
js_raw.extend([ Resources._inline(bundle.artifact_path) for bundle in extensions ])
elif mode == "server":
js_files.extend([ bundle.server_url for bundle in extensions ])
elif mode == "cdn":
js_files.extend([ bundle.cdn_url for bundle in extensions if bundle.cdn_url is not None ])
else:
js_files.extend([ bundle.artifact_path for bundle in extensions ])
models = [ obj.__class__ for obj in _all_objs(objs) ] if objs else None
ext = bundle_models(models)
if ext is not None:
js_raw.append(ext)
return Bundle.of(js_files, js_raw, css_files, css_raw, js_resources.hashes if js_resources else {})
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _query_extensions(objs, query):
names = set()
for obj in _all_objs(objs):
if hasattr(obj, "__implementation__"):
continue
name = obj.__view_module__.split(".")[0]
if name == "bokeh":
continue
if name in names:
continue
names.add(name)
for model in Model.model_class_reverse_map.values():
if model.__module__.startswith(name):
if query(model):
return True
return False
_default_cdn_host = "https://unpkg.com"
class ExtensionEmbed(NamedTuple):
artifact_path: str
server_url: str
cdn_url: Optional[str] = None
extension_dirs: Dict[str, str] = {} # name -> path
def _bundle_extensions(objs, resources: Resources) -> List[ExtensionEmbed]:
names = set()
bundles = []
extensions = [".min.js", ".js"] if resources.minified else [".js"]
for obj in _all_objs(objs) if objs is not None else Model.model_class_reverse_map.values():
if hasattr(obj, "__implementation__"):
continue
name = obj.__view_module__.split(".")[0]
if name == "bokeh":
continue
if name in names:
continue
names.add(name)
module = __import__(name)
this_file = abspath(module.__file__)
base_dir = dirname(this_file)
dist_dir = join(base_dir, "dist")
ext_path = join(base_dir, "bokeh.ext.json")
if not exists(ext_path):
continue
server_prefix = f"{resources.root_url}static/extensions"
package_path = join(base_dir, "package.json")
pkg: Optional[str] = None
if exists(package_path):
with open(package_path) as io:
try:
pkg = json.load(io)
except json.decoder.JSONDecodeError:
pass
artifact_path: str
server_url: str
cdn_url: Optional[str] = None
if pkg is not None:
pkg_name = pkg["name"]
pkg_version = pkg.get("version", "latest")
pkg_main = pkg.get("module", pkg.get("main", None))
if pkg_main is not None:
cdn_url = f"{_default_cdn_host}/{pkg_name}@^{pkg_version}/{pkg_main}"
else:
pkg_main = join(dist_dir, f"{name}.js")
artifact_path = join(base_dir, normpath(pkg_main))
artifacts_dir = dirname(artifact_path)
artifact_name = basename(artifact_path)
server_path = f"{name}/{artifact_name}"
else:
for ext in extensions:
artifact_path = join(dist_dir, f"{name}{ext}")
artifacts_dir = dist_dir
server_path = f"{name}/{name}{ext}"
if exists(artifact_path):
break
else:
raise ValueError(f"can't resolve artifact path for '{name}' extension")
extension_dirs[name] = artifacts_dir
server_url = f"{server_prefix}/{server_path}"
embed = ExtensionEmbed(artifact_path, server_url, cdn_url)
bundles.append(embed)
return bundles
def _all_objs(objs):
all_objs = set()
for obj in objs:
if isinstance(obj, Document):
for root in obj.roots:
all_objs |= root.references()
else:
all_objs |= obj.references()
return all_objs
def _any(objs, query):
''' Whether any of a collection of objects satisfies a given query predicate
Args:
objs (seq[Model or Document]) :
query (callable)
Returns:
True, if ``query(obj)`` is True for some object in ``objs``, else False
'''
for obj in objs:
if isinstance(obj, Document):
if _any(obj.roots, query):
return True
else:
if any(query(ref) for ref in obj.references()):
return True
return False
def _use_gl(objs):
''' Whether a collection of Bokeh objects contains a plot requesting WebGL
Args:
objs (seq[Model or Document]) :
Returns:
bool
'''
from ..models.plots import Plot
return _any(objs, lambda obj: isinstance(obj, Plot) and obj.output_backend == "webgl")
def _use_tables(objs):
''' Whether a collection of Bokeh objects contains a TableWidget
Args:
objs (seq[Model or Document]) :
Returns:
bool
'''
from ..models.widgets import TableWidget
return _any(objs, lambda obj: isinstance(obj, TableWidget)) or _ext_use_tables(objs)
def _use_widgets(objs):
''' Whether a collection of Bokeh objects contains a any Widget
Args:
objs (seq[Model or Document]) :
Returns:
bool
'''
from ..models.widgets import Widget
return _any(objs, lambda obj: isinstance(obj, Widget)) or _ext_use_widgets(objs)
def _ext_use_tables(objs):
from ..models.widgets import TableWidget
return _query_extensions(objs, lambda cls: issubclass(cls, TableWidget))
def _ext_use_widgets(objs):
from ..models.widgets import Widget
return _query_extensions(objs, lambda cls: issubclass(cls, Widget))
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ericmjl/bokeh | bokeh/embed/bundle.py | Python | bsd-3-clause | 12,481 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 5, transform = "BoxCox", sigma = 0.0, exog_count = 0, ar_order = 12); | antoinecarme/pyaf | tests/artificial/transf_BoxCox/trend_PolyTrend/cycle_5/ar_12/test_artificial_128_BoxCox_PolyTrend_5_12_0.py | Python | bsd-3-clause | 261 |
from __future__ import absolute_import
import mock
import os
from django.conf import settings
TEST_ROOT = os.path.normpath(
os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
os.pardir,
os.pardir,
'tests'))
def pytest_configure(config):
# HACK: Only needed for testing!
os.environ.setdefault('_SENTRY_SKIP_CONFIGURATION', '1')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sentry.conf.server')
# override docs which are typically synchronized from an upstream server
# to ensure tests are consistent
os.environ.setdefault(
'INTEGRATION_DOC_FOLDER',
os.path.join(
TEST_ROOT,
'fixtures',
'integration-docs'))
from sentry.utils import integrationdocs
integrationdocs.DOC_FOLDER = os.environ['INTEGRATION_DOC_FOLDER']
if not settings.configured:
# only configure the db if its not already done
test_db = os.environ.get('DB', 'postgres')
if test_db == 'mysql':
settings.DATABASES['default'].update(
{
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
'HOST': '127.0.0.1',
}
)
# mysql requires running full migration all the time
elif test_db == 'postgres':
settings.DATABASES['default'].update(
{
'ENGINE': 'sentry.db.postgres',
'USER': 'postgres',
'NAME': 'sentry',
'HOST': '127.0.0.1',
}
)
# postgres requires running full migration all the time
# since it has to install stored functions which come from
# an actual migration.
elif test_db == 'sqlite':
settings.DATABASES['default'].update(
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
)
else:
raise RuntimeError('oops, wrong database: %r' % test_db)
settings.TEMPLATE_DEBUG = True
# Disable static compiling in tests
settings.STATIC_BUNDLES = {}
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + ('tests', )
# Need a predictable key for tests that involve checking signatures
settings.SENTRY_PUBLIC = False
if not settings.SENTRY_CACHE:
settings.SENTRY_CACHE = 'sentry.cache.django.DjangoCache'
settings.SENTRY_CACHE_OPTIONS = {}
# This speeds up the tests considerably, pbkdf2 is by design, slow.
settings.PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
settings.AUTH_PASSWORD_VALIDATORS = []
# Replace real sudo middleware with our mock sudo middleware
# to assert that the user is always in sudo mode
middleware = list(settings.MIDDLEWARE_CLASSES)
sudo = middleware.index('sentry.middleware.sudo.SudoMiddleware')
middleware[sudo] = 'sentry.testutils.middleware.SudoMiddleware'
settings.MIDDLEWARE_CLASSES = tuple(middleware)
settings.SENTRY_OPTIONS['cloudflare.secret-key'] = 'cloudflare-secret-key'
# enable draft features
settings.SENTRY_OPTIONS['mail.enable-replies'] = True
settings.SENTRY_ALLOW_ORIGIN = '*'
settings.SENTRY_TSDB = 'sentry.tsdb.inmemory.InMemoryTSDB'
settings.SENTRY_TSDB_OPTIONS = {}
if settings.SENTRY_NEWSLETTER == 'sentry.newsletter.base.Newsletter':
settings.SENTRY_NEWSLETTER = 'sentry.newsletter.dummy.DummyNewsletter'
settings.SENTRY_NEWSLETTER_OPTIONS = {}
settings.BROKER_BACKEND = 'memory'
settings.BROKER_URL = None
settings.CELERY_ALWAYS_EAGER = False
settings.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
settings.DEBUG_VIEWS = True
settings.SENTRY_ENCRYPTION_SCHEMES = ()
settings.DISABLE_RAVEN = True
settings.CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
if not hasattr(settings, 'SENTRY_OPTIONS'):
settings.SENTRY_OPTIONS = {}
settings.SENTRY_OPTIONS.update(
{
'redis.clusters': {
'default': {
'hosts': {
0: {
'db': 9,
},
},
},
},
'mail.backend': 'django.core.mail.backends.locmem.EmailBackend',
'system.url-prefix': 'http://testserver',
'slack.client-id': 'slack-client-id',
'slack.client-secret': 'slack-client-secret',
'slack.verification-token': 'slack-verification-token',
'github-app.name': 'sentry-test-app',
'github-app.client-id': 'github-client-id',
'github-app.client-secret': 'github-client-secret',
'vsts.client-id': 'vsts-client-id',
'vsts.client-secret': 'vsts-client-secret',
}
)
# django mail uses socket.getfqdn which doesn't play nice if our
# networking isn't stable
patcher = mock.patch('socket.getfqdn', return_value='localhost')
patcher.start()
if not settings.SOUTH_TESTS_MIGRATE:
settings.INSTALLED_APPS = tuple(i for i in settings.INSTALLED_APPS if i != 'south')
from sentry.runner.initializer import (
bootstrap_options, configure_structlog, initialize_receivers, fix_south,
bind_cache_to_option_store, setup_services
)
bootstrap_options(settings)
configure_structlog()
fix_south(settings)
bind_cache_to_option_store()
initialize_receivers()
setup_services()
register_extensions()
from sentry.utils.redis import clusters
with clusters.get('default').all() as client:
client.flushdb()
# force celery registration
from sentry.celery import app # NOQA
# disable DISALLOWED_IPS
from sentry import http
http.DISALLOWED_IPS = set()
def register_extensions():
from sentry.plugins import plugins
from sentry.plugins.utils import TestIssuePlugin2
plugins.register(TestIssuePlugin2)
from sentry import integrations
from sentry.integrations.bitbucket import BitbucketIntegrationProvider
from sentry.integrations.example import ExampleIntegrationProvider, AliasedIntegrationProvider
from sentry.integrations.github import GitHubIntegrationProvider
from sentry.integrations.github_enterprise import GitHubEnterpriseIntegrationProvider
from sentry.integrations.jira import JiraIntegrationProvider
from sentry.integrations.slack import SlackIntegrationProvider
from sentry.integrations.vsts import VstsIntegrationProvider
from sentry.integrations.vsts_extension import VstsExtensionIntegrationProvider
integrations.register(BitbucketIntegrationProvider)
integrations.register(ExampleIntegrationProvider)
integrations.register(AliasedIntegrationProvider)
integrations.register(GitHubIntegrationProvider)
integrations.register(GitHubEnterpriseIntegrationProvider)
integrations.register(JiraIntegrationProvider)
integrations.register(SlackIntegrationProvider)
integrations.register(VstsIntegrationProvider)
integrations.register(VstsExtensionIntegrationProvider)
from sentry.plugins import bindings
from sentry.plugins.providers.dummy import DummyRepositoryProvider
bindings.add('repository.provider', DummyRepositoryProvider, id='dummy')
def pytest_runtest_teardown(item):
from sentry import tsdb
# TODO(dcramer): this only works if this is the correct tsdb backend
tsdb.flush()
# XXX(dcramer): only works with DummyNewsletter
from sentry import newsletter
if hasattr(newsletter.backend, 'clear'):
newsletter.backend.clear()
from sentry.utils.redis import clusters
with clusters.get('default').all() as client:
client.flushdb()
from celery.task.control import discard_all
discard_all()
from sentry.models import OrganizationOption, ProjectOption, UserOption
for model in (OrganizationOption, ProjectOption, UserOption):
model.objects.clear_local_cache()
| ifduyue/sentry | src/sentry/utils/pytest/sentry.py | Python | bsd-3-clause | 8,317 |
from __future__ import unicode_literals
import os
import json
from functools import wraps
from datetime import datetime, date
from contextlib import contextmanager
from threading import RLock, Condition, current_thread
from collections import Sized, Iterable, Mapping, defaultdict
def is_listy(x):
"""
returns a boolean indicating whether the passed object is "listy",
which we define as a sized iterable which is not a map or string
"""
return isinstance(x, Sized) and isinstance(x, Iterable) and not isinstance(x, (Mapping, type(b''), type('')))
def listify(x):
"""
returns a list version of x if x is a non-string iterable, otherwise
returns a list with x as its only element
"""
return list(x) if is_listy(x) else [x]
class serializer(json.JSONEncoder):
"""
JSONEncoder subclass for plugins to register serializers for types.
Plugins should not need to instantiate this class directly, but
they are expected to call serializer.register() for new data types.
"""
_registry = {}
_datetime_format = '%Y-%m-%d %H:%M:%S.%f'
def default(self, o):
if type(o) in self._registry:
preprocessor = self._registry[type(o)]
else:
for klass, preprocessor in self._registry.items():
if isinstance(o, klass):
break
else:
raise json.JSONEncoder.default(self, o)
return preprocessor(o)
@classmethod
def register(cls, type, preprocessor):
"""
Associates a type with a preprocessor so that RPC handlers may
pass non-builtin JSON types. For example, Sideboard already
does the equivalent of
>>> serializer.register(datetime, lambda dt: dt.strftime('%Y-%m-%d %H:%M:%S.%f'))
This method raises an exception if you try to register a
preprocessor for a type which already has one.
:param type: the type you are registering
:param preprocessor: function which takes one argument which is
the value to serialize and returns a json-
serializable value
"""
assert type not in cls._registry, '{} already has a preprocessor defined'.format(type)
cls._registry[type] = preprocessor
serializer.register(date, lambda d: d.strftime('%Y-%m-%d'))
serializer.register(datetime, lambda dt: dt.strftime(serializer._datetime_format))
serializer.register(set, lambda s: sorted(list(s)))
def cached_property(func):
"""decorator for making readonly, memoized properties"""
pname = "_" + func.__name__
@property
@wraps(func)
def caching(self, *args, **kwargs):
if not hasattr(self, pname):
setattr(self, pname, func(self, *args, **kwargs))
return getattr(self, pname)
return caching
def request_cached_property(func):
"""
Sometimes we want a property to be cached for the duration of a request,
with concurrent requests each having their own cached version. This does
that via the threadlocal class, such that each HTTP request CherryPy serves
and each RPC request served via websocket or JSON-RPC will have its own
cached value, which is cleared and then re-generated on later requests.
"""
from sideboard.lib import threadlocal
name = func.__module__ + '.' + func.__name__
@property
@wraps(func)
def with_caching(self):
val = threadlocal.get(name)
if val is None:
val = func(self)
threadlocal.set(name, val)
return val
return with_caching
class _class_property(property):
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
def class_property(cls):
"""
For whatever reason, the @property decorator isn't smart enough to recognize
classmethods and behave differently on them than on instance methods. This
property may be used to create a class-level property, useful for singletons
and other one-per-class properties. Class properties are read-only.
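
    A small illustrative sketch::

        class Config(object):
            @class_property
            def version(cls):
                return '1.0'

        Config.version  # == '1.0', read directly off the class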
"""
return _class_property(classmethod(cls))
def entry_point(func):
"""
Decorator used to define entry points for command-line scripts. Sideboard
ships with a "sep" (Sideboard Entry Point) command line script which can be
used to call into any plugin-defined entry point after deleting sys.argv[0]
so that the entry point name will be the first argument. For example, if a
plugin had this entry point:
@entry_point
def some_action():
print(sys.argv)
Then someone in a shell ran the command:
sep some_action foo bar
It would print:
['some_action', 'foo', 'bar']
:param func: a function which takes no arguments; its name will be the name
of the command, and an exception is raised if a function with
the same name has already been registered as an entry point
"""
assert func.__name__ not in _entry_points, 'An entry point named {} has already been implemented'.format(func.__name__)
_entry_points[func.__name__] = func
return func
_entry_points = {}
class RWGuard(object):
"""
This utility class provides the ability to perform read/write locking, such
that we can have any number of readers OR a single writer. We give priority
to writers, who will get the lock before any readers.
These locks are reentrant, meaning that the same thread can acquire a read
or write lock multiple times, and will then need to release the lock the
same number of times it was acquired. A thread with an acquired read lock
cannot acquire a write lock, or vice versa. Locks can only be released by
the threads which acquired them.
This class is named RWGuard rather than RWLock because it is not itself a
lock, e.g. it doesn't have an acquire method, it cannot be directly used as
a context manager, etc.
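
    A minimal usage sketch (purely illustrative)::

        guard = RWGuard()
        with guard.read_locked:
            pass  # any number of threads may hold read locks concurrently
        with guard.write_locked:
            pass  # exactly one writer, with no concurrent readers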
"""
def __init__(self):
self.lock = RLock()
self.waiting_writer_count = 0
self.acquired_writer = defaultdict(int)
self.acquired_readers = defaultdict(int)
self.ready_for_reads = Condition(self.lock)
self.ready_for_writes = Condition(self.lock)
@property
@contextmanager
def read_locked(self):
"""
Context manager which acquires a read lock on entrance and releases it
on exit. Any number of threads may acquire a read lock.
"""
self.acquire_for_read()
try:
yield
finally:
self.release()
@property
@contextmanager
def write_locked(self):
"""
Context manager which acquires a write lock on entrance and releases it
on exit. Only one thread may acquire a write lock at a time.
"""
self.acquire_for_write()
try:
yield
finally:
self.release()
def acquire_for_read(self):
"""
NOTE: consumers are encouraged to use the "read_locked" context manager
instead of this method where possible.
This method acquires the read lock for the current thread, blocking if
necessary until there are no other threads with the write lock acquired
or waiting for the write lock to be available.
"""
tid = current_thread().ident
assert tid not in self.acquired_writer, 'Threads which have already acquired a write lock may not lock for reading'
with self.lock:
while self.acquired_writer or (self.waiting_writer_count and tid not in self.acquired_readers):
self.ready_for_reads.wait()
self.acquired_readers[tid] += 1
def acquire_for_write(self):
"""
NOTE: consumers are encouraged to use the "write_locked" context manager
instead of this method where possible.
This method acquires the write lock for the current thread, blocking if
necessary until no other threads have the write lock acquired and no
thread has the read lock acquired.
"""
tid = current_thread().ident
assert tid not in self.acquired_readers, 'Threads which have already acquired a read lock may not lock for writing'
with self.lock:
while self.acquired_readers or (self.acquired_writer and tid not in self.acquired_writer):
self.waiting_writer_count += 1
self.ready_for_writes.wait()
self.waiting_writer_count -= 1
self.acquired_writer[tid] += 1
def release(self):
"""
Release the read or write lock held by the current thread. Since these
locks are reentrant, this method must be called once for each time the
lock was acquired. This method raises an exception if called by a
thread with no read or write lock acquired.
"""
tid = current_thread().ident
assert tid in self.acquired_readers or tid in self.acquired_writer, 'this thread does not hold a read or write lock'
with self.lock:
for counts in [self.acquired_readers, self.acquired_writer]:
counts[tid] -= 1
if counts[tid] <= 0:
del counts[tid]
wake_readers = not self.waiting_writer_count
wake_writers = self.waiting_writer_count and not self.acquired_readers
if wake_writers:
with self.ready_for_writes:
self.ready_for_writes.notify()
elif wake_readers:
with self.ready_for_reads:
self.ready_for_reads.notify_all()
| RobRuana/sideboard | sideboard/lib/_utils.py | Python | bsd-3-clause | 9,665 |
# -*- coding: utf-8 -*-
__author__ = 'Michael Ingrisch'
__email__ = '[email protected]'
__version__ = '0.1.0' | michimichi/compartmentmodels | compartmentmodels/__init__.py | Python | bsd-3-clause | 119 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 5, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 12); | antoinecarme/pyaf | tests/artificial/transf_Anscombe/trend_PolyTrend/cycle_5/ar_12/test_artificial_1024_Anscombe_PolyTrend_5_12_20.py | Python | bsd-3-clause | 265 |
"""
Module parse to/from Excel
"""
# ---------------------------------------------------------------------
# ExcelFile class
import abc
from datetime import date, datetime, time, timedelta
from distutils.version import LooseVersion
from io import UnsupportedOperation
import os
from textwrap import fill
import warnings
import numpy as np
import pandas._libs.json as json
import pandas.compat as compat
from pandas.compat import (
OrderedDict, add_metaclass, lrange, map, range, string_types, u, zip)
from pandas.errors import EmptyDataError
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
is_bool, is_float, is_integer, is_list_like)
from pandas.core import config
from pandas.core.frame import DataFrame
from pandas.io.common import (
_NA_VALUES, _is_url, _stringify_path, _urlopen, _validate_header_arg,
get_filepath_or_buffer)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
_read_excel_doc = """
Read an Excel file into a pandas DataFrame.
Support both `xls` and `xlsx` file extensions from a local filesystem or URL.
Support an option to read a single sheet or a list of sheets.
Parameters
----------
io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be /path/to/workbook.xlsx.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
parse_cols : int or list, default None
Alias of `usecols`.
.. deprecated:: 0.21.0
Use `usecols` instead.
usecols : int, str, list-like, or callable default None
Return a subset of the columns.
* If None, then parse all columns.
* If int, then indicates last column to be parsed.
.. deprecated:: 0.24.0
Pass in a list of int instead from 0 to `usecols` inclusive.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
.. versionadded:: 0.24.0
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
.. versionadded:: 0.24.0
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
.. versionadded:: 0.20.0
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd.
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
.. versionadded:: 0.19.0
false_values : list, default None
Values to consider as False.
.. versionadded:: 0.19.0
skiprows : list-like
Rows to skip at the beginning (0-indexed).
nrows : int, default None
Number of rows to parse.
.. versionadded:: 0.23.0
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
    * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skip_footer : int, default 0
Alias of `skipfooter`.
.. deprecated:: 0.23.0
Use `skipfooter` instead.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
to_excel : Write DataFrame to an Excel file.
to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> pd.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> pd.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pd.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 NaN 1
1 NaN 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for any new
``supported_extensions`` defined on the writer."""
if not compat.callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
for ext in klass.supported_extensions:
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.{ext}.writer".format(ext=ext),
engine_name, validator=str)
_writer_extensions.append(ext)
def _get_default_writer(ext):
_default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'}
try:
import xlsxwriter # noqa
_default_writers['xlsx'] = 'xlsxwriter'
except ImportError:
pass
return _default_writers[ext]
def get_writer(engine_name):
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '{engine}'"
.format(engine=engine_name))
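# Note on engine resolution (illustrative): ``register_writer`` records one
# ``io.excel.<ext>.writer`` config option per supported extension, which
# ``ExcelWriter.__new__`` later consults; ``_get_default_writer`` only supplies
# the fallback when that option is left at 'auto'. Assuming the pandas option
# machinery, a caller could steer the choice like this:
#
#     import pandas as pd
#     pd.get_option('io.excel.xlsx.writer')               # e.g. 'xlsxwriter'
#     pd.set_option('io.excel.xlsx.writer', 'openpyxl')   # force openpyxl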
@Appender(_read_excel_doc)
@deprecate_kwarg("parse_cols", "usecols")
@deprecate_kwarg("skip_footer", "skipfooter")
def read_excel(io,
sheet_name=0,
header=0,
names=None,
index_col=None,
parse_cols=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skip_footer=0,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds):
# Can't use _deprecate_kwarg since sheetname=None has a special meaning
if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
warnings.warn("The `sheetname` keyword is deprecated, use "
"`sheet_name` instead", FutureWarning, stacklevel=2)
sheet_name = kwds.pop("sheetname")
if 'sheet' in kwds:
raise TypeError("read_excel() got an unexpected keyword argument "
"`sheet`")
if not isinstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
return io.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
dtype=dtype,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
verbose=verbose,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds)
@add_metaclass(abc.ABCMeta)
class _BaseExcelReader(object):
@property
@abc.abstractmethod
def sheet_names(self):
pass
@abc.abstractmethod
def get_sheet_by_name(self, name):
pass
@abc.abstractmethod
def get_sheet_by_index(self, index):
pass
@abc.abstractmethod
def get_sheet_data(self, sheet, convert_float):
pass
def parse(self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds):
_validate_header_arg(header)
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheet_name, list):
sheets = sheet_name
ret_dict = True
elif sheet_name is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheet_name]
# handle same-type duplicates.
sheets = list(OrderedDict.fromkeys(sheets).keys())
output = OrderedDict()
for asheetname in sheets:
if verbose:
print("Reading sheet {sheet}".format(sheet=asheetname))
if isinstance(asheetname, compat.string_types):
sheet = self.get_sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.get_sheet_by_index(asheetname)
data = self.get_sheet_data(sheet, convert_float)
usecols = _maybe_convert_usecols(usecols)
if sheet.nrows == 0:
output[asheetname] = DataFrame()
continue
if is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None and is_list_like(header):
header_names = []
control_row = [True] * len(data[0])
for row in header:
if is_integer(skiprows):
row += skiprows
data[row], control_row = _fill_mi_header(data[row],
control_row)
if index_col is not None:
header_name, _ = _pop_header_name(data[row], index_col)
header_names.append(header_name)
if is_list_like(index_col):
# Forward fill values for MultiIndex index.
if not is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
# Check if we have an empty dataset
# before trying to collect data.
if offset < len(data):
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == '' or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
has_index_names = is_list_like(header) and len(header) > 1
# GH 12292 : error when read one empty column from excel file
try:
parser = TextParser(data,
names=names,
header=header,
index_col=index_col,
has_index_names=has_index_names,
squeeze=squeeze,
dtype=dtype,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
usecols=usecols,
mangle_dupe_cols=mangle_dupe_cols,
**kwds)
output[asheetname] = parser.read(nrows=nrows)
if not squeeze or isinstance(output[asheetname], DataFrame):
if header_names:
output[asheetname].columns = output[
asheetname].columns.set_names(header_names)
elif compat.PY2:
output[asheetname].columns = _maybe_convert_to_string(
output[asheetname].columns)
except EmptyDataError:
# No Data, return an empty DataFrame
output[asheetname] = DataFrame()
if ret_dict:
return output
else:
return output[asheetname]
class _XlrdReader(_BaseExcelReader):
def __init__(self, filepath_or_buffer):
"""Reader using xlrd engine.
Parameters
----------
filepath_or_buffer : string, path object or Workbook
Object to be parsed.
"""
err_msg = "Install xlrd >= 1.0.0 for Excel support"
try:
import xlrd
except ImportError:
raise ImportError(err_msg)
else:
if xlrd.__VERSION__ < LooseVersion("1.0.0"):
raise ImportError(err_msg +
". Current version " + xlrd.__VERSION__)
# If filepath_or_buffer is a url, want to keep the data as bytes so
# can't pass to get_filepath_or_buffer()
if _is_url(filepath_or_buffer):
filepath_or_buffer = _urlopen(filepath_or_buffer)
elif not isinstance(filepath_or_buffer, (ExcelFile, xlrd.Book)):
filepath_or_buffer, _, _, _ = get_filepath_or_buffer(
filepath_or_buffer)
if isinstance(filepath_or_buffer, xlrd.Book):
self.book = filepath_or_buffer
elif hasattr(filepath_or_buffer, "read"):
# N.B. xlrd.Book has a read attribute too
if hasattr(filepath_or_buffer, 'seek'):
try:
# GH 19779
filepath_or_buffer.seek(0)
except UnsupportedOperation:
# HTTPResponse does not support seek()
# GH 20434
pass
data = filepath_or_buffer.read()
self.book = xlrd.open_workbook(file_contents=data)
elif isinstance(filepath_or_buffer, compat.string_types):
self.book = xlrd.open_workbook(filepath_or_buffer)
else:
raise ValueError('Must explicitly set engine if not passing in'
' buffer or path for io.')
@property
def sheet_names(self):
return self.book.sheet_names()
def get_sheet_by_name(self, name):
return self.book.sheet_by_name(name)
def get_sheet_by_index(self, index):
return self.book.sheet_by_index(index)
def get_sheet_data(self, sheet, convert_float):
from xlrd import (xldate, XL_CELL_DATE,
XL_CELL_ERROR, XL_CELL_BOOLEAN,
XL_CELL_NUMBER)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
if cell_typ == XL_CELL_DATE:
# Use the newer xlrd datetime handling.
try:
cell_contents = xldate.xldate_as_datetime(
cell_contents, epoch1904)
except OverflowError:
return cell_contents
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if ((not epoch1904 and year == (1899, 12, 31)) or
(epoch1904 and year == (1904, 1, 1))):
cell_contents = time(cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
# it's a minimal perf hit and less surprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
data = []
for i in range(sheet.nrows):
row = [_parse_cell(value, typ)
for value, typ in zip(sheet.row_values(i),
sheet.row_types(i))]
data.append(row)
return data
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd. See read_excel for more documentation
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object or xlrd workbook
If a string or path object, expected to be a path to xls or xlsx file.
engine : string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or ``xlrd``.
"""
_engines = {
'xlrd': _XlrdReader,
}
def __init__(self, io, engine=None):
if engine is None:
engine = 'xlrd'
if engine not in self._engines:
raise ValueError("Unknown engine: {engine}".format(engine=engine))
# could be a str, ExcelFile, Book, etc.
self.io = io
# Always a string
self._io = _stringify_path(io)
self._reader = self._engines[engine](self._io)
def __fspath__(self):
return self._io
def parse(self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds):
"""
Parse specified sheet(s) into a DataFrame
Equivalent to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters
"""
# Can't use _deprecate_kwarg since sheetname=None has a special meaning
if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
warnings.warn("The `sheetname` keyword is deprecated, use "
"`sheet_name` instead", FutureWarning, stacklevel=2)
sheet_name = kwds.pop("sheetname")
elif 'sheetname' in kwds:
raise TypeError("Cannot specify both `sheet_name` "
"and `sheetname`. Use just `sheet_name`")
if 'chunksize' in kwds:
raise NotImplementedError("chunksize keyword of read_excel "
"is not implemented")
return self._reader.parse(sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds)
@property
def book(self):
return self._reader.book
@property
def sheet_names(self):
return self._reader.sheet_names
def close(self):
"""close io if necessary"""
if hasattr(self.io, 'close'):
self.io.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _excel2num(x):
"""
Convert Excel column name like 'AB' to 0-based column index.
Parameters
----------
x : str
The Excel column name to convert to a 0-based column index.
Returns
-------
num : int
The column index corresponding to the name.
Raises
------
ValueError
Part of the Excel column name was invalid.
"""
index = 0
for c in x.upper().strip():
cp = ord(c)
if cp < ord("A") or cp > ord("Z"):
raise ValueError("Invalid column name: {x}".format(x=x))
index = index * 26 + cp - ord("A") + 1
return index - 1
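# Worked examples for the base-26 conversion above (illustrative only):
#     _excel2num('A')   ->  0
#     _excel2num('Z')   -> 25
#     _excel2num('AA')  -> 26    # 1*26 + 1, minus 1 for 0-based indexing
#     _excel2num('AB')  -> 27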
def _range2cols(areas):
"""
Convert comma separated list of column names and ranges to indices.
Parameters
----------
areas : str
A string containing a sequence of column ranges (or areas).
Returns
-------
cols : list
A list of 0-based column indices.
Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
cols = []
for rng in areas.split(","):
if ":" in rng:
rng = rng.split(":")
cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1))
else:
cols.append(_excel2num(rng))
return cols
def _maybe_convert_usecols(usecols):
"""
Convert `usecols` into a compatible format for parsing in `parsers.py`.
Parameters
----------
usecols : object
The use-columns object to potentially convert.
Returns
-------
converted : object
The compatible format of `usecols`.
"""
if usecols is None:
return usecols
if is_integer(usecols):
warnings.warn(("Passing in an integer for `usecols` has been "
"deprecated. Please pass in a list of int from "
"0 to `usecols` inclusive instead."),
FutureWarning, stacklevel=2)
return lrange(usecols + 1)
if isinstance(usecols, compat.string_types):
return _range2cols(usecols)
return usecols
def _validate_freeze_panes(freeze_panes):
if freeze_panes is not None:
if (
len(freeze_panes) == 2 and
all(isinstance(item, int) for item in freeze_panes)
):
return True
raise ValueError("freeze_panes must be of form (row, column)"
" where row and column are integers")
# freeze_panes wasn't specified, return False so it won't be applied
# to output sheet
return False
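# Behaviour of the validator above, for reference (illustrative):
#     _validate_freeze_panes((1, 0))  -> True     # freeze the first row
#     _validate_freeze_panes(None)    -> False    # nothing to apply
#     _validate_freeze_panes((1,))    -> raises ValueError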
def _trim_excel_header(row):
# trim header row so auto-index inference works
# xlrd uses '' , openpyxl None
while len(row) > 0 and (row[0] == '' or row[0] is None):
row = row[1:]
return row
def _maybe_convert_to_string(row):
"""
Convert elements in a row to string from Unicode.
This is purely a Python 2.x patch and is performed ONLY when all
elements of the row are string-like.
Parameters
----------
row : array-like
The row of data to convert.
Returns
-------
converted : array-like
"""
if compat.PY2:
converted = []
for i in range(len(row)):
if isinstance(row[i], compat.string_types):
try:
converted.append(str(row[i]))
except UnicodeEncodeError:
break
else:
break
else:
row = converted
return row
def _fill_mi_header(row, control_row):
"""Forward fills blank entries in row, but only inside the same parent index
Used for creating headers in Multiindex.
Parameters
----------
row : list
List of items in a single row.
control_row : list of bool
Helps to determine if particular column is in same parent index as the
previous value. Used to stop propagation of empty cells between
different indexes.
Returns
    -------
Returns changed row and control_row
"""
last = row[0]
for i in range(1, len(row)):
if not control_row[i]:
last = row[i]
if row[i] == '' or row[i] is None:
row[i] = last
else:
control_row[i] = False
last = row[i]
return _maybe_convert_to_string(row), control_row
# fill blank if index_col not None
def _pop_header_name(row, index_col):
"""
Pop the header name for MultiIndex parsing.
Parameters
----------
row : list
The data row to parse for the header name.
index_col : int, list
The index columns for our data. Assumed to be non-null.
Returns
-------
header_name : str
The extracted header name.
trimmed_row : list
The original data row with the header name removed.
"""
# Pop out header name and fill w/blank.
i = index_col if not is_list_like(index_col) else max(index_col)
header_name = row[i]
header_name = None if header_name == "" else header_name
return header_name, row[:i] + [''] + row[i + 1:]
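# Illustration of the behaviour documented above (hypothetical row data):
#     _pop_header_name(['idx', 'a', 'b'], index_col=0) -> ('idx', ['', 'a', 'b'])
#     _pop_header_name(['', 'a', 'b'], index_col=0)    -> (None, ['', 'a', 'b'])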
@add_metaclass(abc.ABCMeta)
class ExcelWriter(object):
"""
    Class for writing DataFrame objects into Excel sheets. By default xlwt is
    used for '.xls' files and openpyxl for '.xlsx' files. See DataFrame.to_excel
    for typical usage.
Parameters
----------
path : string
Path to xls or xlsx file.
engine : string (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : string, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD')
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
mode : {'w' or 'a'}, default 'w'
File mode to use (write or append).
.. versionadded:: 0.24.0
Attributes
----------
None
Methods
-------
None
Notes
-----
None of the methods and properties are considered public.
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
Examples
--------
Default usage:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... df.to_excel(writer)
To write to separate sheets in a single file:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... df1.to_excel(writer, sheet_name='Sheet1')
... df2.to_excel(writer, sheet_name='Sheet2')
You can set the date format or datetime format:
>>> with ExcelWriter('path_to_file.xlsx',
date_format='YYYY-MM-DD',
datetime_format='YYYY-MM-DD HH:MM:SS') as writer:
... df.to_excel(writer)
You can also append to an existing Excel file:
>>> with ExcelWriter('path_to_file.xlsx', mode='a') as writer:
... df.to_excel(writer, sheet_name='Sheet3')
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
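    # A minimal skeleton satisfying the contract described above (sketch only;
    # the engine name and extension are hypothetical):
    #
    #     class _MyWriter(ExcelWriter):
    #         engine = 'mywriter'
    #         supported_extensions = ('.xyz',)
    #
    #         def write_cells(self, cells, sheet_name=None, startrow=0,
    #                         startcol=0, freeze_panes=None):
    #             ...  # push each formatted cell into the backend workbook
    #
    #         def save(self):
    #             ...  # flush the workbook to self.path
    #
    #     register_writer(_MyWriter)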
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if issubclass(cls, ExcelWriter):
if engine is None or (isinstance(engine, string_types) and
engine == 'auto'):
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1][1:]
else:
ext = 'xlsx'
try:
engine = config.get_option('io.excel.{ext}.writer'
.format(ext=ext))
if engine == 'auto':
engine = _get_default_writer(ext)
except KeyError:
error = ValueError("No engine for filetype: '{ext}'"
.format(ext=ext))
raise error
cls = get_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
book = None
curr_sheet = None
path = None
@abc.abstractproperty
def supported_extensions(self):
"extensions that writer engine supports"
pass
@abc.abstractproperty
def engine(self):
"name of engine"
pass
@abc.abstractmethod
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
"""
        Write given formatted cells into an Excel sheet.
Parameters
----------
cells : generator
cell of formatted data to save to Excel sheet
sheet_name : string, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
freeze_panes: integer tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, mode='w',
**engine_kwargs):
# validate that this engine can handle the extension
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1]
else:
ext = 'xls' if engine == 'xlwt' else 'xlsx'
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = 'YYYY-MM-DD'
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = 'YYYY-MM-DD HH:MM:SS'
else:
self.datetime_format = datetime_format
self.mode = mode
def __fspath__(self):
return _stringify_path(self.path)
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError('Must pass explicit sheet_name or set '
'cur_sheet property')
return sheet_name
def _value_with_fmt(self, val):
"""Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
"""
fmt = None
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif isinstance(val, datetime):
fmt = self.datetime_format
elif isinstance(val, date):
fmt = self.date_format
elif isinstance(val, timedelta):
val = val.total_seconds() / float(86400)
fmt = '0'
else:
val = compat.to_str(val)
return val, fmt
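    # Worked example for the timedelta branch above (illustrative): Excel has
    # no native duration type, so a timedelta is written as a fraction of a
    # day with the plain number format '0', e.g.
    #     timedelta(hours=12) -> 43200.0 / 86400.0 -> 0.5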
@classmethod
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = (u("Invalid extension for engine '{engine}': '{ext}'")
.format(engine=pprint_thing(cls.engine),
ext=pprint_thing(ext)))
raise ValueError(msg)
else:
return True
# Allow use as a contextmanager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""synonym for save, to make it more file-like"""
return self.save()
class _OpenpyxlWriter(ExcelWriter):
engine = 'openpyxl'
supported_extensions = ('.xlsx', '.xlsm')
def __init__(self, path, engine=None, mode='w', **engine_kwargs):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super(_OpenpyxlWriter, self).__init__(path, mode=mode, **engine_kwargs)
if self.mode == 'a': # Load from existing workbook
from openpyxl import load_workbook
book = load_workbook(self.path)
self.book = book
else:
# Create workbook object with default optimized_write=True.
self.book = Workbook()
if self.book.worksheets:
try:
self.book.remove(self.book.worksheets[0])
except AttributeError:
# compat - for openpyxl <= 2.4
self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict : style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__getattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__getattribute__(key).__setattr__(nk, nv)
return xls_style
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
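    # Example of the normalisation above (sketch; assumes openpyxl is
    # importable):
    #     _convert_to_style_kwargs({'font': {'bold': True},
    #                               'borders': {'left': {'style': 'thin'}}})
    #     -> {'font': Font(bold=True),
    #         'border': Border(left=Side(style='thin'))}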
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import PatternFill, GradientFill
_pattern_fill_key_map = {
'patternType': 'fill_type',
'patterntype': 'fill_type',
'fgColor': 'start_color',
'fgcolor': 'start_color',
'bgColor': 'end_color',
'bgcolor': 'end_color',
}
_gradient_fill_key_map = {
'fill_type': 'type',
}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ['start_color', 'end_color']:
v = cls._convert_to_color(v)
if gk == 'stop':
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {
'diagonalup': 'diagonalUp',
'diagonaldown': 'diagonalDown',
}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
if k in ['left', 'right', 'top', 'bottom', 'diagonal']:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
return number_format_dict['format_code']
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using openpyxl.
sheet_name = self._get_sheet_name(sheet_name)
_style_cache = {}
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
if _validate_freeze_panes(freeze_panes):
wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1,
column=freeze_panes[1] + 1)
for cell in cells:
xcell = wks.cell(
row=startrow + cell.row + 1,
column=startcol + cell.col + 1
)
xcell.value, fmt = self._value_with_fmt(cell.val)
if fmt:
xcell.number_format = fmt
style_kwargs = {}
if cell.style:
key = str(cell.style)
style_kwargs = _style_cache.get(key)
if style_kwargs is None:
style_kwargs = self._convert_to_style_kwargs(cell.style)
_style_cache[key] = style_kwargs
if style_kwargs:
for k, v in style_kwargs.items():
setattr(xcell, k, v)
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_cells(
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
end_row=startrow + cell.mergestart + 1
)
# When cells are merged only the top-left cell is preserved
# The behaviour of the other cells in a merged range is
# undefined
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
xcell = wks.cell(column=col, row=row)
for k, v in style_kwargs.items():
setattr(xcell, k, v)
register_writer(_OpenpyxlWriter)
class _XlwtWriter(ExcelWriter):
engine = 'xlwt'
supported_extensions = ('.xls',)
def __init__(self, path, engine=None, encoding=None, mode='w',
**engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
engine_kwargs['engine'] = engine
if mode == 'a':
raise ValueError('Append mode is not supported with xlwt!')
super(_XlwtWriter, self).__init__(path, mode=mode, **engine_kwargs)
if encoding is None:
encoding = 'ascii'
self.book = xlwt.Workbook(encoding=encoding)
self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using xlwt.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_sheet(sheet_name)
self.sheets[sheet_name] = wks
if _validate_freeze_panes(freeze_panes):
wks.set_panes_frozen(True)
wks.set_horz_split_pos(freeze_panes[0])
wks.set_vert_split_pos(freeze_panes[1])
style_dict = {}
for cell in cells:
val, fmt = self._value_with_fmt(cell.val)
stylekey = json.dumps(cell.style)
if fmt:
stylekey += fmt
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, fmt)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.write_merge(startrow + cell.row,
startrow + cell.mergestart,
startcol + cell.col,
startcol + cell.mergeend,
val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
@classmethod
def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
line_sep=';'):
"""helper which recursively generate an xlwt easy style string
for example:
hstyle = {"font": {"bold": True},
"border": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"align": {"horiz": "center"}}
will be converted to
font: bold on; \
border: top thin, right thin, bottom thin, left thin; \
align: horiz center;
"""
if hasattr(item, 'items'):
if firstlevel:
it = ["{key}: {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "{sep} ".format(sep=(line_sep).join(it))
return out
else:
it = ["{key} {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "{sep} ".format(sep=(field_sep).join(it))
return out
else:
item = "{item}".format(item=item)
item = item.replace("True", "on")
item = item.replace("False", "off")
return item
@classmethod
def _convert_to_style(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlwt style object
Parameters
----------
style_dict : style dictionary to convert
num_format_str : optional number format string
"""
import xlwt
if style_dict:
xlwt_stylestr = cls._style_to_xlwt(style_dict)
style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
else:
style = xlwt.XFStyle()
if num_format_str is not None:
style.num_format_str = num_format_str
return style
register_writer(_XlwtWriter)
class _XlsxStyler(object):
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
# Ordering necessary for both determinism and because some are keyed by
# prefixes of others.
STYLE_MAPPING = {
'font': [
(('name',), 'font_name'),
(('sz',), 'font_size'),
(('size',), 'font_size'),
(('color', 'rgb',), 'font_color'),
(('color',), 'font_color'),
(('b',), 'bold'),
(('bold',), 'bold'),
(('i',), 'italic'),
(('italic',), 'italic'),
(('u',), 'underline'),
(('underline',), 'underline'),
(('strike',), 'font_strikeout'),
(('vertAlign',), 'font_script'),
(('vertalign',), 'font_script'),
],
'number_format': [
(('format_code',), 'num_format'),
((), 'num_format',),
],
'protection': [
(('locked',), 'locked'),
(('hidden',), 'hidden'),
],
'alignment': [
(('horizontal',), 'align'),
(('vertical',), 'valign'),
(('text_rotation',), 'rotation'),
(('wrap_text',), 'text_wrap'),
(('indent',), 'indent'),
(('shrink_to_fit',), 'shrink'),
],
'fill': [
(('patternType',), 'pattern'),
(('patterntype',), 'pattern'),
(('fill_type',), 'pattern'),
(('start_color', 'rgb',), 'fg_color'),
(('fgColor', 'rgb',), 'fg_color'),
(('fgcolor', 'rgb',), 'fg_color'),
(('start_color',), 'fg_color'),
(('fgColor',), 'fg_color'),
(('fgcolor',), 'fg_color'),
(('end_color', 'rgb',), 'bg_color'),
(('bgColor', 'rgb',), 'bg_color'),
(('bgcolor', 'rgb',), 'bg_color'),
(('end_color',), 'bg_color'),
(('bgColor',), 'bg_color'),
(('bgcolor',), 'bg_color'),
],
'border': [
(('color', 'rgb',), 'border_color'),
(('color',), 'border_color'),
(('style',), 'border'),
(('top', 'color', 'rgb',), 'top_color'),
(('top', 'color',), 'top_color'),
(('top', 'style',), 'top'),
(('top',), 'top'),
(('right', 'color', 'rgb',), 'right_color'),
(('right', 'color',), 'right_color'),
(('right', 'style',), 'right'),
(('right',), 'right'),
(('bottom', 'color', 'rgb',), 'bottom_color'),
(('bottom', 'color',), 'bottom_color'),
(('bottom', 'style',), 'bottom'),
(('bottom',), 'bottom'),
(('left', 'color', 'rgb',), 'left_color'),
(('left', 'color',), 'left_color'),
(('left', 'style',), 'left'),
(('left',), 'left'),
],
}
@classmethod
def convert(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format dict
Parameters
----------
style_dict : style dictionary to convert
num_format_str : optional number format string
"""
# Create a XlsxWriter format object.
props = {}
if num_format_str is not None:
props['num_format'] = num_format_str
if style_dict is None:
return props
if 'borders' in style_dict:
style_dict = style_dict.copy()
style_dict['border'] = style_dict.pop('borders')
for style_group_key, style_group in style_dict.items():
for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
# src is a sequence of keys into a nested dict
# dst is a flat key
if dst in props:
continue
v = style_group
for k in src:
try:
v = v[k]
except (KeyError, TypeError):
break
else:
props[dst] = v
if isinstance(props.get('pattern'), string_types):
# TODO: support other fill patterns
props['pattern'] = 0 if props['pattern'] == 'none' else 1
for k in ['border', 'top', 'right', 'bottom', 'left']:
if isinstance(props.get(k), string_types):
try:
props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted',
'thick', 'double', 'hair', 'mediumDashed',
'dashDot', 'mediumDashDot', 'dashDotDot',
'mediumDashDotDot',
'slantDashDot'].index(props[k])
except ValueError:
props[k] = 2
if isinstance(props.get('font_script'), string_types):
props['font_script'] = ['baseline', 'superscript',
'subscript'].index(props['font_script'])
if isinstance(props.get('underline'), string_types):
props['underline'] = {'none': 0, 'single': 1, 'double': 2,
'singleAccounting': 33,
'doubleAccounting': 34}[props['underline']]
return props
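# Illustration of the flattening performed by ``_XlsxStyler.convert`` above
# (hypothetical openpyxl-style input -> flat xlsxwriter format dict):
#     _XlsxStyler.convert({'font': {'bold': True, 'color': {'rgb': 'FF0000'}},
#                          'borders': {'top': {'style': 'thin'}}})
#     -> {'bold': True, 'font_color': 'FF0000', 'top': 1}   # 'thin' maps to 1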
class _XlsxWriter(ExcelWriter):
engine = 'xlsxwriter'
supported_extensions = ('.xlsx',)
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, mode='w',
**engine_kwargs):
# Use the xlsxwriter module as the Excel writer.
import xlsxwriter
if mode == 'a':
raise ValueError('Append mode is not supported with xlsxwriter!')
super(_XlsxWriter, self).__init__(path, engine=engine,
date_format=date_format,
datetime_format=datetime_format,
mode=mode,
**engine_kwargs)
self.book = xlsxwriter.Workbook(path, **engine_kwargs)
def save(self):
"""
Save workbook to disk.
"""
return self.book.close()
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {'null': None}
if _validate_freeze_panes(freeze_panes):
wks.freeze_panes(*(freeze_panes))
for cell in cells:
val, fmt = self._value_with_fmt(cell.val)
stylekey = json.dumps(cell.style)
if fmt:
stylekey += fmt
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self.book.add_format(
_XlsxStyler.convert(cell.style, fmt))
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
cell.val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
register_writer(_XlsxWriter)
| GuessWhoSamFoo/pandas | pandas/io/excel.py | Python | bsd-3-clause | 66,902 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs hello_world.py, through hello_world.isolate, locally in a temporary
directory.
"""
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
def run(cmd):
print('Running: %s' % ' '.join(cmd))
cmd = [sys.executable, os.path.join(ROOT_DIR, '..', cmd[0])] + cmd[1:]
if sys.platform != 'win32':
cmd = ['time', '-p'] + cmd
subprocess.check_call(cmd)
def main():
  # Uncomment to make isolate.py output logs.
#os.environ['ISOLATE_DEBUG'] = '1'
try:
# All the files are put in a temporary directory. This is optional and
# simply done so the current directory doesn't have the following files
# created:
# - hello_world.isolated
# - hello_world.isolated.state
# - cache/
# - hashtable/
tempdir = tempfile.mkdtemp(prefix='hello_world')
cachedir = os.path.join(tempdir, 'cache')
hashtabledir = os.path.join(tempdir, 'hashtable')
isolateddir = os.path.join(tempdir, 'isolated')
isolated = os.path.join(isolateddir, 'hello_world.isolated')
os.mkdir(isolateddir)
print('Archiving')
run(
[
'isolate.py',
'hashtable',
'--isolate', os.path.join(ROOT_DIR, 'hello_world.isolate'),
'--isolated', isolated,
'--outdir', hashtabledir,
])
print('\nRunning')
hashval = hashlib.sha1(open(isolated, 'rb').read()).hexdigest()
run(
[
'run_isolated.py',
'--cache', cachedir,
'--remote', hashtabledir,
'--hash', hashval,
])
finally:
shutil.rmtree(tempdir)
return 0
if __name__ == '__main__':
sys.exit(main())
| leighpauls/k2cro4 | tools/swarm_client/example/run_example_local.py | Python | bsd-3-clause | 1,893 |
import tests.periodicities.period_test as per
per.buildModel((360 , 'BH' , 25));
| antoinecarme/pyaf | tests/periodicities/Business_Hour/Cycle_Business_Hour_25_BH_360.py | Python | bsd-3-clause | 83 |
"""
byceps.blueprints.site.board.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from . import board_access_grant, last_category_view, last_topic_view
| homeworkprod/byceps | byceps/services/board/dbmodels/__init__.py | Python | bsd-3-clause | 250 |
from regulus.tree.tree import Node
from regulus.tree import *
class TestNode(Node):
def __init__(self, **kwargs):
super(TestNode, self).__init__(**kwargs)
def __str__(self):
if self.data is None:
return "<none>"
return self.data
def show(root):
for node in depth_first(root):
print(node.data)
def show_depth(root, depth=0):
print('{}{} d={}'.format(' '*depth, str(root), depth))
for child in root.children:
show_depth(child, depth+1)
root = TestNode(data='root')
n1 = TestNode(data='.1',parent=root)
n2 = TestNode(data='.2',parent=root)
n11 = TestNode(data='.1.1',parent=n1)
n12 = TestNode(data='.1.2',parent=n1)
n21 = TestNode(data='.2.1',parent=n2)
n211 = TestNode(data='.2.1.1',parent=n21)
n212 = TestNode(data='.2.1.2',parent=n21)
n22 = TestNode(data='.2.2',parent=n2)
print('breath first. pre')
for n in breath_first(root):
print(n.data)
print('depth = ', root.depth())
print('breath first. post')
for n in breath_first(root, post=True):
print(n.data)
print('breath first. both')
for n in breath_first(root, both=True):
print(n.data)
print('depth first. pre')
for n in depth_first(root):
print(n.data)
print('depth first. post')
for n in depth_first(root, post=True):
print(n.data)
values = dict([('root', 2),
('.1', 5),
('.1.1', 15),
('.1.2', 3),
('.2', 6),
('.2.1', 20),
('.2.2', 9),
('.2.1.1', 0),
('.2.1.2', 30),
])
print('best first')
for v, n in best_first(root, value=lambda n: values[n.data]):
print(v, n.data)
print('reduce .1')
x = reduce(root, lambda n: '.1' in n.data, factory=TestNode)
show_depth(x)
print('reduce .2')
x = reduce(root, lambda n: '.2' in n.data, factory=TestNode)
show_depth(x) | yarden-livnat/regulus | tests/tree_test.py | Python | bsd-3-clause | 1,870 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009, Roboterclub Aachen e.V.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Roboterclub Aachen e.V. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ROBOTERCLUB AACHEN E.V. ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ROBOTERCLUB AACHEN E.V. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import os
import builder_base
import filter.cpp as filter
# -----------------------------------------------------------------------------
def filter_subtype(value):
""" value needs to be parser.structure.SubType """
type = filter.typeName(value.subtype.name)
variable = filter.variableName(value.name)
if value.subtype.isArray:
return "%s %s[%s]" % (type, variable, value.subtype.count)
else:
return "%s %s" % (type, variable)
def filter_constructor(class_, default=True):
if default:
return "%s()" % filter.typeName(class_.name)
else:
parameter = []
for item in class_.iter():
if item.subtype.isArray:
				raise builder_base.BuilderException("Array handling is incomplete " \
					"right now! Could not generate code for %s" % item)
else:
type = filter.typeName(item.subtype.name)
name = filter.variableName(item.name)
parameter.append("%s %s" % (type, name))
if len(parameter) > 0:
return "%s(%s)" % (filter.typeName(class_.name), ", ".join(parameter))
else:
return ""
def filter_initialization_list(class_, default=True):
initList = []
for item in class_.iter():
if item.subtype.isArray:
			raise builder_base.BuilderException("Array handling is incomplete " \
				"right now! Could not generate code for %s" % item)
else:
type = filter.typeName(item.subtype.name)
name = filter.variableName(item.name)
if item.value is not None:
defaultValue = item.value
else :
defaultValue = ''
if default:
initList.append("%s(%s)" % (name, defaultValue))
else:
initList.append("%s(%s)" % (name, name))
return ", ".join(initList)
# -----------------------------------------------------------------------------
class TypeBuilder(builder_base.Builder):
VERSION = "0.1"
def setup(self, optparser):
optparser.add_option(
"--namespace",
dest = "namespace",
default = "robot",
help = "Namespace of the generated identifiers.")
optparser.add_option(
"--source_path",
dest = "source_path",
default = None,
help = "Output path for the source file")
optparser.add_option(
"--header_path",
dest = "header_path",
default = None,
help = "Output path for the header file")
optparser.add_option(
"--quote_include_path",
dest = "quote_include_path",
default = None,
help = "Include directive for the source file")
optparser.add_option(
"--system_include_path",
dest = "system_include_path",
default = None,
help = "Include directive for the source file")
def generate(self):
# check the commandline options
if self.options.outpath:
source_path = self.options.outpath
header_path = self.options.outpath
elif self.options.source_path and self.options.header_path:
source_path = self.options.source_path
header_path = self.options.header_path
else:
raise builder_base.BuilderException("You need to provide an output path!")
if self.options.system_include_path:
includeDirective = '<%s>' % os.path.join(self.options.system_include_path, 'packets.hpp')
elif self.options.quote_include_path:
includeDirective = '"%s"' % os.path.join(self.options.system_include_path, 'packets.hpp')
else:
includeDirective = '"%s"' % 'packets.hpp'
if self.options.namespace:
namespace = self.options.namespace
else:
raise builder_base.BuilderException("You need to provide a namespace!")
cppFilter = {
'enumElement': filter.enumElement,
'enumElementStrong': filter.typeName,
'variableName': filter.variableName,
'typeName': filter.typeName,
'subtype': filter_subtype,
'generateConstructor': filter_constructor,
'generateInitializationList': filter_initialization_list
}
template_header = self.template('templates/robot_packets.hpp.tpl', filter=cppFilter)
template_source = self.template('templates/robot_packets.cpp.tpl', filter=cppFilter)
substitutions = {
'components': self.tree.components,
'actions': self.tree.components.actions,
'events': self.tree.events,
'packets': self.tree.types,
'includeDirective': includeDirective,
'namespace': namespace
}
file = os.path.join(header_path, 'packets.hpp')
self.write(file, template_header.render(substitutions) + "\n")
file = os.path.join(source_path, 'packets.cpp')
self.write(file, template_source.render(substitutions) + "\n")
# -----------------------------------------------------------------------------
if __name__ == '__main__':
TypeBuilder().run()
| dergraaf/xpcc | tools/system_design/builder/cpp_packets.py | Python | bsd-3-clause | 6,142 |
import unittest
from prestans.provider.cache import Base
class CacheBaseUnitTest(unittest.TestCase):
def test_debug(self):
base = Base()
self.assertEqual(base.debug, False)
base.debug = True
self.assertEqual(base.debug, True)
| anomaly/prestans | tests/provider/test_cache.py | Python | bsd-3-clause | 267 |
from baseneuron import BaseNeuron
import numpy as np
import pycuda.gpuarray as garray
from pycuda.tools import dtype_to_ctype
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
class MorrisLecarCopy(BaseNeuron):
def __init__(self, n_dict, V, dt , debug=False):
self.num_neurons = len(n_dict['id'])
self.dt = np.double(dt)
self.steps = max(int(round(dt / 1e-5)),1)
self.debug = debug
self.ddt = dt / self.steps
self.V = V
self.n = garray.to_gpu(np.asarray(n_dict['initn'], dtype=np.float64))
self.V_1 = garray.to_gpu(np.asarray(n_dict['V1'], dtype=np.float64))
self.V_2 = garray.to_gpu(np.asarray(n_dict['V2'], dtype=np.float64))
self.V_3 = garray.to_gpu(np.asarray(n_dict['V3'], dtype=np.float64))
self.V_4 = garray.to_gpu(np.asarray(n_dict['V4'], dtype=np.float64))
self.Tphi = garray.to_gpu(np.asarray(n_dict['phi'], dtype=np.float64))
self.offset = garray.to_gpu(np.asarray(n_dict['offset'],
dtype=np.float64))
cuda.memcpy_htod(int(self.V), np.asarray(n_dict['initV'], dtype=np.double))
self.update = self.get_euler_kernel()
@property
def neuron_class(self): return True
def eval(self, st = None):
self.update.prepared_async_call(self.update_grid, self.update_block, st, self.V, self.n.gpudata, self.num_neurons, self.I.gpudata, self.ddt*1000, self.steps, self.V_1.gpudata, self.V_2.gpudata, self.V_3.gpudata, self.V_4.gpudata, self.Tphi.gpudata, self.offset.gpudata)
def get_euler_kernel(self):
template = """
#define NVAR 2
#define NNEU %(nneu)d //NROW * NCOL
#define V_L (-0.05)
#define V_Ca 0.1
#define V_K (-0.07)
#define g_Ca 1.1
#define g_K 2.0
#define g_L 0.5
__device__ %(type)s compute_n(%(type)s V, %(type)s n, %(type)s V_3, %(type)s V_4, %(type)s Tphi)
{
%(type)s n_inf = 0.5 * (1 + tanh((V - V_3) / V_4));
%(type)s dn = Tphi * cosh(( V - V_3) / (V_4*2)) * (n_inf - n);
return dn;
}
__device__ %(type)s compute_V(%(type)s V, %(type)s n, %(type)s I, %(type)s V_1, %(type)s V_2, %(type)s offset)
{
%(type)s m_inf = 0.5 * (1+tanh((V - V_1)/V_2));
%(type)s dV = (I - g_L * (V - V_L) - g_K * n * (V - V_K) - g_Ca * m_inf * (V - V_Ca) + offset);
return dV;
}
__global__ void
hhn_euler_multiple(%(type)s* g_V, %(type)s* g_n, int num_neurons, %(type)s* I_pre, %(type)s dt, int nsteps, \
%(type)s* V_1, %(type)s* V_2, %(type)s* V_3, %(type)s* V_4, %(type)s* Tphi, %(type)s* offset)
{
int bid = blockIdx.x;
int cart_id = bid * NNEU + threadIdx.x;
%(type)s I, V, n;
if(cart_id < num_neurons)
{
V = g_V[cart_id];
I = I_pre[cart_id];
n = g_n[cart_id];
%(type)s dV, dn;
for(int i = 0; i < nsteps; ++i)
{
dn = compute_n(V, n, V_3[cart_id], V_4[cart_id], Tphi[cart_id]);
dV = compute_V(V, n, I, V_1[cart_id], V_2[cart_id], offset[cart_id]);
V += dV * dt;
n += dn * dt;
}
g_V[cart_id] = V;
g_n[cart_id] = n;
}
}
"""#Used 40 registers, 1024+0 bytes smem, 84 bytes cmem[0], 308 bytes cmem[2], 28 bytes cmem[16]
dtype = np.double
scalartype = dtype.type if dtype.__class__ is np.dtype else dtype
self.update_block = (128,1,1)
self.update_grid = ((self.num_neurons - 1) / 128 + 1, 1)
mod = SourceModule(template % {"type": dtype_to_ctype(dtype), "nneu": self.update_block[0]}, options=["--ptxas-options=-v"])
func = mod.get_function("hhn_euler_multiple")
func.prepare([np.intp, np.intp, np.int32, np.intp, scalartype, np.int32, np.intp, np.intp, np.intp, np.intp, np.intp, np.intp])
return func
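# For reference, the kernel above advances the Morris-Lecar equations with a
# fixed-step forward Euler scheme (dt is subdivided into `steps` substeps):
#     dV/dt = I - g_L*(V - V_L) - g_K*n*(V - V_K) - g_Ca*m_inf(V)*(V - V_Ca) + offset
#     dn/dt = phi * cosh((V - V_3)/(2*V_4)) * (n_inf(V) - n)
# where m_inf(V) = 0.5*(1 + tanh((V - V_1)/V_2)) and
#       n_inf(V) = 0.5*(1 + tanh((V - V_3)/V_4)).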
| cerrno/neurokernel | neurokernel/LPU/neurons/MorrisLecarCopy.py | Python | bsd-3-clause | 3,978 |
__author__ = 'Cam Moore'
config = {
'description': 'regexquiz is the simple shell for answering regular expression quizzes for ICS 215',
'author': 'Cam Moore',
'author_email': '[email protected]',
'version': '1.1',
'install_requires': [],
'packages': ['regexquiz'],
'name': 'regexquiz'
}
try:
from setuptools import setup
config['entry_points'] = {
'console_scripts' : [
'regexquiz = regexquiz.cmdline:main'
],
}
except ImportError:
from distutils.core import setup
config['scripts'] = ['bin/regexquiz', 'bin/regexquiz.bat']
setup(**config)
| cammoore/RegExQuiz | setup.py | Python | bsd-3-clause | 627 |
#!/usr/bin/env python
__author__ = "Adam Simpkin, and Felix Simkovic"
__contributing_authors__ = "Jens Thomas, and Ronan Keegan"
__credits__ = "Daniel Rigden, William Shepard, Charles Ballard, Villi Uski, and Andrey Lebedev"
__date__ = "05 May 2017"
__email__ = "[email protected]"
__version__ = "0.1"
import argparse
import os
import sys
from pyjob.stopwatch import StopWatch
import simbad.command_line
import simbad.exit
import simbad.util
import simbad.util.logging_util
import simbad.util.pyrvapi_results
logger = None
def simbad_argparse():
"""Create the argparse options"""
p = argparse.ArgumentParser(
description="SIMBAD: Sequence Independent Molecular replacement Based on Available Database",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
simbad.command_line._argparse_core_options(p)
simbad.command_line._argparse_job_submission_options(p)
simbad.command_line._argparse_contaminant_options(p)
simbad.command_line._argparse_morda_options(p)
simbad.command_line._argparse_lattice_options(p)
simbad.command_line._argparse_rot_options(p)
simbad.command_line._argparse_mr_options(p)
simbad.command_line._argparse_mtz_options(p)
p.add_argument('mtz', help="The path to the input mtz file")
return p
def main():
"""Main SIMBAD routine"""
args = simbad_argparse().parse_args()
args.work_dir = simbad.command_line.get_work_dir(
args.run_dir, work_dir=args.work_dir, ccp4_jobid=args.ccp4_jobid, ccp4i2_xml=args.ccp4i2_xml
)
log_file = os.path.join(args.work_dir, 'simbad.log')
debug_log_file = os.path.join(args.work_dir, 'debug.log')
global logger
logger = simbad.util.logging_util.setup_logging(args.debug_lvl, logfile=log_file, debugfile=debug_log_file)
if not os.path.isfile(args.amore_exe):
raise OSError("amore executable not found")
gui = simbad.util.pyrvapi_results.SimbadOutput(
args.rvapi_document, args.webserver_uri, args.display_gui, log_file, args.work_dir, ccp4i2_xml=args.ccp4i2_xml, tab_prefix=args.tab_prefix
)
simbad.command_line.print_header()
logger.info("Running in directory: %s\n", args.work_dir)
stopwatch = StopWatch()
stopwatch.start()
end_of_cycle, solution_found, all_results = False, False, {}
while not (solution_found or end_of_cycle):
# =====================================================================================
# Perform the lattice search
solution_found = simbad.command_line._simbad_lattice_search(args)
logger.info("Lattice search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found and not args.process_all:
logger.info(
"Lucky you! SIMBAD worked its charm and found a lattice match for you.")
continue
elif solution_found and args.process_all:
logger.info(
"SIMBAD thinks it has found a solution however process_all is set, continuing to contaminant search")
else:
logger.info("No results found - lattice search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'latt/lattice_mr.csv')
all_results['latt'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Perform the contaminant search
solution_found = simbad.command_line._simbad_contaminant_search(args)
logger.info("Contaminant search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found and not args.process_all:
logger.info(
"Check you out, crystallizing contaminants! But don't worry, SIMBAD figured it out and found a solution.")
continue
elif solution_found and args.process_all:
logger.info(
"SIMBAD thinks it has found a solution however process_all is set, continuing to morda search")
else:
logger.info(
"No results found - contaminant search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'cont/cont_mr.csv')
all_results['cont'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Perform the morda search
solution_found = simbad.command_line._simbad_morda_search(args)
logger.info("Full MoRDa domain search completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.lap.time_pretty)
if solution_found:
logger.info("... and SIMBAD worked once again. Get in!")
continue
else:
logger.info("No results found - full search was unsuccessful")
if args.output_pdb and args.output_mtz:
csv = os.path.join(args.work_dir, 'morda/morda_mr.csv')
all_results['morda'] = simbad.util.result_by_score_from_csv(csv, 'final_r_free', ascending=True)
gui.display_results(False, args.results_to_display)
# =====================================================================================
# Make sure we only run the loop once for now
end_of_cycle = True
if len(all_results) >= 1:
if sys.version_info.major == 3:
sorted_results = sorted(all_results.items(), key=lambda kv: (kv[1], kv))
else:
sorted_results = sorted(all_results.iteritems(), key=lambda kv: (kv[1], kv))
result = sorted_results[0][1]
simbad.util.output_files(args.work_dir, result, args.output_pdb, args.output_mtz)
stopwatch.stop()
logger.info("All processing completed in %d days, %d hours, %d minutes, and %d seconds",
*stopwatch.time_pretty)
gui.display_results(True, args.results_to_display)
if args.rvapi_document:
gui.save_document()
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.NOTSET)
try:
main()
except Exception:
simbad.exit.exit_error(*sys.exc_info())
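# Usage sketch (not taken from the SIMBAD documentation): the parser built by
# simbad_argparse() above requires a single positional MTZ path, so a minimal
# invocation of this script would look like
#     python simbad_full.py input.mtz
# where "input.mtz" is a placeholder path and every optional flag keeps its default.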
| rigdenlab/SIMBAD | simbad/command_line/simbad_full.py | Python | bsd-3-clause | 6,513 |
from django.conf.urls import patterns, include, url
# The next two lines enable the admin:
from django.contrib import admin
admin.autodiscover()
handler500 = 'myapp.views.this_server_error'
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'helloworld.views.home', name='home'),
(r'^myapp/$', 'myapp.views.index'),
    # The admin/doc line below enables admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # The next line enables the admin:
url(r'^admin/', include(admin.site.urls)),
)
| sodafree/backend | helloworld/helloworld/urls.py | Python | bsd-3-clause | 585 |
# encoding: utf8
import autoslug.fields
from django.db import migrations
from slugify import slugify
def set_initial_slug(apps, schema_editor):
Character = apps.get_model('characters', 'Character')
for character in Character.objects.all():
character.slug = slugify(character.name)
character.save()
class Migration(migrations.Migration):
dependencies = [
('characters', '0006_character_slug'),
]
operations = [
migrations.RunPython(set_initial_slug),
migrations.AlterField(
model_name='character',
name='slug',
field=autoslug.fields.AutoSlugField(unique=True, editable=False),
),
]
| wengole/eveonline-assistant | eveonline-assistant/characters/migrations/0007_character_slug_populate.py | Python | bsd-3-clause | 695 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author: Jiajie Zhang
Email: [email protected]
"""
import logging
import os
from math import log
import cv2
import numpy as np
import numpy.linalg as la
from scipy.spatial import distance as spd
import pywt
import Tyf
import tifffile as tiff
class Image(object):
""" Image class
Attributes:
        full_path: full path to the image file
        original_image: original image, may be uint16 type
        fullsize_gray_image: full-size grayscale image (float)
        exif_info: EXIF info, Tyf.TiffFile type
        features: dict of extracted image features
"""
def __init__(self, full_path):
super(Image, self).__init__()
self.full_path = full_path
self.dir, self.name = os.path.split(full_path)
self.focal_len = None
self.features = {}
self.tf = None
_, ext = os.path.splitext(full_path)
if ext.lower() in (".tiff", ".tif") and os.path.isfile(full_path):
self.original_image, self.exif_info = ImageProcessing.read_tif_image(full_path)
gray_img = cv2.cvtColor(self.original_image, cv2.COLOR_RGB2GRAY)
self.fullsize_gray_image = ImageProcessing.convert_to_float(gray_img)
else:
self.original_image = None
self.fullsize_gray_image = None
self.exif_info = None
self.reset_all()
def reset_focal_length(self):
f = self.get_exif_value("FocalLength")
if f and len(f) == 2:
self.focal_len = f[0] * 1.0 / f[1]
elif f and len(f) == 1:
self.focal_len = f[0]
else:
self.focal_len = None
def reset_all(self):
self.reset_focal_length()
self.features = {}
self.tf = None
def get_exif_value(self, name):
if not self.exif_info:
return None
info = self.exif_info[0].find(name)
if not info:
return None
else:
return info.value
class DataModel(object):
# Align options
AUTO_MASK = 1
ALIGN_STARS = 2
ALIGN_GROUND = 3
# Display options
ORIGINAL_IMAGE = 1
def __init__(self):
super(DataModel, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
self.images = []
self.ref_ind = 0
self.image_dir = None
        self.final_sky_img = None  # Of type double
        self.final_ground_img = None  # Of type double
        self.final_sky_num = 0
        self.final_ground_num = 0
# For concurrency issue
self.is_adding_image = False
# Other GUI options
self.merge_option_type = self.ALIGN_STARS
def add_image(self, path):
self.logger.debug("add_image()")
img_dir, name = os.path.split(path)
if not os.path.exists(path) or not os.path.isfile(path):
self.logger.error("File %s not exists!", path)
return False
for img in self.images:
if path == img.full_path:
self.logger.info("Image is already open. File: %s", path)
return False
if self.is_adding_image:
return False
self.is_adding_image = True
img = Image(path)
focal_len = img.get_exif_value("FocalLength")
self.images.append(img)
self.logger.debug("Loading image %s... Focal length = %s", name, focal_len)
if not self.image_dir:
self.image_dir = img_dir
self.is_adding_image = False
return True
def update_final_sky(self, img):
self.logger.debug("update_final_sky()")
self.final_sky_num += 1
if self.final_sky_img is None and self.final_sky_num == 1:
self.final_sky_img = np.copy(img)
elif self.final_sky_img is not None and self.final_sky_num > 0:
# self.final_sky_img = np.fmax(self.final_sky_img, img)
self.final_sky_img = self.final_sky_img / self.final_sky_num * (self.final_sky_num - 1) + img / self.final_sky_num
def update_final_ground(self, img):
self.logger.debug("update_final_ground()")
self.final_ground_num += 1
if self.final_ground_img is None and self.final_ground_num == 1:
self.final_ground_img = np.copy(img)
elif self.final_ground_img is not None and self.final_ground_num > 0:
self.final_ground_img = self.final_ground_img / self.final_ground_num * (self.final_ground_num - 1) + img / self.final_ground_num
def clear_images(self):
self.logger.debug("clear_images()")
self.images = []
self.reset_final_sky()
self.reset_final_ground()
self.image_dir = None
self.ref_ind = 0
self.is_adding_image = False
def reset_final_sky(self):
self.logger.debug("reset_final_sky()")
self.final_sky_img = None
self.final_sky_num = 0
def reset_final_ground(self):
self.logger.debug("reset_final_ground()")
self.final_ground_img = None
self.final_ground_num = 0
def reset_result(self):
self.logger.debug("reset_result()")
self.reset_final_sky()
self.reset_final_ground()
for img in self.images:
img.features = {}
def has_image(self):
res = len(self.images) > 0
self.logger.debug("has_image(): %s", res)
return res
def iter_images(self):
self.logger.debug("iter_images()")
return iter(self.images)
def total_images(self):
res = len(self.images)
self.logger.debug("total_images(): %s", res)
return res
def has_sky_result(self):
res = self.final_sky_img is not None
self.logger.debug("has_sky_result(): %s", res)
return res
def has_ground_result(self):
res = self.final_ground_img is not None
self.logger.debug("has_ground_result(): %s", res)
return res
class ImageProcessing(object):
def __init__(self):
super(ImageProcessing, self).__init__()
@staticmethod
def _try_wavedec(img_blr, resize_factor=0.25):
img_shape = img_blr.shape
need_resize = abs(resize_factor - 1) > 0.001
level = int(6 - log(1 / resize_factor, 2))
if need_resize:
img_blr_resize = cv2.resize(img_blr, None, fx=resize_factor, fy=resize_factor)
else:
img_blr_resize = img_blr
coeffs = pywt.wavedec2(img_blr_resize, "db8", level=level)
coeffs[0].fill(0)
coeffs[-1][0].fill(0)
coeffs[-1][1].fill(0)
coeffs[-1][2].fill(0)
img_rec_resize = pywt.waverec2(coeffs, "db8")
if need_resize:
img_rec = cv2.resize(img_rec_resize, (img_shape[1], img_shape[0]))
else:
img_rec = img_rec_resize
return img_rec
@staticmethod
def detect_star_points(img_gray, mask=None, resize_length=2200):
logging.debug("detect_star_point()")
logging.debug("resize_length = %s", resize_length)
sigma = 3
img_shape = img_gray.shape
img_blr = cv2.GaussianBlur(img_gray, (9, 9), sigma)
img_blr = (img_blr - np.mean(img_blr)) / (np.max(img_blr) - np.min(img_blr))
resize_factor = 1
while max(img_shape) * resize_factor > resize_length:
resize_factor *= 0.5
logging.debug("calc mask...")
s = int(max(img_shape) * 0.02 * resize_factor * 2)
tmp_mask = cv2.resize(img_gray, None, fx=resize_factor, fy=resize_factor)
tmp_mask = np.logical_and(tmp_mask < np.percentile(tmp_mask, 10), tmp_mask < 0.15).astype(np.uint8) * 255
logging.debug("calc mask logical select done")
tmp_mask = 255 - cv2.dilate(tmp_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (s, s)))
tmp_mask = cv2.resize(tmp_mask, (img_shape[1], img_shape[0]))
if mask is None:
mask = tmp_mask > 127
else:
mask = np.logical_and(tmp_mask > 127, mask > 0)
logging.debug("calc mask done")
mask_rate = np.sum(mask) * 100.0 / np.prod(mask.shape)
logging.debug("mask rate: %.2f", mask_rate)
if mask_rate < 50:
mask = np.ones(tmp_mask.shape, dtype="bool")
while True:
try:
img_rec = ImageProcessing._try_wavedec(img_blr, resize_factor=resize_factor) * mask
bw = ((img_rec > np.percentile(img_rec[mask], 99.5)) * mask).astype(np.uint8) * 255
# img_rec = ImageProcessing._try_wavedec(img_blr, resize_factor=resize_factor)
# bw = ((img_rec > np.percentile(img_rec, 99.5))).astype(np.uint8) * 255
bw = cv2.morphologyEx(bw, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
_, contours, _ = cv2.findContours(np.copy(bw), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
contours = filter(lambda x: len(x) > 5, contours)
logging.debug("%d star points detected", len(contours))
if len(contours) > 400:
break
else:
raise ValueError, "No enough points"
except ValueError as e:
if resize_factor >= 1:
raise ValueError, "Cannot detect enough star points"
else:
resize_factor *= 2
logging.debug("resize factor = %f", resize_factor)
elps = map(cv2.fitEllipse, contours)
centroids = np.array(map(lambda e: e[0], elps))
areas = np.array(map(lambda x: cv2.contourArea(x) + 0.5 * len(x), contours))
eccentricities = np.sqrt(np.array(map(lambda x: 1 - (x[1][0] / x[1][1]) ** 2, elps)))
mask = np.zeros(bw.shape, np.uint8)
intensities = np.zeros(areas.shape)
for i in range(len(contours)):
            cv2.drawContours(mask, [contours[i]], 0, 255, -1)  # wrap in a list so the single contour is filled
rect = cv2.boundingRect(contours[i])
val = cv2.mean(img_rec[rect[1]:rect[1] + rect[3] + 1, rect[0]:rect[0] + rect[2] + 1],
mask[rect[1]:rect[1] + rect[3] + 1, rect[0]:rect[0] + rect[2] + 1])
mask[rect[1]:rect[1] + rect[3] + 1, rect[0]:rect[0] + rect[2] + 1] = 0
intensities[i] = val[0]
        # np.logical_and only accepts two arrays, so reduce over all of the conditions
        inds = np.logical_and.reduce((areas > 10, areas < 200, eccentricities < .9))
        inds = np.logical_and.reduce((inds, areas > np.percentile(areas, 20), intensities > np.percentile(intensities, 20)))
star_pts = centroids[inds] # [x, y]
areas = areas[inds]
intensities = intensities[inds]
return star_pts, areas * intensities
@staticmethod
def convert_to_spherical_coord(star_pts, img_size, f):
logging.debug("convert_coord_img_sph()")
p0 = (star_pts - img_size / 2.0) / (np.max(img_size) / 2)
p = p0 * 18 # Fullframe half size, 18mm
lam = np.arctan2(p[:, 0], f)
phi = np.arcsin(p[:, 1] / np.sqrt(np.sum(p ** 2, axis=1) + f ** 2))
return np.stack((lam, phi), axis=-1)
@staticmethod
def extract_point_features(sph, vol, k=15):
logging.debug("extract_point_features()")
pts_num = len(sph)
vec = np.stack((np.cos(sph[:, 1]) * np.cos(sph[:, 0]),
np.cos(sph[:, 1]) * np.sin(sph[:, 0]),
np.sin(sph[:, 1])), axis=-1)
dist_mat = 1 - spd.cdist(vec, vec, "cosine")
vec_dist_ind = np.argsort(-dist_mat)
dist_mat = np.where(dist_mat < -1, -1, np.where(dist_mat > 1, 1, dist_mat))
dist_mat = np.arccos(dist_mat[np.array(range(pts_num))[:, np.newaxis], vec_dist_ind[:, :2 * k]])
vol = vol[vec_dist_ind[:, :2 * k]]
vol_ind = np.argsort(-vol * dist_mat)
def make_cross_mat(v):
return np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
theta_feature = np.zeros((pts_num, k))
rho_feature = np.zeros((pts_num, k))
vol_feature = np.zeros((pts_num, k))
for i in range(pts_num):
v0 = vec[i]
vs = vec[vec_dist_ind[i, vol_ind[i, :k]]]
angles = np.inner(vs, make_cross_mat(v0))
angles = angles / la.norm(angles, axis=1)[:, np.newaxis]
cr = np.inner(angles, make_cross_mat(angles[0]))
s = la.norm(cr, axis=1) * np.sign(np.inner(cr, v0))
c = np.inner(angles, angles[0])
theta_feature[i] = np.arctan2(s, c)
rho_feature[i] = dist_mat[i, vol_ind[i, :k]]
vol_feature[i] = vol[i, vol_ind[i, :k]]
fx = np.arange(-np.pi, np.pi, 3 * np.pi / 180)
features = np.zeros((pts_num, len(fx)))
for i in range(k):
sigma = 2.5 * np.exp(-rho_feature[:, i] * 100) + .04
tmp = np.exp(-np.subtract.outer(theta_feature[:, i], fx) ** 2 / 2 / sigma[:, np.newaxis] ** 2)
tmp = tmp * (vol_feature[:, i] * rho_feature[:, i] ** 2 / sigma)[:, np.newaxis]
features += tmp
features = features / np.sqrt(np.sum(features ** 2, axis=1)).reshape((pts_num, 1))
return features
@staticmethod
def find_initial_match(feature1, feature2):
logging.debug("find_initial_match()")
measure_dist_mat = spd.cdist(feature1["feature"], feature2["feature"], "cosine")
pts1, pts2 = feature1["pts"], feature2["pts"]
pts_mean = np.mean(np.vstack((pts1, pts2)), axis=0)
pts_min = np.min(np.vstack((pts1, pts2)), axis=0)
pts_max = np.max(np.vstack((pts1, pts2)), axis=0)
pts_dist_mat = spd.cdist((pts1 - pts_mean) / (pts_max - pts_min), (pts2 - pts_mean) / (pts_max - pts_min),
"euclidean")
alpha = 0.00
dist_mat = measure_dist_mat * (1 - alpha) + pts_dist_mat * alpha
num1, num2 = dist_mat.shape
# For a given point p1 in image1, find the most similar point p12 in image2,
# then find the point p21 in image1 that most similar to p12, check the
# distance between p1 and p21.
idx12 = np.argsort(dist_mat, axis=1)
idx21 = np.argsort(dist_mat, axis=0)
ind = idx21[0, idx12[:, 0]] == range(num1)
# Check Euclidean distance between the nearest pair
d_th = min(np.percentile(dist_mat[range(num1), idx12[:, 0]], 30),
np.percentile(dist_mat[idx21[0, :], range(num2)], 30))
ind = np.logical_and(ind, dist_mat[range(num1), idx12[:, 0]] < d_th)
pair_idx = np.stack((np.where(ind)[0], idx12[ind, 0]), axis=-1)
# Check angular distance between the nearest pair
xyz1 = np.stack((np.cos(feature1["sph"][:, 1]) * np.cos(feature1["sph"][:, 0]),
np.cos(feature1["sph"][:, 1]) * np.sin(feature1["sph"][:, 0]),
np.sin(feature1["sph"][:, 1])), axis=-1)
xyz2 = np.stack((np.cos(feature2["sph"][:, 1]) * np.cos(feature2["sph"][:, 0]),
np.cos(feature2["sph"][:, 1]) * np.sin(feature2["sph"][:, 0]),
np.sin(feature2["sph"][:, 1])), axis=-1)
theta = np.arccos(np.sum(xyz1[pair_idx[:, 0]] * xyz2[pair_idx[:, 1]], axis=1))
theta_th = min(np.percentile(theta, 75), np.pi / 6)
pts_dist = la.norm(feature1["pts"][pair_idx[:, 0]] - feature2["pts"][pair_idx[:, 1]], axis=1)
dist_th = max(np.max(feature1["pts"]), np.max(feature2["pts"])) * 0.3
pair_idx = pair_idx[np.logical_and(theta < theta_th, pts_dist < dist_th)]
logging.debug("find {0} pairs for initial".format(len(pair_idx)))
return pair_idx
@staticmethod
def fine_tune_transform(feature1, feature2, init_pair_idx):
ind = []
k = 1
while len(ind) < 0.6 * min(len(feature1["pts"]), len(feature2["pts"])) and k < 10:
# Step 1. Randomly choose 20 points evenly distributed on the image
rand_pts = np.random.rand(20, 2) * (np.amax(feature1["pts"], axis=0) - np.amin(feature1["pts"], axis=0)) * \
np.array([1, 0.8]) + np.amin(feature1["pts"], axis=0)
# Step 2. Find nearest points from feature1
dist_mat = spd.cdist(rand_pts, feature1["pts"][init_pair_idx[:, 0]])
tmp_ind = np.argmin(dist_mat, axis=1)
# Step 3. Use these points to find a homography
tf = cv2.findHomography(feature1["pts"][init_pair_idx[tmp_ind, 0]], feature2["pts"][init_pair_idx[tmp_ind, 1]],
method=cv2.RANSAC, ransacReprojThreshold=5)
# Then use the transform find more matched points
pts12 = cv2.perspectiveTransform(np.array([[p] for p in feature1["pts"]], dtype="float32"), tf[0])[:, 0, :]
dist_mat = spd.cdist(pts12, feature2["pts"])
num1, num2 = dist_mat.shape
idx12 = np.argsort(dist_mat, axis=1)
tmp_ind = np.argwhere(np.array([dist_mat[i, idx12[i, 0]] for i in range(num1)]) < 5)
if len(tmp_ind) > len(ind):
ind = tmp_ind
logging.debug("len(ind) = %d, len(feature) = %d", len(ind), min(len(feature1["pts"]), len(feature2["pts"])))
k += 1
pair_idx = np.hstack((ind, idx12[ind, 0]))
tf = cv2.findHomography(feature1["pts"][pair_idx[:, 0]], feature2["pts"][pair_idx[:, 1]],
method=cv2.RANSAC, ransacReprojThreshold=5)
return tf, pair_idx
@staticmethod
def convert_to_float(np_image):
if np_image.dtype == np.float32 or np_image.dtype == np.float64:
return np.copy(np_image)
else:
return np_image.astype("float32") / np.iinfo(np_image.dtype).max
@staticmethod
def read_tif_image(full_path):
img = tiff.imread(full_path)
exif_info = Tyf.open(full_path)
return img, exif_info
@staticmethod
def save_tif_image(full_path, img, exif=None):
if img.dtype != np.uint8 and img.dtype != np.uint16:
return
logging.debug("saving image...")
tiff.imsave(full_path, img)
tmp_exif = Tyf.open(full_path)
tmp_exif.load_raster()
if exif and isinstance(exif, Tyf.TiffFile):
logging.debug("saving exif...")
exif[0].stripes = tmp_exif[0].stripes
exif.save(full_path)
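# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hedged example of the pixel-to-spherical mapping used above:
# convert_to_spherical_coord() normalizes pixel offsets by half of the longer
# image side and scales them to half of the 36mm full-frame width (18mm) before
# computing (lambda, phi). The point values, image size and 24mm focal length
# below are arbitrary placeholders.
def _example_spherical_conversion():
    pts = np.array([[3000.0, 2000.0], [100.0, 50.0]])  # star centroids as [x, y] pixels
    img_size = np.array([6000, 4000])                   # image width and height in pixels
    focal_len = 24.0                                    # focal length in mm (placeholder)
    return ImageProcessing.convert_to_spherical_coord(pts, img_size, focal_len)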
if __name__ == "__main__":
logging_level = logging.DEBUG
logging_format = "%(asctime)s (%(name)s) [%(levelname)s] line %(lineno)d: %(message)s"
logging.basicConfig(format=logging_format, level=logging_level)
data_model = DataModel()
img_tmpl = u"/Volumes/ZJJ-4TB/Photos/17.08.21 Eclipse Trip/6DII/IMG_{:04d}_0.tif"
for p in [img_tmpl.format(i) for i in (79, 80, 81, 82)]:
logging.debug("image: %s", p)
data_model.add_image(p)
ref_img = data_model.images[0]
f = ref_img.focal_len
img_shape = ref_img.fullsize_gray_image.shape
img_size = np.array([img_shape[1], img_shape[0]])
data_model.reset_result()
pts, vol = ImageProcessing.detect_star_points(ref_img.fullsize_gray_image)
sph = ImageProcessing.convert_to_spherical_coord(pts, np.array((img_shape[1], img_shape[0])), f)
feature = ImageProcessing.extract_point_features(sph, vol)
ref_img.features["pts"] = pts
ref_img.features["sph"] = sph
ref_img.features["vol"] = vol
ref_img.features["feature"] = feature
data_model.final_sky_img = np.copy(ref_img.original_image).astype("float32") / np.iinfo(
ref_img.original_image.dtype).max
img = data_model.images[1]
pts, vol = ImageProcessing.detect_star_points(img.fullsize_gray_image)
sph = ImageProcessing.convert_to_spherical_coord(pts, img_size, f)
feature = ImageProcessing.extract_point_features(sph, vol)
img.features["pts"] = pts
img.features["sph"] = sph
img.features["vol"] = vol
img.features["feature"] = feature
pair_idx = ImageProcessing.find_initial_match(img.features, ref_img.features)
tf, pair_idx = ImageProcessing.fine_tune_transform(img.features, ref_img.features, pair_idx)
img_tf = cv2.warpPerspective(img.original_image, tf[0], tuple(img_size))
img_tf = img_tf.astype("float32") / np.iinfo(img_tf.dtype).max
data_model.final_sky_img = data_model.final_sky_img / 2 + img_tf / 2
result_img = (data_model.final_sky_img * np.iinfo("uint16").max).astype("uint16")
ImageProcessing.save_tif_image("test.tif", result_img, data_model.images[0].exif_info)
| LoveDaisy/star_alignment | python/DataModel.py | Python | bsd-3-clause | 20,257 |
'''
A WSGI Middleware is a function or callable object similar to a
:ref:`WSGI application handlers <wsgi-handlers>`
with the only difference that it can return nothing (``None``).
Middleware can be used in conjunction with a
:ref:`WsgiHandler <wsgi-handler>` or any
other handler which iterate through a list of middleware in a similar
way (for example django wsgi handler).
.. important::
An asynchronous WSGI middleware is a callable accepting a WSGI
``environ`` and ``start_response`` as the only input parameters and
    it must return an :ref:`asynchronous iterator <wsgi-async-iter>`
or nothing.
The two most important wsgi middleware in pulsar are:
* the :ref:`Router <wsgi-router>` for serving dynamic web applications
* the :ref:`MediaRouter <wsgi-media-router>` for serving static files
In addition, pulsar provides the following four middleware which don't
serve requests; instead they perform initialisation and sanity checks.
.. _wsgi-additional-middleware:
Clean path
~~~~~~~~~~~~~~~~~~
.. autofunction:: clean_path_middleware
Authorization
~~~~~~~~~~~~~~~~~~
.. autofunction:: authorization_middleware
.. _wait-for-body-middleware:
Wait for request body
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: wait_for_body_middleware
Middleware in Executor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: middleware_in_executor
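Example
~~~~~~~~~~~~~~~~~~
A minimal sketch (not taken from pulsar itself) of a middleware of the kind
described above: a plain callable accepting ``environ`` and ``start_response``
which mutates the environ and returns nothing, leaving the response to later
middleware or to the final application handler::
    def strip_trailing_slash_middleware(environ, start_response=None):
        path = environ.get('PATH_INFO', '')
        if len(path) > 1 and path.endswith('/'):
            environ['PATH_INFO'] = path.rstrip('/')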
'''
import re
from io import BytesIO
from functools import wraps
from asyncio import get_event_loop
from pulsar.api import HttpRedirect
from .auth import parse_authorization_header
def clean_path_middleware(environ, start_response=None):
'''Clean url from double slashes and redirect if needed.'''
path = environ['PATH_INFO']
if path and '//' in path:
url = re.sub("/+", '/', path)
if not url.startswith('/'):
url = '/%s' % url
qs = environ['QUERY_STRING']
if qs:
url = '%s?%s' % (url, qs)
raise HttpRedirect(url)
def authorization_middleware(environ, start_response=None):
'''Parse the ``HTTP_AUTHORIZATION`` key in the ``environ``.
If available, set the ``http.authorization`` key in ``environ`` with
the result obtained from :func:`~.parse_authorization_header` function.
'''
key = 'http.authorization'
c = environ.get(key)
if c is None:
code = 'HTTP_AUTHORIZATION'
if code in environ:
environ[key] = parse_authorization_header(environ[code])
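# Hedged illustration (not part of pulsar's public API): after
# authorization_middleware has run, a request carrying an ``Authorization``
# header exposes the parsed value under ``environ['http.authorization']``.
# The Basic credentials below encode the placeholder pair "user:pass"; what
# parse_authorization_header returns for them is left to the .auth module.
def _example_authorization_environ():
    environ = {'HTTP_AUTHORIZATION': 'Basic dXNlcjpwYXNz'}
    authorization_middleware(environ)
    return environ.get('http.authorization')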
async def wait_for_body_middleware(environ, start_response=None):
'''Use this middleware to wait for the full body.
    This middleware waits for the full body to be received before letting
    other middleware process the request.
Useful when using synchronous web-frameworks such as :django:`django <>`.
'''
if environ.get('wsgi.async'):
try:
chunk = await environ['wsgi.input'].read()
except TypeError:
chunk = b''
environ['wsgi.input'] = BytesIO(chunk)
environ.pop('wsgi.async')
def middleware_in_executor(middleware):
'''Use this middleware to run a synchronous middleware in the event loop
executor.
Useful when using synchronous web-frameworks such as :django:`django <>`.
'''
@wraps(middleware)
def _(environ, start_response):
loop = get_event_loop()
return loop.run_in_executor(None, middleware, environ, start_response)
return _
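# Usage sketch (an assumption, not taken from the pulsar docs):
# middleware_in_executor simply wraps a blocking middleware so that it runs in
# the event-loop executor. ``_blocking_middleware`` is a placeholder standing in
# for e.g. a django WSGI handler; any callable with the
# (environ, start_response) signature would do.
def _blocking_middleware(environ, start_response):
    # placeholder for blocking work (database access, file IO, ...)
    return None
_example_executor_middleware = middleware_in_executor(_blocking_middleware)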
| quantmind/pulsar | pulsar/apps/wsgi/middleware.py | Python | bsd-3-clause | 3,415 |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.template.context_processors import csrf
from django.http import HttpResponse
import cStringIO
from data.models import RadiosondeMeasurement, WeatherMeasurement, MeasuringDevice
from forms import WeatherMeasurementForm, RadiosondeMeasurementForm
import numpy as np
import scipy.io
import xlsxwriter
@login_required
def index(request):
"""
Main view for the page, handles the forms.
"""
    # Fetch POST data and redirect to the corresponding view if a button has been clicked.
if request.POST:
form_values = request.POST
if 'matlab-weather' in request.POST:
form_weather = WeatherMeasurementForm(form_values)
request.session['POSTweather'] = form_values
if form_weather.is_valid():
return matlab_weather(request, form_weather)
if 'matlab-radiosonde' in request.POST:
form_radiosonde = RadiosondeMeasurementForm(form_values)
request.session['POSTradiosonde'] = form_values
if form_radiosonde.is_valid():
return matlab_radiosonde(request, form_radiosonde)
if 'excel-weather' in request.POST:
form_weather = WeatherMeasurementForm(form_values)
request.session['POSTweather'] = form_values
if form_weather.is_valid():
return excel_weather(request, form_weather)
if 'excel-radiosonde' in request.POST:
form_radiosonde = RadiosondeMeasurementForm(form_values)
request.session['POSTradiosonde'] = form_values
if form_radiosonde.is_valid():
return excel_radiosonde(request, form_radiosonde)
# Loads or initializes the weather data form
if 'POSTweather' in request.session:
form_weather = WeatherMeasurementForm(request.session['POSTweather'])
else:
form_weather = WeatherMeasurementForm()
    # Loads or initializes the radiosonde data form
if 'POSTradiosonde' in request.session:
form_radiosonde = RadiosondeMeasurementForm(request.session['POSTradiosonde'])
else:
form_radiosonde = RadiosondeMeasurementForm()
args = {}
args.update(csrf((request)))
args['form_weather'] = form_weather
args['form_radiosonde'] = form_radiosonde
    # Indicates if the radiosonde data form should be displayed
if MeasuringDevice.objects.filter(type = 'R').exists() & RadiosondeMeasurement.objects.exists():
args['radiosonde_data_available'] = True
else:
args['radiosonde_data_available'] = False
# Indicates if the weather data form should be displayed
if MeasuringDevice.objects.filter(type = 'S').exists() & WeatherMeasurement.objects.exists():
args['weather_data_available'] = True
else:
args['weather_data_available'] = False
args['title'] = 'Data downloads'
return render(request, 'downloads/index.html', args)
@login_required
def matlab_radiosonde(request, form):
"""
Reads the radiosonde form and converts the data into a matlab file
"""
start = form.cleaned_data['start_date_radiosonde']
end = form.cleaned_data['end_date_radiosonde']
time = form.cleaned_data['time_radiosonde']
fields = form.cleaned_data['fields_radiosonde']
query = RadiosondeMeasurement.objects.filter(date__gte = start, date__lte = end, time__in = time).values()
radiosonde = dict()
for elem in query:
date = elem['date'].strftime('y%Ym%md%d')
if date not in radiosonde:
radiosonde[date] = dict()
if elem['time'] not in radiosonde[date]:
radiosonde[date][str(elem['time'])] = []
radiosonde[date][elem['time']].append(elem)
dtfields = []
for f in fields:
dtfields.append((str(f), 'f8'))
for d in radiosonde:
for t in radiosonde[d]:
nbElems = len(radiosonde[d][t])
res = np.zeros((nbElems,), dtype=dtfields)
idx = 0
for elem in radiosonde[d][t]:
for f in fields:
res[idx][str(f)] = elem[str(f)]
idx = idx + 1
radiosonde[d][t] = res
for d in radiosonde:
if 'AM' in radiosonde[d] and 'PM' in radiosonde[d]:
dtAMPM = [('AM', np.object), ('PM', np.object)]
res = np.zeros((1,), dtype=dtAMPM)
res[0]['AM'] = radiosonde[d]['AM']
res[0]['PM'] = radiosonde[d]['PM']
radiosonde[d] = res
elif 'AM' in radiosonde[d]:
dtAM = [('AM', np.object)]
res = np.zeros((1,), dtype=dtAM)
res[0]['AM'] = radiosonde[d]['AM']
radiosonde[d] = res
elif 'PM' in radiosonde[d]:
dtAM = [('PM', np.object)]
res = np.zeros((1,), dtype=dtAM)
res[0]['PM'] = radiosonde[d]['PM']
radiosonde[d] = res
dtdays = []
for d in radiosonde:
dtdays.append((d, np.object))
dtdays.sort()
result = np.zeros((1,), dtype=dtdays)
for d in radiosonde:
result[0][d] = radiosonde[d]
fobj = cStringIO.StringIO()
response = HttpResponse(content_type='application/matlab-mat')
response['Content-Disposition'] = 'attachment; filename=radiosonde.mat'
scipy.io.savemat(fobj, {'radiosonde': result}, oned_as='column')
response.write(fobj.getvalue())
return response
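# Hedged aside (not part of the original view): the nested dtype=object arrays
# built above are what scipy.io.savemat turns into nested MATLAB structs, e.g.
# radiosonde.y2014m10d08.AM in MATLAB. The field names and values below are
# placeholders purely to illustrate the shape of that structure.
def _example_savemat_struct(fobj):
    inner = np.zeros((2,), dtype=[('pressure', 'f8'), ('temperature', 'f8')])
    outer = np.zeros((1,), dtype=[('AM', np.object)])
    outer[0]['AM'] = inner
    scipy.io.savemat(fobj, {'radiosonde': outer}, oned_as='column')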
@login_required
def matlab_weather(request, form):
"""
Reads the weather form and converts the data into a matlab file
"""
start_date = form.cleaned_data['start_date_weather']
end_date = form.cleaned_data['end_date_weather']
start_time = form.cleaned_data['start_time_weather']
end_time = form.cleaned_data['end_time_weather']
measuring_device = MeasuringDevice.objects.get(id = form.cleaned_data['measuring_device_weather'])
fields = form.cleaned_data['fields_weather']
query = WeatherMeasurement.objects.filter(date__gte = start_date, date__lte = end_date, time__gte = start_time, time__lte = end_time, device = measuring_device).values()
weather = dict()
for elem in query:
date = elem['date'].strftime('y%Ym%md%d')
time = elem['time'].strftime('h%Hm%Ms%S')
if date not in weather:
weather[date] = dict()
        if time not in weather[date]:  # compare against the formatted time key
weather[date][time] = []
weather[date][time].append(elem)
dtfields = []
for f in fields:
dtfields.append((str(f), 'f8'))
for d in weather:
for t in weather[d]:
nbElems = len(weather[d][t])
res = np.zeros((nbElems,), dtype=dtfields)
idx = 0
for elem in weather[d][t]:
for f in fields:
res[idx][str(f)] = elem[str(f)]
idx = idx + 1
weather[d][t] = res
for d in weather:
dttime = []
for t in weather[d]:
dttime.append((t, np.object))
dttime.sort()
resultTime = np.zeros((1,), dtype=dttime)
for t in weather[d]:
resultTime[0][t] = weather[d][t]
weather[d] = resultTime
dtdays = []
for d in weather:
dtdays.append((d, np.object))
dtdays.sort()
result = np.zeros((1,), dtype=dtdays)
for d in weather:
result[0][d] = weather[d]
fobj = cStringIO.StringIO()
response = HttpResponse(content_type='application/matlab-mat')
response['Content-Disposition'] = 'attachment; filename=weather.mat'
scipy.io.savemat(fobj, {'weather': result}, oned_as='column')
response.write(fobj.getvalue())
return response
@login_required
def excel_radiosonde(request, form):
"""
Reads the radiosonde form and converts the data into a excel file
"""
start = form.cleaned_data['start_date_radiosonde']
end = form.cleaned_data['end_date_radiosonde']
time = form.cleaned_data['time_radiosonde']
fields = form.cleaned_data['fields_radiosonde']
query = RadiosondeMeasurement.objects.filter(date__gte = start, date__lte = end, time__in = time).order_by('date').values()
fobj = cStringIO.StringIO()
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=radiosonde.xlsx'
workbook = xlsxwriter.Workbook(fobj)
worksheet = workbook.add_worksheet()
# Adjust the column width.
worksheet.set_column(0, 0, 10)
# Adjust the column width.
worksheet.set_column(1, 1, 5)
bold = workbook.add_format({'bold': 1})
date_format = workbook.add_format({'num_format': 'dd mm yyyy'})
worksheet.write(0,0, 'Date', bold)
worksheet.write(0,1, 'Time', bold)
col = 2
if 'pressure' in fields:
worksheet.write(0, col, 'Atmospheric pressure (hPa)', bold)
col = col + 1
if 'height' in fields:
worksheet.write(0, col, 'Geopotential height (m)', bold)
col = col + 1
if 'temperature' in fields:
worksheet.write(0, col, 'Temperature (C)', bold)
col = col + 1
if 'dew_point' in fields:
worksheet.write(0, col, 'Dewpoint temperature (C)', bold)
col = col + 1
if 'rel_humidity' in fields:
worksheet.write(0, col, 'Relative humidity (%)', bold)
col = col + 1
if 'wind_direction' in fields:
worksheet.write(0, col, 'Wind direction (deg)', bold)
col = col + 1
if 'wind_speed' in fields:
worksheet.write(0, col, 'Wind speed (m/s)', bold)
col = col + 1
for row, elem in enumerate(query, start = 1):
worksheet.write_datetime(row, 0, elem['date'], date_format)
worksheet.write_string(row, 1, elem['time'])
for col, f in enumerate(fields, start = 2):
worksheet.write(row, col, elem[f])
col = 2
workbook.close()
response.write(fobj.getvalue())
return response
@login_required
def excel_weather(request, form):
"""
Reads the weather form and converts the data into a excel file
"""
start_date = form.cleaned_data['start_date_weather']
end_date = form.cleaned_data['end_date_weather']
start_time = form.cleaned_data['start_time_weather']
end_time = form.cleaned_data['end_time_weather']
measuring_device = MeasuringDevice.objects.get(id = form.cleaned_data['measuring_device_weather'])
fields = form.cleaned_data['fields_weather']
query = WeatherMeasurement.objects.filter(date__gte = start_date, date__lte = end_date, time__gte = start_time, time__lte = end_time, device = measuring_device).values()
fobj = cStringIO.StringIO()
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=weather.xlsx'
workbook = xlsxwriter.Workbook(fobj)
worksheet = workbook.add_worksheet()
# Adjust the column widths.
worksheet.set_column(0, 0, 10)
worksheet.set_column(1, 1, 5)
bold = workbook.add_format({'bold': 1})
date_format = workbook.add_format({'num_format': 'dd/mm/yyyy'})
time_format = workbook.add_format({'num_format': 'hh:mm'})
worksheet.write(0,0, 'Date', bold)
worksheet.write(0,1, 'Time', bold)
texts = {'temperature':'Temperature (C)',
'humidity':'Humidity (%)',
'dew_point':'Dew point (C)',
'wind_speed':'Wind speed (m/s)',
'wind_direction':'Wind direction (deg)',
'pressure':'Pressure (hPa)',
'rainfall_rate':'Rainfall rate (mm/hr)',
'solar_radiation':'Solar radiation (W/m2)',
'uv_index':'UV Index'}
for col, f in enumerate(fields, start = 2):
worksheet.write(0, col, texts[f])
for row, elem in enumerate(query, start = 1):
worksheet.write_datetime(row, 0, elem['date'], date_format)
worksheet.write_datetime(row, 1, elem['time'], time_format)
for col, f in enumerate(fields, start = 2):
worksheet.write(row, col, elem[f])
workbook.close()
response.write(fobj.getvalue())
return response
| FSavoy/visuo-server | downloads/views.py | Python | bsd-3-clause | 12,500 |
""" test with the .transform """
from io import StringIO
import numpy as np
import pytest
from pandas.core.dtypes.common import ensure_platform_int, is_timedelta64_dtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.groupby.groupby import DataError
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def test_transform():
data = Series(np.arange(9) // 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
# GH 8046
# make sure that we preserve the input order
df = DataFrame(
np.arange(6, dtype="int64").reshape(3, 2), columns=["a", "b"], index=[0, 2, 1]
)
key = [0, 0, 1]
expected = (
df.sort_index()
.groupby(key)
.transform(lambda x: x - x.mean())
.groupby(key)
.mean()
)
result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(key).mean()
tm.assert_frame_equal(result, expected)
def demean(arr):
return arr - arr.mean()
people = DataFrame(
np.random.randn(5, 5),
columns=["a", "b", "c", "d", "e"],
index=["Joe", "Steve", "Wes", "Jim", "Travis"],
)
key = ["one", "two", "one", "two", "one"]
result = people.groupby(key).transform(demean).groupby(key).mean()
expected = people.groupby(key).apply(demean).groupby(key).mean()
tm.assert_frame_equal(result, expected)
# GH 8430
df = tm.makeTimeDataFrame()
g = df.groupby(pd.Grouper(freq="M"))
g.transform(lambda x: x - 1)
# GH 9700
df = DataFrame({"a": range(5, 10), "b": range(5)})
result = df.groupby("a").transform(max)
expected = DataFrame({"b": range(5)})
tm.assert_frame_equal(result, expected)
def test_transform_fast():
df = DataFrame({"id": np.arange(100000) / 3, "val": np.random.randn(100000)})
grp = df.groupby("id")["val"]
values = np.repeat(grp.mean().values, ensure_platform_int(grp.count().values))
expected = Series(values, index=df.index, name="val")
result = grp.transform(np.mean)
tm.assert_series_equal(result, expected)
result = grp.transform("mean")
tm.assert_series_equal(result, expected)
# GH 12737
df = DataFrame(
{
"grouping": [0, 1, 1, 3],
"f": [1.1, 2.1, 3.1, 4.5],
"d": pd.date_range("2014-1-1", "2014-1-4"),
"i": [1, 2, 3, 4],
},
columns=["grouping", "f", "i", "d"],
)
result = df.groupby("grouping").transform("first")
dates = [
Timestamp("2014-1-1"),
Timestamp("2014-1-2"),
Timestamp("2014-1-2"),
Timestamp("2014-1-4"),
]
expected = DataFrame(
{"f": [1.1, 2.1, 2.1, 4.5], "d": dates, "i": [1, 2, 2, 4]},
columns=["f", "i", "d"],
)
tm.assert_frame_equal(result, expected)
# selection
result = df.groupby("grouping")[["f", "i"]].transform("first")
expected = expected[["f", "i"]]
tm.assert_frame_equal(result, expected)
# dup columns
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["g", "a", "a"])
result = df.groupby("g").transform("first")
expected = df.drop("g", axis=1)
tm.assert_frame_equal(result, expected)
def test_transform_broadcast(tsframe, ts):
grouped = ts.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, ts.index)
for _, gp in grouped:
assert_fp_equal(result.reindex(gp.index), gp.mean())
grouped = tsframe.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, tsframe.index)
for _, gp in grouped:
agged = gp.mean()
res = result.reindex(gp.index)
for col in tsframe:
assert_fp_equal(res[col], agged[col])
# group columns
grouped = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, tsframe.index)
tm.assert_index_equal(result.columns, tsframe.columns)
for _, gp in grouped:
agged = gp.mean(1)
res = result.reindex(columns=gp.columns)
for idx in gp.index:
assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_axis_1(request, transformation_func):
# GH 36308
if transformation_func == "tshift":
request.node.add_marker(pytest.mark.xfail(reason="tshift is deprecated"))
args = ("ffill",) if transformation_func == "fillna" else ()
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"])
result = df.groupby([0, 0, 1], axis=1).transform(transformation_func, *args)
expected = df.T.groupby([0, 0, 1]).transform(transformation_func, *args).T
if transformation_func == "diff":
# Result contains nans, so transpose coerces to float
expected["b"] = expected["b"].astype("int64")
# cumcount returns Series; the rest are DataFrame
tm.assert_equal(result, expected)
def test_transform_axis_ts(tsframe):
# make sure that we are setting the axes
# correctly when on axis=0 or 1
# in the presence of a non-monotonic indexer
# GH12713
base = tsframe.iloc[0:5]
r = len(base.index)
c = len(base.columns)
tso = DataFrame(
np.random.randn(r, c), index=base.index, columns=base.columns, dtype="float64"
)
# monotonic
ts = tso
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform("mean")
expected = grouped.apply(lambda x: x - x.mean())
tm.assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform("mean")
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
tm.assert_frame_equal(result, expected)
# non-monotonic
ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform("mean")
expected = grouped.apply(lambda x: x - x.mean())
tm.assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform("mean")
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
tm.assert_frame_equal(result, expected)
def test_transform_dtype():
# GH 9807
# Check transform dtype output is preserved
df = DataFrame([[1, 3], [2, 3]])
result = df.groupby(1).transform("mean")
expected = DataFrame([[1.5], [1.5]])
tm.assert_frame_equal(result, expected)
def test_transform_bug():
# GH 5712
# transforming on a datetime column
df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)})
result = df.groupby("A")["B"].transform(lambda x: x.rank(ascending=False))
expected = Series(np.arange(5, 0, step=-1), name="B")
tm.assert_series_equal(result, expected)
def test_transform_numeric_to_boolean():
# GH 16875
# inconsistency in transforming boolean values
expected = Series([True, True], name="A")
df = DataFrame({"A": [1.1, 2.2], "B": [1, 2]})
result = df.groupby("B").A.transform(lambda x: True)
tm.assert_series_equal(result, expected)
df = DataFrame({"A": [1, 2], "B": [1, 2]})
result = df.groupby("B").A.transform(lambda x: True)
tm.assert_series_equal(result, expected)
def test_transform_datetime_to_timedelta():
# GH 15429
# transforming a datetime to timedelta
df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)})
expected = Series([Timestamp("20130101") - Timestamp("20130101")] * 5, name="A")
# this does date math without changing result type in transform
base_time = df["A"][0]
result = (
df.groupby("A")["A"].transform(lambda x: x.max() - x.min() + base_time)
- base_time
)
tm.assert_series_equal(result, expected)
# this does date math and causes the transform to return timedelta
result = df.groupby("A")["A"].transform(lambda x: x.max() - x.min())
tm.assert_series_equal(result, expected)
def test_transform_datetime_to_numeric():
# GH 10972
# convert dt to float
df = DataFrame({"a": 1, "b": date_range("2015-01-01", periods=2, freq="D")})
result = df.groupby("a").b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.mean()
)
expected = Series([-0.5, 0.5], name="b")
tm.assert_series_equal(result, expected)
# convert dt to int
df = DataFrame({"a": 1, "b": date_range("2015-01-01", periods=2, freq="D")})
result = df.groupby("a").b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.min()
)
expected = Series([0, 1], name="b")
tm.assert_series_equal(result, expected)
def test_transform_casting():
# 13046
data = """
idx A ID3 DATETIME
0 B-028 b76cd912ff "2014-10-08 13:43:27"
1 B-054 4a57ed0b02 "2014-10-08 14:26:19"
2 B-076 1a682034f8 "2014-10-08 14:29:01"
3 B-023 b76cd912ff "2014-10-08 18:39:34"
4 B-023 f88g8d7sds "2014-10-08 18:40:18"
5 B-033 b76cd912ff "2014-10-08 18:44:30"
6 B-032 b76cd912ff "2014-10-08 18:46:00"
7 B-037 b76cd912ff "2014-10-08 18:52:15"
8 B-046 db959faf02 "2014-10-08 18:59:59"
9 B-053 b76cd912ff "2014-10-08 19:17:48"
10 B-065 b76cd912ff "2014-10-08 19:21:38"
"""
df = pd.read_csv(
StringIO(data), sep=r"\s+", index_col=[0], parse_dates=["DATETIME"]
)
result = df.groupby("ID3")["DATETIME"].transform(lambda x: x.diff())
assert is_timedelta64_dtype(result.dtype)
result = df[["ID3", "DATETIME"]].groupby("ID3").transform(lambda x: x.diff())
assert is_timedelta64_dtype(result.DATETIME.dtype)
def test_transform_multiple(ts):
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.transform(lambda x: x * 2)
grouped.transform(np.mean)
def test_dispatch_transform(tsframe):
df = tsframe[::5].reindex(tsframe.index)
grouped = df.groupby(lambda x: x.month)
filled = grouped.fillna(method="pad")
fillit = lambda x: x.fillna(method="pad")
expected = df.groupby(lambda x: x.month).transform(fillit)
tm.assert_frame_equal(filled, expected)
def test_transform_transformation_func(request, transformation_func):
# GH 30918
df = DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "bar", "bar", "baz"],
"B": [1, 2, np.nan, 3, 3, np.nan, 4],
},
index=pd.date_range("2020-01-01", "2020-01-07"),
)
if transformation_func == "cumcount":
test_op = lambda x: x.transform("cumcount")
mock_op = lambda x: Series(range(len(x)), x.index)
elif transformation_func == "fillna":
test_op = lambda x: x.transform("fillna", value=0)
mock_op = lambda x: x.fillna(value=0)
elif transformation_func == "tshift":
msg = (
"Current behavior of groupby.tshift is inconsistent with other "
"transformations. See GH34452 for more details"
)
request.node.add_marker(pytest.mark.xfail(reason=msg))
else:
test_op = lambda x: x.transform(transformation_func)
mock_op = lambda x: getattr(x, transformation_func)()
result = test_op(df.groupby("A"))
groups = [df[["B"]].iloc[:4], df[["B"]].iloc[4:6], df[["B"]].iloc[6:]]
expected = concat([mock_op(g) for g in groups])
if transformation_func == "cumcount":
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def test_transform_select_columns(df):
f = lambda x: x.mean()
result = df.groupby("A")[["C", "D"]].transform(f)
selection = df[["C", "D"]]
expected = selection.groupby(df["A"]).transform(f)
tm.assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(df):
# this also tests orderings in transform between
# series/frame to make sure it's consistent
expected = {}
grouped = df.groupby("A")
expected["C"] = grouped["C"].transform(np.mean)
expected["D"] = grouped["D"].transform(np.mean)
expected = DataFrame(expected)
result = df.groupby("A").transform(np.mean)
tm.assert_frame_equal(result, expected)
def test_transform_function_aliases(df):
result = df.groupby("A").transform("mean")
expected = df.groupby("A").transform(np.mean)
tm.assert_frame_equal(result, expected)
result = df.groupby("A")["C"].transform("mean")
expected = df.groupby("A")["C"].transform(np.mean)
tm.assert_series_equal(result, expected)
def test_series_fast_transform_date():
# GH 13191
df = DataFrame(
{"grouping": [np.nan, 1, 1, 3], "d": pd.date_range("2014-1-1", "2014-1-4")}
)
result = df.groupby("grouping")["d"].transform("first")
dates = [
pd.NaT,
Timestamp("2014-1-2"),
Timestamp("2014-1-2"),
Timestamp("2014-1-4"),
]
expected = Series(dates, name="d")
tm.assert_series_equal(result, expected)
def test_transform_length():
# GH 9697
df = DataFrame({"col1": [1, 1, 2, 2], "col2": [1, 2, 3, np.nan]})
expected = Series([3.0] * 4)
def nsum(x):
return np.nansum(x)
results = [
df.groupby("col1").transform(sum)["col2"],
df.groupby("col1")["col2"].transform(sum),
df.groupby("col1").transform(nsum)["col2"],
df.groupby("col1")["col2"].transform(nsum),
]
for result in results:
tm.assert_series_equal(result, expected, check_names=False)
def test_transform_coercion():
# 14457
# when we are transforming be sure to not coerce
# via assignment
df = DataFrame({"A": ["a", "a"], "B": [0, 1]})
g = df.groupby("A")
expected = g.transform(np.mean)
result = g.transform(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
def test_groupby_transform_with_int():
# GH 3740, make sure that we might upcast on item-by-item transform
# floats
df = DataFrame(
{
"A": [1, 1, 1, 2, 2, 2],
"B": Series(1, dtype="float64"),
"C": Series([1, 2, 3, 1, 2, 3], dtype="float64"),
"D": "foo",
}
)
with np.errstate(all="ignore"):
result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std())
expected = DataFrame(
{"B": np.nan, "C": Series([-1, 0, 1, -1, 0, 1], dtype="float64")}
)
tm.assert_frame_equal(result, expected)
# int case
df = DataFrame(
{
"A": [1, 1, 1, 2, 2, 2],
"B": 1,
"C": [1, 2, 3, 1, 2, 3],
"D": "foo",
}
)
with np.errstate(all="ignore"):
result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std())
expected = DataFrame({"B": np.nan, "C": [-1, 0, 1, -1, 0, 1]})
tm.assert_frame_equal(result, expected)
# int that needs float conversion
s = Series([2, 3, 4, 10, 5, -1])
df = DataFrame({"A": [1, 1, 1, 2, 2, 2], "B": 1, "C": s, "D": "foo"})
with np.errstate(all="ignore"):
result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std())
s1 = s.iloc[0:3]
s1 = (s1 - s1.mean()) / s1.std()
s2 = s.iloc[3:6]
s2 = (s2 - s2.mean()) / s2.std()
expected = DataFrame({"B": np.nan, "C": concat([s1, s2])})
tm.assert_frame_equal(result, expected)
# int downcasting
result = df.groupby("A").transform(lambda x: x * 2 / 2)
expected = DataFrame({"B": 1, "C": [2, 3, 4, 10, 5, -1]})
tm.assert_frame_equal(result, expected)
def test_groupby_transform_with_nan_group():
# GH 9941
df = DataFrame({"a": range(10), "b": [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
result = df.groupby(df.b)["a"].transform(max)
expected = Series([1.0, 1.0, 2.0, 3.0, np.nan, 6.0, 6.0, 9.0, 9.0, 9.0], name="a")
tm.assert_series_equal(result, expected)
def test_transform_mixed_type():
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]])
df = DataFrame(
{
"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],
"c": np.tile(["a", "b", "c"], 2),
"v": np.arange(1.0, 7.0),
},
index=index,
)
def f(group):
group["g"] = group["d"] * 2
return group[:1]
grouped = df.groupby("c")
result = grouped.apply(f)
assert result["d"].dtype == np.float64
# this is by definition a mutating operation!
with pd.option_context("mode.chained_assignment", None):
for key, group in grouped:
res = f(group)
tm.assert_frame_equal(res, result.loc[key])
@pytest.mark.parametrize(
"op, args, targop",
[
("cumprod", (), lambda x: x.cumprod()),
("cumsum", (), lambda x: x.cumsum()),
("shift", (-1,), lambda x: x.shift(-1)),
("shift", (1,), lambda x: x.shift()),
],
)
def test_cython_transform_series(op, args, targop):
# GH 4095
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
# series
for data in [s, s_missing]:
# print(data.head())
expected = data.groupby(labels).transform(targop)
tm.assert_series_equal(expected, data.groupby(labels).transform(op, *args))
tm.assert_series_equal(expected, getattr(data.groupby(labels), op)(*args))
@pytest.mark.parametrize("op", ["cumprod", "cumsum"])
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize(
"input, exp",
[
# When everything is NaN
({"key": ["b"] * 10, "value": np.nan}, Series([np.nan] * 10, name="value")),
# When there is a single NaN
(
{"key": ["b"] * 10 + ["a"] * 2, "value": [3] * 3 + [np.nan] + [3] * 8},
{
("cumprod", False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],
("cumprod", True): [
3.0,
9.0,
27.0,
np.nan,
81.0,
243.0,
729.0,
2187.0,
6561.0,
19683.0,
3.0,
9.0,
],
("cumsum", False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],
("cumsum", True): [
3.0,
6.0,
9.0,
np.nan,
12.0,
15.0,
18.0,
21.0,
24.0,
27.0,
3.0,
6.0,
],
},
),
],
)
def test_groupby_cum_skipna(op, skipna, input, exp):
df = DataFrame(input)
result = df.groupby("key")["value"].transform(op, skipna=skipna)
if isinstance(exp, dict):
expected = exp[(op, skipna)]
else:
expected = exp
expected = Series(expected, name="value")
tm.assert_series_equal(expected, result)
@pytest.mark.arm_slow
@pytest.mark.parametrize(
"op, args, targop",
[
("cumprod", (), lambda x: x.cumprod()),
("cumsum", (), lambda x: x.cumsum()),
("shift", (-1,), lambda x: x.shift(-1)),
("shift", (1,), lambda x: x.shift()),
],
)
def test_cython_transform_frame(op, args, targop):
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
strings = list("qwertyuiopasdfghjklz")
strings_missing = strings[:]
strings_missing[5] = np.nan
df = DataFrame(
{
"float": s,
"float_missing": s_missing,
"int": [1, 1, 1, 1, 2] * 200,
"datetime": pd.date_range("1990-1-1", periods=1000),
"timedelta": pd.timedelta_range(1, freq="s", periods=1000),
"string": strings * 50,
"string_missing": strings_missing * 50,
},
columns=[
"float",
"float_missing",
"int",
"datetime",
"timedelta",
"string",
"string_missing",
],
)
df["cat"] = df["string"].astype("category")
df2 = df.copy()
df2.index = pd.MultiIndex.from_product([range(100), range(10)])
# DataFrame - Single and MultiIndex,
# group by values, index level, columns
for df in [df, df2]:
for gb_target in [
{"by": labels},
{"level": 0},
{"by": "string"},
]: # {"by": 'string_missing'}]:
# {"by": ['int','string']}]:
gb = df.groupby(**gb_target)
# allowlisted methods set the selection before applying
# bit a of hack to make sure the cythonized shift
# is equivalent to pre 0.17.1 behavior
if op == "shift":
gb._set_group_selection()
if op != "shift" and "int" not in gb_target:
# numeric apply fastpath promotes dtype so have
# to apply separately and concat
i = gb[["int"]].apply(targop)
f = gb[["float", "float_missing"]].apply(targop)
expected = pd.concat([f, i], axis=1)
else:
expected = gb.apply(targop)
expected = expected.sort_index(axis=1)
tm.assert_frame_equal(expected, gb.transform(op, *args).sort_index(axis=1))
tm.assert_frame_equal(expected, getattr(gb, op)(*args).sort_index(axis=1))
# individual columns
for c in df:
if c not in ["float", "int", "float_missing"] and op != "shift":
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
gb[c].transform(op)
with pytest.raises(DataError, match=msg):
getattr(gb[c], op)()
else:
expected = gb[c].apply(targop)
expected.name = c
tm.assert_series_equal(expected, gb[c].transform(op, *args))
tm.assert_series_equal(expected, getattr(gb[c], op)(*args))
def test_transform_with_non_scalar_group():
# GH 10165
cols = pd.MultiIndex.from_tuples(
[
("syn", "A"),
("mis", "A"),
("non", "A"),
("syn", "C"),
("mis", "C"),
("non", "C"),
("syn", "T"),
("mis", "T"),
("non", "T"),
("syn", "G"),
("mis", "G"),
("non", "G"),
]
)
df = DataFrame(
np.random.randint(1, 10, (4, 12)), columns=cols, index=["A", "C", "G", "T"]
)
msg = "transform must return a scalar value for each group.*"
with pytest.raises(ValueError, match=msg):
df.groupby(axis=1, level=1).transform(lambda z: z.div(z.sum(axis=1), axis=0))
@pytest.mark.parametrize(
"cols,exp,comp_func",
[
("a", Series([1, 1, 1], name="a"), tm.assert_series_equal),
(
["a", "c"],
DataFrame({"a": [1, 1, 1], "c": [1, 1, 1]}),
tm.assert_frame_equal,
),
],
)
@pytest.mark.parametrize("agg_func", ["count", "rank", "size"])
def test_transform_numeric_ret(cols, exp, comp_func, agg_func, request):
if agg_func == "size" and isinstance(cols, list):
# https://github.com/pytest-dev/pytest/issues/6300
# workaround to xfail fixture/param permutations
reason = "'size' transformation not supported with NDFrameGroupy"
request.node.add_marker(pytest.mark.xfail(reason=reason))
# GH 19200
df = DataFrame(
{"a": pd.date_range("2018-01-01", periods=3), "b": range(3), "c": range(7, 10)}
)
result = df.groupby("b")[cols].transform(agg_func)
if agg_func == "rank":
exp = exp.astype("float")
comp_func(result, exp)
@pytest.mark.parametrize("mix_groupings", [True, False])
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize("val1,val2", [("foo", "bar"), (1, 2), (1.0, 2.0)])
@pytest.mark.parametrize(
"fill_method,limit,exp_vals",
[
(
"ffill",
None,
[np.nan, np.nan, "val1", "val1", "val1", "val2", "val2", "val2"],
),
("ffill", 1, [np.nan, np.nan, "val1", "val1", np.nan, "val2", "val2", np.nan]),
(
"bfill",
None,
["val1", "val1", "val1", "val2", "val2", "val2", np.nan, np.nan],
),
("bfill", 1, [np.nan, "val1", "val1", np.nan, "val2", "val2", np.nan, np.nan]),
],
)
def test_group_fill_methods(
mix_groupings, as_series, val1, val2, fill_method, limit, exp_vals
):
vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
_exp_vals = list(exp_vals)
# Overwrite placeholder values
for index, exp_val in enumerate(_exp_vals):
if exp_val == "val1":
_exp_vals[index] = val1
elif exp_val == "val2":
_exp_vals[index] = val2
# Need to modify values and expectations depending on the
# Series / DataFrame that we ultimately want to generate
if mix_groupings: # ['a', 'b', 'a, 'b', ...]
keys = ["a", "b"] * len(vals)
def interweave(list_obj):
temp = []
for x in list_obj:
temp.extend([x, x])
return temp
_exp_vals = interweave(_exp_vals)
vals = interweave(vals)
else: # ['a', 'a', 'a', ... 'b', 'b', 'b']
keys = ["a"] * len(vals) + ["b"] * len(vals)
_exp_vals = _exp_vals * 2
vals = vals * 2
df = DataFrame({"key": keys, "val": vals})
if as_series:
result = getattr(df.groupby("key")["val"], fill_method)(limit=limit)
exp = Series(_exp_vals, name="val")
tm.assert_series_equal(result, exp)
else:
result = getattr(df.groupby("key"), fill_method)(limit=limit)
exp = DataFrame({"val": _exp_vals})
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("fill_method", ["ffill", "bfill"])
def test_pad_stable_sorting(fill_method):
# GH 21207
x = [0] * 20
y = [np.nan] * 10 + [1] * 10
if fill_method == "bfill":
y = y[::-1]
df = DataFrame({"x": x, "y": y})
expected = df.drop("x", 1)
result = getattr(df.groupby("x"), fill_method)()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize(
"freq",
[
None,
pytest.param(
"D",
marks=pytest.mark.xfail(
reason="GH#23918 before method uses freq in vectorized approach"
),
),
],
)
@pytest.mark.parametrize("periods", [1, -1])
@pytest.mark.parametrize("fill_method", ["ffill", "bfill", None])
@pytest.mark.parametrize("limit", [None, 1])
def test_pct_change(test_series, freq, periods, fill_method, limit):
# GH 21200, 21621, 30463
vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4]
keys = ["a", "b"]
key_v = np.repeat(keys, len(vals))
df = DataFrame({"key": key_v, "vals": vals * 2})
df_g = df
if fill_method is not None:
df_g = getattr(df.groupby("key"), fill_method)(limit=limit)
grp = df_g.groupby(df.key)
expected = grp["vals"].obj / grp["vals"].shift(periods) - 1
if test_series:
result = df.groupby("key")["vals"].pct_change(
periods=periods, fill_method=fill_method, limit=limit, freq=freq
)
tm.assert_series_equal(result, expected)
else:
result = df.groupby("key").pct_change(
periods=periods, fill_method=fill_method, limit=limit, freq=freq
)
tm.assert_frame_equal(result, expected.to_frame("vals"))
@pytest.mark.parametrize(
"func, expected_status",
[
("ffill", ["shrt", "shrt", "lng", np.nan, "shrt", "ntrl", "ntrl"]),
("bfill", ["shrt", "lng", "lng", "shrt", "shrt", "ntrl", np.nan]),
],
)
def test_ffill_bfill_non_unique_multilevel(func, expected_status):
# GH 19437
date = pd.to_datetime(
[
"2018-01-01",
"2018-01-01",
"2018-01-01",
"2018-01-01",
"2018-01-02",
"2018-01-01",
"2018-01-02",
]
)
symbol = ["MSFT", "MSFT", "MSFT", "AAPL", "AAPL", "TSLA", "TSLA"]
status = ["shrt", np.nan, "lng", np.nan, "shrt", "ntrl", np.nan]
df = DataFrame({"date": date, "symbol": symbol, "status": status})
df = df.set_index(["date", "symbol"])
result = getattr(df.groupby("symbol")["status"], func)()
index = MultiIndex.from_tuples(
tuples=list(zip(*[date, symbol])), names=["date", "symbol"]
)
expected = Series(expected_status, index=index, name="status")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", [np.any, np.all])
def test_any_all_np_func(func):
# GH 20653
df = DataFrame(
[["foo", True], [np.nan, True], ["foo", True]], columns=["key", "val"]
)
exp = Series([True, np.nan, True], name="val")
res = df.groupby("key")["val"].transform(func)
tm.assert_series_equal(res, exp)
def test_groupby_transform_rename():
# https://github.com/pandas-dev/pandas/issues/23461
def demean_rename(x):
result = x - x.mean()
if isinstance(x, pd.Series):
return result
        result = result.rename(columns={c: f"{c}_demeaned" for c in result.columns})
return result
df = DataFrame({"group": list("ababa"), "value": [1, 1, 1, 2, 2]})
expected = DataFrame({"value": [-1.0 / 3, -0.5, -1.0 / 3, 0.5, 2.0 / 3]})
result = df.groupby("group").transform(demean_rename)
tm.assert_frame_equal(result, expected)
result_single = df.groupby("group").value.transform(demean_rename)
tm.assert_series_equal(result_single, expected["value"])
@pytest.mark.parametrize("func", [min, max, np.min, np.max, "first", "last"])
def test_groupby_transform_timezone_column(func):
# GH 24198
ts = pd.to_datetime("now", utc=True).tz_convert("Asia/Singapore")
result = DataFrame({"end_time": [ts], "id": [1]})
result["max_end_time"] = result.groupby("id").end_time.transform(func)
expected = DataFrame([[ts, 1, ts]], columns=["end_time", "id", "max_end_time"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"func, values",
[
("idxmin", ["1/1/2011"] * 2 + ["1/3/2011"] * 7 + ["1/10/2011"]),
("idxmax", ["1/2/2011"] * 2 + ["1/9/2011"] * 7 + ["1/10/2011"]),
],
)
def test_groupby_transform_with_datetimes(func, values):
# GH 15306
dates = pd.date_range("1/1/2011", periods=10, freq="D")
stocks = DataFrame({"price": np.arange(10.0)}, index=dates)
stocks["week_id"] = dates.isocalendar().week
result = stocks.groupby(stocks["week_id"])["price"].transform(func)
expected = Series(data=pd.to_datetime(values), index=dates, name="price")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["cumsum", "cumprod", "cummin", "cummax"])
def test_transform_absent_categories(func):
# GH 16771
# cython transforms with more groups than rows
x_vals = [1]
x_cats = range(2)
y = [1]
df = DataFrame({"x": Categorical(x_vals, x_cats), "y": y})
result = getattr(df.y.groupby(df.x), func)()
expected = df.y
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["ffill", "bfill", "shift"])
@pytest.mark.parametrize("key, val", [("level", 0), ("by", Series([0]))])
def test_ffill_not_in_axis(func, key, val):
# GH 21521
df = DataFrame([[np.nan]])
result = getattr(df.groupby(**{key: val}), func)()
expected = df
tm.assert_frame_equal(result, expected)
def test_transform_invalid_name_raises():
# GH#27486
df = DataFrame({"a": [0, 1, 1, 2]})
g = df.groupby(["a", "b", "b", "c"])
with pytest.raises(ValueError, match="not a valid function name"):
g.transform("some_arbitrary_name")
# method exists on the object, but is not a valid transformation/agg
assert hasattr(g, "aggregate") # make sure the method exists
with pytest.raises(ValueError, match="not a valid function name"):
g.transform("aggregate")
# Test SeriesGroupBy
g = df["a"].groupby(["a", "b", "b", "c"])
with pytest.raises(ValueError, match="not a valid function name"):
g.transform("some_arbitrary_name")
@pytest.mark.parametrize(
"obj",
[
DataFrame(
{"a": [0, 0, 0, 1, 1, 1], "b": range(6)},
index=["A", "B", "C", "D", "E", "F"],
),
Series([0, 0, 0, 1, 1, 1], index=["A", "B", "C", "D", "E", "F"]),
],
)
def test_transform_agg_by_name(request, reduction_func, obj):
func = reduction_func
g = obj.groupby(np.repeat([0, 1], 3))
if func == "ngroup": # GH#27468
request.node.add_marker(
pytest.mark.xfail(reason="TODO: g.transform('ngroup') doesn't work")
)
if func == "size" and obj.ndim == 2: # GH#27469
request.node.add_marker(
pytest.mark.xfail(reason="TODO: g.transform('size') doesn't work")
)
if func == "corrwith" and isinstance(obj, Series): # GH#32293
request.node.add_marker(
pytest.mark.xfail(reason="TODO: implement SeriesGroupBy.corrwith")
)
args = {"nth": [0], "quantile": [0.5], "corrwith": [obj]}.get(func, [])
result = g.transform(func, *args)
# this is the *definition* of a transformation
tm.assert_index_equal(result.index, obj.index)
if hasattr(obj, "columns"):
tm.assert_index_equal(result.columns, obj.columns)
# verify that values were broadcasted across each group
assert len(set(DataFrame(result).iloc[-3:, -1])) == 1
def test_transform_lambda_with_datetimetz():
# GH 27496
df = DataFrame(
{
"time": [
Timestamp("2010-07-15 03:14:45"),
Timestamp("2010-11-19 18:47:06"),
],
"timezone": ["Etc/GMT+4", "US/Eastern"],
}
)
result = df.groupby(["timezone"])["time"].transform(
lambda x: x.dt.tz_localize(x.name)
)
expected = Series(
[
Timestamp("2010-07-15 03:14:45", tz="Etc/GMT+4"),
Timestamp("2010-11-19 18:47:06", tz="US/Eastern"),
],
name="time",
)
tm.assert_series_equal(result, expected)
def test_transform_fastpath_raises():
# GH#29631 case where fastpath defined in groupby.generic _choose_path
# raises, but slow_path does not
df = DataFrame({"A": [1, 1, 2, 2], "B": [1, -1, 1, 2]})
gb = df.groupby("A")
def func(grp):
# we want a function such that func(frame) fails but func.apply(frame)
# works
if grp.ndim == 2:
# Ensure that fast_path fails
raise NotImplementedError("Don't cross the streams")
return grp * 2
# Check that the fastpath raises, see _transform_general
obj = gb._obj_with_exclusions
gen = gb.grouper.get_iterator(obj, axis=gb.axis)
fast_path, slow_path = gb._define_paths(func)
_, group = next(gen)
with pytest.raises(NotImplementedError, match="Don't cross the streams"):
fast_path(group)
result = gb.transform(func)
expected = DataFrame([2, -2, 2, 4], columns=["B"])
tm.assert_frame_equal(result, expected)
def test_transform_lambda_indexing():
# GH 7883
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "flux", "foo", "flux"],
"B": ["one", "one", "two", "three", "two", "six", "five", "three"],
"C": range(8),
"D": range(8),
"E": range(8),
}
)
df = df.set_index(["A", "B"])
df = df.sort_index()
result = df.groupby(level="A").transform(lambda x: x.iloc[-1])
expected = DataFrame(
{
"C": [3, 3, 7, 7, 4, 4, 4, 4],
"D": [3, 3, 7, 7, 4, 4, 4, 4],
"E": [3, 3, 7, 7, 4, 4, 4, 4],
},
index=MultiIndex.from_tuples(
[
("bar", "one"),
("bar", "three"),
("flux", "six"),
("flux", "three"),
("foo", "five"),
("foo", "one"),
("foo", "two"),
("foo", "two"),
],
names=["A", "B"],
),
)
tm.assert_frame_equal(result, expected)
def test_categorical_and_not_categorical_key(observed):
# Checks that groupby-transform, when grouping by both a categorical
# and a non-categorical key, doesn't try to expand the output to include
# non-observed categories but instead matches the input shape.
# GH 32494
df_with_categorical = DataFrame(
{
"A": Categorical(["a", "b", "a"], categories=["a", "b", "c"]),
"B": [1, 2, 3],
"C": ["a", "b", "a"],
}
)
df_without_categorical = DataFrame(
{"A": ["a", "b", "a"], "B": [1, 2, 3], "C": ["a", "b", "a"]}
)
# DataFrame case
result = df_with_categorical.groupby(["A", "C"], observed=observed).transform("sum")
expected = df_without_categorical.groupby(["A", "C"]).transform("sum")
tm.assert_frame_equal(result, expected)
expected_explicit = DataFrame({"B": [4, 2, 4]})
tm.assert_frame_equal(result, expected_explicit)
# Series case
result = df_with_categorical.groupby(["A", "C"], observed=observed)["B"].transform(
"sum"
)
expected = df_without_categorical.groupby(["A", "C"])["B"].transform("sum")
tm.assert_series_equal(result, expected)
expected_explicit = Series([4, 2, 4], name="B")
tm.assert_series_equal(result, expected_explicit)
| jreback/pandas | pandas/tests/groupby/transform/test_transform.py | Python | bsd-3-clause | 38,188 |
"""
Commands
Commands describe the input the account can give to the game.
"""
from evennia import Command as BaseCommand
from evennia import default_cmds
from evennia.commands import cmdset
class Command(BaseCommand):
"""
Inherit from this if you want to create your own command styles
    from scratch. Note that Evennia's default commands inherit from
MuxCommand instead.
Note that the class's `__doc__` string (this text) is
used by Evennia to create the automatic help entry for
the command, so make sure to document consistently here.
Each Command implements the following methods, called
in this order (only func() is actually required):
    - at_pre_cmd(): If this returns a truthy value, execution is aborted.
- parse(): Should perform any extra parsing needed on self.args
and store the result on self.
- func(): Performs the actual work.
    - at_post_cmd(): Extra actions, often things done after
every command, like prompts.
"""
pass
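# The class below is a minimal illustrative sketch (not part of Evennia's
# default command set) showing how the hooks described above fit together:
# parse() cleans up self.args and func() does the actual work. The command
# key "smile" and its messages are invented for this example only.
class CmdSmile(BaseCommand):
    """
    Smile, optionally at something.
    Usage:
      smile [<target>]
    """
    key = "smile"
    locks = "cmd:all()"
    def parse(self):
        # store the cleaned argument so func() does not need to re-parse it
        self.target_name = self.args.strip()
    def func(self):
        if self.target_name:
            self.caller.msg(f"You smile at {self.target_name}.")
        else:
            self.caller.msg("You smile.")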
class CmdNoLimbo(default_cmds.MuxCommand):
"""
This command is not available in Limbo. Go to the |ySandbox|n to experiment and get the full help text.
"""
key = "build"
locks = "cmd:perm(desc) or perm(Builders)"
help_category = "Building"
def func(self):
self.caller.msg("Building is not available in Limbo. "
"Go to the |ySandbox| to experiment and get all build commands.")
class CmdTap(BaseCommand):
"""
Inspect character actions for debug purposes.
Usage:
tap <object or #dbref>
untap
"""
key = "tap"
aliases = ["untap"]
locks = "cmd:superuser()"
def parse(self):
self.args = self.args.strip()
def func(self):
caller = self.caller
if self.cmdname == "untap":
if caller.ndb.tapped_data:
targetsess, orig_data_in, orig_data_out = caller.ndb.tapped_data
targetsess.data_in = orig_data_in
targetsess.data_out = orig_data_out
caller.msg(f"|rUntapped {targetsess.account.name}.|n")
del caller.ndb.tapped_data
else:
caller.msg("No tap to untap.")
return
if not self.args:
caller.msg("Usage: tap <object or #dbref> or untap")
return
if caller.ndb.tapped_data:
targetsess, _, _ = caller.ndb.tapped_data
caller.msg(f"|rYou are already tapping {targetsess.account.name}. Untap first.")
return
target = caller.search(self.args, global_search=True)
if not target:
return
targetsess = target.sessions.get()[0]
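        # The tap works by swapping the target session's data_in/data_out
        # callables for wrappers that echo the traffic to the tapper before
        # forwarding it; the originals are kept in caller.ndb.tapped_data so
        # that `untap` can restore them later.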
def _patched_data_in(*args, **kwargs):
try:
text = kwargs["text"][0][0].strip('\n')
except (IndexError, KeyError, ValueError):
text = kwargs
taptxt = f"|wTAP|||g {targetsess.account.name} cmd:>|n '{text}'"
if text != 'idle':
caller.msg(taptxt)
targetsess.sessionhandler.call_inputfuncs(targetsess, **kwargs)
def _patched_data_out(*args, **kwargs):
try:
text = kwargs["text"]
if not isinstance(text, str):
text = text[0] # a tuple
text = text.strip("\n")
text = "|wTAP|||n " + "\n|wTAP|||n ".join(text.split("\n"))
except (IndexError, KeyError, ValueError):
text = kwargs
taptxt = f"|wTAP|||y {targetsess.account.name} sees:|n\n{text}"
caller.msg(taptxt)
targetsess.sessionhandler.data_out(targetsess, **kwargs)
# patch object with custom version
caller.ndb.tapped_data = (targetsess, targetsess.data_in, targetsess.data_out)
targetsess.data_in = _patched_data_in
targetsess.data_out = _patched_data_out
caller.msg(f"|gStart tapping {targetsess.account.name}...|n")
#------------------------------------------------------------
#
# The default commands inherit from
#
# evennia.commands.default.muxcommand.MuxCommand.
#
# If you want to make sweeping changes to default commands you can
# uncomment this copy of the MuxCommand parent and add
#
# COMMAND_DEFAULT_CLASS = "commands.command.MuxCommand"
#
# to your settings file. Be warned that the default commands expect
# the functionality implemented in the parse() method, so be
# careful with what you change.
#
#------------------------------------------------------------
#from evennia.utils import utils
#class MuxCommand(Command):
# """
# This sets up the basis for a MUX command. The idea
# is that most other Mux-related commands should just
# inherit from this and don't have to implement much
# parsing of their own unless they do something particularly
# advanced.
#
# Note that the class's __doc__ string (this text) is
# used by Evennia to create the automatic help entry for
# the command, so make sure to document consistently here.
# """
# def has_perm(self, srcobj):
# """
# This is called by the cmdhandler to determine
# if srcobj is allowed to execute this command.
# We just show it here for completeness - we
# are satisfied using the default check in Command.
# """
# return super(MuxCommand, self).has_perm(srcobj)
#
# def at_pre_cmd(self):
# """
# This hook is called before self.parse() on all commands
# """
# pass
#
# def at_post_cmd(self):
# """
# This hook is called after the command has finished executing
# (after self.func()).
# """
# pass
#
# def parse(self):
# """
# This method is called by the cmdhandler once the command name
# has been identified. It creates a new set of member variables
# that can be later accessed from self.func() (see below)
#
# The following variables are available for our use when entering this
# method (from the command definition, and assigned on the fly by the
# cmdhandler):
# self.key - the name of this command ('look')
# self.aliases - the aliases of this cmd ('l')
# self.permissions - permission string for this command
# self.help_category - overall category of command
#
# self.caller - the object calling this command
# self.cmdstring - the actual command name used to call this
# (this allows you to know which alias was used,
# for example)
# self.args - the raw input; everything following self.cmdstring.
# self.cmdset - the cmdset from which this command was picked. Not
# often used (useful for commands like 'help' or to
# list all available commands etc)
# self.obj - the object on which this command was defined. It is often
# the same as self.caller.
#
# A MUX command has the following possible syntax:
#
# name[ with several words][/switch[/switch..]] arg1[,arg2,...] [[=|,] arg[,..]]
#
# The 'name[ with several words]' part is already dealt with by the
# cmdhandler at this point, and stored in self.cmdname (we don't use
# it here). The rest of the command is stored in self.args, which can
# start with the switch indicator /.
#
# This parser breaks self.args into its constituents and stores them in the
# following variables:
# self.switches = [list of /switches (without the /)]
# self.raw = This is the raw argument input, including switches
# self.args = This is re-defined to be everything *except* the switches
# self.lhs = Everything to the left of = (lhs:'left-hand side'). If
# no = is found, this is identical to self.args.
# self.rhs: Everything to the right of = (rhs:'right-hand side').
# If no '=' is found, this is None.
# self.lhslist - [self.lhs split into a list by comma]
# self.rhslist - [list of self.rhs split into a list by comma]
# self.arglist = [list of space-separated args (stripped, including '=' if it exists)]
#
# All args and list members are stripped of excess whitespace around the
# strings, but case is preserved.
# """
# raw = self.args
# args = raw.strip()
#
# # split out switches
# switches = []
# if args and len(args) > 1 and args[0] == "/":
# # we have a switch, or a set of switches. These end with a space.
# switches = args[1:].split(None, 1)
# if len(switches) > 1:
# switches, args = switches
# switches = switches.split('/')
# else:
# args = ""
# switches = switches[0].split('/')
# arglist = [arg.strip() for arg in args.split()]
#
# # check for arg1, arg2, ... = argA, argB, ... constructs
# lhs, rhs = args, None
# lhslist, rhslist = [arg.strip() for arg in args.split(',')], []
# if args and '=' in args:
# lhs, rhs = [arg.strip() for arg in args.split('=', 1)]
# lhslist = [arg.strip() for arg in lhs.split(',')]
# rhslist = [arg.strip() for arg in rhs.split(',')]
#
# # save to object properties:
# self.raw = raw
# self.switches = switches
# self.args = args.strip()
# self.arglist = arglist
# self.lhs = lhs
# self.lhslist = lhslist
# self.rhs = rhs
# self.rhslist = rhslist
#
# # if the class has the account_caller property set on itself, we make
# # sure that self.caller is always the account if possible. We also create
# # a special property "character" for the puppeted object, if any. This
# # is convenient for commands defined on the Account only.
# if hasattr(self, "account_caller") and self.account_caller:
# if utils.inherits_from(self.caller, "evennia.objects.objects.DefaultObject"):
# # caller is an Object/Character
# self.character = self.caller
# self.caller = self.caller.account
# elif utils.inherits_from(self.caller, "evennia.accounts.accounts.DefaultAccount"):
# # caller was already an Account
# self.character = self.caller.get_puppet(self.session)
# else:
# self.character = None
#
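# As a rough illustration only (assuming the commented-out parse() above),
# an input line such as
#
#     cmdname/quiet box, chest = key, coin
#
# reaches parse() with self.args == '/quiet box, chest = key, coin' and the
# parser would leave roughly:
#
#     self.switches == ['quiet']
#     self.args     == 'box, chest = key, coin'
#     self.lhs      == 'box, chest'
#     self.lhslist  == ['box', 'chest']
#     self.rhs      == 'key, coin'
#     self.rhslist  == ['key', 'coin']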
| evennia/evdemo | evdemo/commands/command.py | Python | bsd-3-clause | 10,623 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('letters', '0011_auto_20150215_1830'),
]
operations = [
migrations.DeleteModel(
name='LetterFile',
),
migrations.AddField(
model_name='contenttemplate',
name='template_name',
field=models.CharField(default='Test template', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='logo',
name='created',
field=models.DateTimeField(default=datetime.datetime(2015, 2, 15, 22, 22, 25, 364812, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
migrations.AddField(
model_name='logo',
name='end_time',
field=models.DateTimeField(default=datetime.datetime(2015, 2, 15, 22, 22, 33, 653083, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
migrations.AddField(
model_name='logo',
name='name',
field=models.CharField(default='Test logo', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='logo',
name='start_time',
field=models.DateTimeField(default=datetime.datetime(2015, 2, 15, 22, 22, 52, 14459, tzinfo=utc), auto_now_add=True),
preserve_default=False,
),
migrations.AlterField(
model_name='lettertext',
name='barcode',
field=models.CharField(default='abc123', max_length=100),
preserve_default=False,
),
]
| garry-cairns/correspondence | api/correspondence/letters/migrations/0012_auto_20150215_2223.py | Python | bsd-3-clause | 1,825 |
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# K Lars Lohn, [email protected]
# Peter Bengtsson, [email protected]
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import sys
import os
import unittest
from contextlib import contextmanager
import ConfigParser
import io
from cStringIO import StringIO
import getopt
import configman.config_manager as config_manager
from configman.dotdict import DotDict, DotDictWithAcquisition
import configman.datetime_util as dtu
from configman.config_exceptions import NotAnOptionError
from configman.value_sources.source_exceptions import \
AllHandlersFailedException
import configman.value_sources
import configman.value_sources.for_configparse
class TestCase(unittest.TestCase):
def test_empty_ConfigurationManager_constructor(self):
# because the default option argument defaults to using sys.argv we
# have to mock that
c = config_manager.ConfigurationManager(
use_admin_controls=False,
#use_config_files=False,
use_auto_help=False,
argv_source=[]
)
self.assertEqual(c.option_definitions, config_manager.Namespace())
def test_get_config_1(self):
n = config_manager.Namespace()
n.add_option('a', 1, 'the a')
n.add_option('b', 17)
c = config_manager.ConfigurationManager(
[n],
use_admin_controls=False,
#use_config_files=False,
use_auto_help=False,
argv_source=[]
)
d = c.get_config()
e = DotDict()
e.a = 1
e.b = 17
self.assertEqual(d, e)
def test_get_config_2(self):
n = config_manager.Namespace()
n.add_option('a', 1, 'the a')
n.b = 17
n.c = c = config_manager.Namespace()
c.x = 'fred'
c.y = 3.14159
c.add_option('z', 99, 'the 99')
c = config_manager.ConfigurationManager(
[n],
use_admin_controls=False,
#use_config_files=False,
use_auto_help=False,
argv_source=[]
)
d = c.get_config()
e = DotDict()
e.a = 1
e.b = 17
e.c = DotDict()
e.c.x = 'fred'
e.c.y = 3.14159
e.c.z = 99
self.assertEqual(d, e)
def test_walk_config(self):
"""step through them all"""
n = config_manager.Namespace(doc='top')
n.add_option('aaa', False, 'the a', short_form='a')
n.c = config_manager.Namespace(doc='c space')
n.c.add_option('fred', doc='husband from Flintstones')
n.c.add_option('wilma', doc='wife from Flintstones')
n.d = config_manager.Namespace(doc='d space')
n.d.add_option('fred', doc='male neighbor from I Love Lucy')
n.d.add_option('ethel', doc='female neighbor from I Love Lucy')
n.d.x = config_manager.Namespace(doc='x space')
n.d.x.add_option('size', 100, 'how big in tons', short_form='s')
n.d.x.add_option('password', 'secrets', 'the password')
c = config_manager.ConfigurationManager(
[n],
use_admin_controls=True,
#use_config_files=False,
use_auto_help=False,
argv_source=[]
)
e = [('aaa', 'aaa', n.aaa.name),
('c', 'c', n.c._doc),
('c.wilma', 'wilma', n.c.wilma.name),
('c.fred', 'fred', n.c.fred.name),
('d', 'd', n.d._doc),
('d.ethel', 'ethel', n.d.ethel.name),
('d.fred', 'fred', n.d.fred.name),
('d.x', 'x', n.d.x._doc),
('d.x.size', 'size', n.d.x.size.name),
('d.x.password', 'password', n.d.x.password.name),
]
e.sort()
r = [(q, k, v.name if isinstance(v, config_manager.Option) else v._doc)
for q, k, v in c._walk_config()]
r.sort()
for expected, received in zip(e, r):
self.assertEqual(received, expected)
def _some_namespaces(self):
"""set up some namespaces"""
n = config_manager.Namespace(doc='top')
n.add_option('aaa', '2011-05-04T15:10:00', 'the a',
short_form='a',
from_string_converter=dtu.datetime_from_ISO_string
)
n.c = config_manager.Namespace(doc='c space')
n.c.add_option('fred', 'stupid', 'husband from Flintstones')
n.c.add_option('wilma', 'waspish', 'wife from Flintstones')
n.d = config_manager.Namespace(doc='d space')
n.d.add_option('fred', 'crabby', 'male neighbor from I Love Lucy')
n.d.add_option('ethel', 'silly', 'female neighbor from I Love Lucy')
n.x = config_manager.Namespace(doc='x space')
n.x.add_option('size', 100, 'how big in tons', short_form='s')
n.x.add_option('password', 'secret', 'the password')
return n
def test_overlay_config_1(self):
n = config_manager.Namespace()
n.add_option('a')
n.a.default = 1
n.a.doc = 'the a'
n.b = 17
n.c = c = config_manager.Namespace()
c.x = 'fred'
c.y = 3.14159
c.add_option('z')
c.z.default = 99
c.z.doc = 'the 99'
c = config_manager.ConfigurationManager([n],
use_admin_controls=False,
#use_config_files=False,
use_auto_help=False,
argv_source=[])
o = {"a": 2, "c.z": 22, "c.x": 'noob', "c.y": "2.89"}
c._overlay_value_sources_recurse(o)
d = c._generate_config(DotDict)
e = DotDict()
e.a = 2
e.b = 17
e.c = DotDict()
e.c.x = 'noob'
e.c.y = 2.89
e.c.z = 22
self.assertEqual(d, e)
def test_overlay_config_2(self):
n = config_manager.Namespace()
n.add_option('a')
n.a.default = 1
n.a.doc = 'the a'
n.b = 17
n.c = c = config_manager.Namespace()
c.x = 'fred'
c.y = 3.14159
c.add_option('z')
c.z.default = 99
c.z.doc = 'the 99'
c = config_manager.ConfigurationManager([n],
use_admin_controls=False,
#use_config_files=False,
use_auto_help=False,
argv_source=[])
o = {"a": 2, "c.z": 22, "c.x": 'noob', "c.y": "2.89", "n": "not here"}
c._overlay_value_sources_recurse(o, ignore_mismatches=True)
d = c._generate_config(DotDict)
e = DotDict()
e.a = 2
e.b = 17
e.c = DotDict()
e.c.x = 'noob'
e.c.y = 2.89
e.c.z = 22
self.assertEqual(d, e)
def test_overlay_config_3(self):
n = config_manager.Namespace()
n.add_option('a')
n.a.default = 1
n.a.doc = 'the a'
n.b = 17
n.c = c = config_manager.Namespace()
c.x = 'fred'
c.y = 3.14159
c.add_option('z')
c.z.default = 99
c.z.doc = 'the 99'
c = config_manager.ConfigurationManager([n],
use_admin_controls=True,
#use_config_files=False,
use_auto_help=False,
argv_source=[])
output = {
"a": 2,
"c.z": 22,
"c.x": 'noob',
"c.y": "2.89",
"c.n": "not here"
}
self.assertRaises(NotAnOptionError,
c._overlay_value_sources_recurse, output,
ignore_mismatches=False)
def test_overlay_config_4(self):
"""test overlay dict w/flat source dict"""
n = config_manager.Namespace()
n.add_option('a', doc='the a', default=1)
n.b = 17
n.c = config_manager.Namespace()
n.c.add_option('extra', doc='the x', default=3.14159)
g = {'a': 2, 'c.extra': 2.89}
c = config_manager.ConfigurationManager([n], [g],
use_admin_controls=True,
#use_config_files=False,
use_auto_help=False,
argv_source=[])
self.assertEqual(c.option_definitions.a, n.a)
self.assertTrue(isinstance(c.option_definitions.b,
config_manager.Option))
self.assertEqual(c.option_definitions.a.value, 2)
self.assertEqual(c.option_definitions.b.value, 17)
self.assertEqual(c.option_definitions.b.default, 17)
self.assertEqual(c.option_definitions.b.name, 'b')
self.assertEqual(c.option_definitions.c.extra.name, 'extra')
self.assertEqual(c.option_definitions.c.extra.doc, 'the x')
self.assertEqual(c.option_definitions.c.extra.default, 3.14159)
self.assertEqual(c.option_definitions.c.extra.value, 2.89)
def test_overlay_config_4a(self):
"""test overlay dict w/deep source dict"""
n = config_manager.Namespace()
n.add_option('a', 1, doc='the a')
n.b = 17
n.c = config_manager.Namespace()
n.c.add_option('extra', doc='the x', default=3.14159)
g = {'a': 2, 'c': {'extra': 2.89}}
c = config_manager.ConfigurationManager([n], [g],
use_admin_controls=True,
#use_config_files=False,
use_auto_help=False,
argv_source=[])
self.assertEqual(c.option_definitions.a, n.a)
self.assertTrue(isinstance(c.option_definitions.b,
config_manager.Option))
self.assertEqual(c.option_definitions.a.value, 2)
self.assertEqual(c.option_definitions.b.value, 17)
self.assertEqual(c.option_definitions.b.default, 17)
self.assertEqual(c.option_definitions.b.name, 'b')
self.assertEqual(c.option_definitions.c.extra.name, 'extra')
self.assertEqual(c.option_definitions.c.extra.doc, 'the x')
self.assertEqual(c.option_definitions.c.extra.default, 3.14159)
self.assertEqual(c.option_definitions.c.extra.value, 2.89)
def test_overlay_config_5(self):
"""test namespace definition w/getopt"""
n = config_manager.Namespace()
n.add_option('a', doc='the a', default=1)
n.b = 17
n.add_option('c', doc='the c', default=False)
c = config_manager.ConfigurationManager([n], [getopt],
use_admin_controls=True,
#use_config_files=False,
use_auto_help=False,
argv_source=['--a', '2', '--c'])
self.assertEqual(c.option_definitions.a, n.a)
self.assertTrue(isinstance(c.option_definitions.b,
config_manager.Option))
self.assertEqual(c.option_definitions.a.value, 2)
self.assertEqual(c.option_definitions.b.value, 17)
self.assertEqual(c.option_definitions.b.default, 17)
self.assertEqual(c.option_definitions.b.name, 'b')
self.assertEqual(c.option_definitions.c.name, 'c')
self.assertEqual(c.option_definitions.c.value, True)
def test_overlay_config_6(self):
"""test namespace definition w/getopt"""
n = config_manager.Namespace()
n.add_option('a', doc='the a', default=1)
n.b = 17
n.c = config_manager.Namespace()
n.c.add_option('extra', short_form='e', doc='the x', default=3.14159)
c = config_manager.ConfigurationManager([n], [getopt],
use_admin_controls=True,
#use_config_files=False,
use_auto_help=False,
argv_source=['--a', '2', '--c.extra',
'11.0'])
self.assertEqual(c.option_definitions.a, n.a)
self.assertEqual(type(c.option_definitions.b), config_manager.Option)
self.assertEqual(c.option_definitions.a.value, 2)
self.assertEqual(c.option_definitions.b.value, 17)
self.assertEqual(c.option_definitions.b.default, 17)
self.assertEqual(c.option_definitions.b.name, 'b')
self.assertEqual(c.option_definitions.c.extra.name, 'extra')
self.assertEqual(c.option_definitions.c.extra.doc, 'the x')
self.assertEqual(c.option_definitions.c.extra.default, 3.14159)
self.assertEqual(c.option_definitions.c.extra.value, 11.0)
def test_overlay_config_6a(self):
"""test namespace w/getopt w/short form"""
n = config_manager.Namespace()
n.add_option('a', doc='the a', default=1)
n.b = 17
n.c = config_manager.Namespace()
n.c.add_option('extra', 3.14159, 'the x', short_form='e')
c = config_manager.ConfigurationManager([n], [getopt],
use_admin_controls=True,
#use_config_files=False,
use_auto_help=False,
argv_source=['--a', '2', '-e', '11.0'])
self.assertEqual(c.option_definitions.a, n.a)
self.assertEqual(type(c.option_definitions.b), config_manager.Option)
self.assertEqual(c.option_definitions.a.value, 2)
self.assertEqual(c.option_definitions.b.value, 17)
self.assertEqual(c.option_definitions.b.default, 17)
self.assertEqual(c.option_definitions.b.name, 'b')
self.assertEqual(c.option_definitions.c.extra.name, 'extra')
self.assertEqual(c.option_definitions.c.extra.doc, 'the x')
self.assertEqual(c.option_definitions.c.extra.default, 3.14159)
self.assertEqual(c.option_definitions.c.extra.value, 11.0)
def test_overlay_config_7(self):
"""test namespace definition flat file"""
n = config_manager.Namespace()
n.add_option('a', doc='the a', default=1)
n.b = 17
n.c = config_manager.Namespace()
n.c.add_option('extra', 3.14159, 'the x')
n.c.add_option('string', 'fred', doc='str')
@contextmanager
def dummy_open():
yield ['# comment line to be ignored\n',
'\n', # blank line to be ignored
'a=22\n',
'b = 33\n',
'c.extra = 2.0\n',
'c.string = wilma\n'
]
#g = config_manager.ConfValueSource('dummy-filename', dummy_open)
c = config_manager.ConfigurationManager([n], [dummy_open],
use_admin_controls=True,
#use_config_files=False,
use_auto_help=False)
self.assertEqual(c.option_definitions.a, n.a)
self.assertEqual(type(c.option_definitions.b), config_manager.Option)
self.assertEqual(c.option_definitions.a.value, 22)
self.assertEqual(c.option_definitions.b.value, 33)
self.assertEqual(c.option_definitions.b.default, 17)
self.assertEqual(c.option_definitions.b.name, 'b')
self.assertEqual(c.option_definitions.c.extra.name, 'extra')
self.assertEqual(c.option_definitions.c.extra.doc, 'the x')
self.assertEqual(c.option_definitions.c.extra.default, 3.14159)
self.assertEqual(c.option_definitions.c.extra.value, 2.0)
self.assertEqual(c.option_definitions.c.string.name, 'string')
self.assertEqual(c.option_definitions.c.string.doc, 'str')
self.assertEqual(c.option_definitions.c.string.default, 'fred')
self.assertEqual(c.option_definitions.c.string.value, 'wilma')
def test_overlay_config_8(self):
"""test namespace definition ini file"""
n = config_manager.Namespace()
n.other = config_manager.Namespace()
n.other.add_option('t', 'tee', 'the t')
n.d = config_manager.Namespace()
n.d.add_option('a', 1, doc='the a')
n.d.b = 17
n.c = config_manager.Namespace()
n.c.add_option('extra', 3.14159, 'the x')
n.c.add_option('string', 'fred', doc='str')
ini_data = """
other.t=tea
# blank line to be ignored
d.a=22
d.b=33
c.extra = 2.0
c.string = wilma
"""
def strio():
return io.BytesIO(ini_data)
c = config_manager.ConfigurationManager([n], [strio],
use_admin_controls=True,
use_auto_help=False)
self.assertEqual(c.option_definitions.other.t.name, 't')
self.assertEqual(c.option_definitions.other.t.value, 'tea')
self.assertEqual(c.option_definitions.d.a, n.d.a)
self.assertEqual(type(c.option_definitions.d.b), config_manager.Option)
self.assertEqual(c.option_definitions.d.a.value, 22)
self.assertEqual(c.option_definitions.d.b.value, 33)
self.assertEqual(c.option_definitions.d.b.default, 17)
self.assertEqual(c.option_definitions.d.b.name, 'b')
self.assertEqual(c.option_definitions.c.extra.name, 'extra')
self.assertEqual(c.option_definitions.c.extra.doc, 'the x')
self.assertEqual(c.option_definitions.c.extra.default, 3.14159)
self.assertEqual(c.option_definitions.c.extra.value, 2.0)
self.assertEqual(c.option_definitions.c.string.name, 'string')
self.assertEqual(c.option_definitions.c.string.doc, 'str')
self.assertEqual(c.option_definitions.c.string.default, 'fred')
self.assertEqual(c.option_definitions.c.string.value, 'wilma')
def test_overlay_config_9(self):
"""test namespace definition ini file"""
n = config_manager.Namespace()
n.other = config_manager.Namespace()
n.other.add_option('t', 'tee', 'the t')
n.d = config_manager.Namespace()
n.d.add_option('a', 1, doc='the a')
n.d.b = 17
n.c = config_manager.Namespace()
n.c.add_option('extra', 3.14159, 'the x')
n.c.add_option('string', 'fred', 'str')
ini_data = """
other.t=tea
# blank line to be ignored
d.a=22
c.extra = 2.0
c.string = from ini
"""
def strio():
return io.BytesIO(ini_data)
e = DotDict()
e.fred = DotDict() # should be ignored
e.fred.t = 'T' # should be ignored
e.d = DotDict()
e.d.a = 16
e.c = DotDict()
e.c.extra = 18.6
e.c.string = 'from environment'
#fake_os_module = DotDict()
#fake_os_module.environ = e
#import configman.value_sources.for_mapping as fm
#saved_os = fm.os
#fm.os = fake_os_module
saved_environ = os.environ
os.environ = e
try:
c = config_manager.ConfigurationManager([n], [e, strio, getopt],
use_admin_controls=True,
use_auto_help=False,
argv_source=['--other.t', 'TTT',
'--c.extra', '11.0'])
finally:
os.environ = saved_environ
#fm.os = saved_os
self.assertEqual(c.option_definitions.other.t.name, 't')
self.assertEqual(c.option_definitions.other.t.value, 'TTT')
self.assertEqual(c.option_definitions.d.a, n.d.a)
self.assertEqual(type(c.option_definitions.d.b), config_manager.Option)
self.assertEqual(c.option_definitions.d.a.value, 22)
self.assertEqual(c.option_definitions.d.b.value, 17)
self.assertEqual(c.option_definitions.d.b.default, 17)
self.assertEqual(c.option_definitions.d.b.name, 'b')
self.assertEqual(c.option_definitions.c.extra.name, 'extra')
self.assertEqual(c.option_definitions.c.extra.doc, 'the x')
self.assertEqual(c.option_definitions.c.extra.default, 3.14159)
self.assertEqual(c.option_definitions.c.extra.value, 11.0)
self.assertEqual(c.option_definitions.c.string.name, 'string')
self.assertEqual(c.option_definitions.c.string.doc, 'str')
self.assertEqual(c.option_definitions.c.string.default, 'fred')
self.assertEqual(c.option_definitions.c.string.value, 'from ini')
def test_overlay_config_10(self):
"""test namespace definition ini file"""
n = config_manager.Namespace()
n.other = config_manager.Namespace()
n.other.add_option('t', 'tee', 'the t')
n.d = config_manager.Namespace()
n.d.add_option('a', 1, 'the a')
n.d.b = 17
n.c = config_manager.Namespace()
n.c.add_option('extra', 3.14159, 'the x')
n.c.add_option('string', 'fred', doc='str')
ini_data = """
other.t=tea
# blank line to be ignored
d.a=22
c.extra = 2.0
c.string = from ini
"""
def strio():
return io.BytesIO(ini_data)
e = DotDict()
e.other = DotDict()
e.other.t = 'T'
e.d = DotDict()
e.d.a = 16
e.c = DotDict()
e.c.extra = 18.6
e.c.string = 'from environment'
#v = config_manager.GetoptValueSource(
#argv_source=['--c.extra', '11.0']
#)
c = config_manager.ConfigurationManager([n], [e, strio, getopt],
use_admin_controls=True,
argv_source=['--c.extra', '11.0'],
#use_config_files=False,
use_auto_help=False)
self.assertEqual(c.option_definitions.other.t.name, 't')
self.assertEqual(c.option_definitions.other.t.value, 'tea')
self.assertEqual(c.option_definitions.d.a, n.d.a)
self.assertEqual(type(c.option_definitions.d.b), config_manager.Option)
self.assertEqual(c.option_definitions.d.a.value, 22)
self.assertEqual(c.option_definitions.d.b.value, 17)
self.assertEqual(c.option_definitions.d.b.default, 17)
self.assertEqual(c.option_definitions.d.b.name, 'b')
self.assertEqual(c.option_definitions.c.extra.name, 'extra')
self.assertEqual(c.option_definitions.c.extra.doc, 'the x')
self.assertEqual(c.option_definitions.c.extra.default, 3.14159)
self.assertEqual(c.option_definitions.c.extra.value, 11.0)
self.assertEqual(c.option_definitions.c.string.name, 'string')
self.assertEqual(c.option_definitions.c.string.doc, 'str')
self.assertEqual(c.option_definitions.c.string.default, 'fred')
self.assertEqual(c.option_definitions.c.string.value, 'from ini')
def test_mapping_types_1(self):
n = config_manager.Namespace()
n.add_option('a')
n.a.default = 1
n.a.doc = 'the a'
n.b = 17
n.c = c = config_manager.Namespace()
c.x = 'fred'
c.y = 3.14159
c.add_option('z')
c.z.default = 99
c.z.doc = 'the 99'
c = config_manager.ConfigurationManager([n],
use_admin_controls=False,
#use_config_files=False,
use_auto_help=False,
argv_source=[])
o = {"a": 2, "c.z": 22, "c.x": 'noob', "c.y": "2.89"}
c._overlay_value_sources_recurse(o)
e = DotDict()
e.a = 2
e.b = 17
e.c = DotDict()
e.c.x = 'noob'
e.c.y = 2.89
e.c.z = 22
d = c._generate_config(dict)
self.assertTrue(isinstance(d, dict))
self.assertTrue(isinstance(d['c'], dict))
self.assertEqual(d, e)
d = c._generate_config(DotDict)
self.assertTrue(isinstance(d, DotDict))
self.assertTrue(isinstance(d.c, DotDict))
self.assertEqual(d, e)
d = c._generate_config(DotDictWithAcquisition)
self.assertTrue(isinstance(d, DotDictWithAcquisition))
self.assertTrue(isinstance(d.c, DotDictWithAcquisition))
self.assertEqual(d, e)
self.assertEqual(d.a, 2)
self.assertEqual(d.c.a, 2)
self.assertEqual(d.c.b, 17)
def test_get_option_names(self):
n = config_manager.Namespace()
n.add_option('a', 1, 'the a')
n.b = 17
n.c = config_manager.Namespace()
n.c.add_option('fred')
n.c.add_option('wilma')
n.d = config_manager.Namespace()
n.d.add_option('fred')
n.d.add_option('wilma')
n.d.x = config_manager.Namespace()
n.d.x.add_option('size')
c = config_manager.ConfigurationManager([n],
use_admin_controls=False,
#use_config_files=False,
use_auto_help=False,
argv_source=[])
names = c.get_option_names()
names.sort()
e = ['a', 'b', 'c.fred', 'c.wilma', 'd.fred', 'd.wilma', 'd.x.size']
e.sort()
self.assertEqual(names, e)
def test_get_option(self):
n = config_manager.Namespace()
n.add_option('a', 1, 'the a')
n.b = 17
n.c = config_manager.Namespace()
n.c.add_option('fred')
n.c.add_option('wilma')
n.d = config_manager.Namespace()
n.d.add_option('fred')
n.d.add_option('wilma')
n.d.x = config_manager.Namespace()
n.d.x.add_option('size')
c = config_manager.ConfigurationManager([n],
use_admin_controls=True,
#use_config_files=False,
use_auto_help=False,
argv_source=[])
self.assertEqual(c._get_option('a'), n.a)
self.assertEqual(c._get_option('b').name, 'b')
self.assertEqual(c._get_option('c.fred'), n.c.fred)
self.assertEqual(c._get_option('c.wilma'), n.c.wilma)
self.assertEqual(c._get_option('d.fred'), n.d.fred)
self.assertEqual(c._get_option('d.wilma'), n.d.wilma)
self.assertEqual(c._get_option('d.wilma'), n.d.wilma)
self.assertEqual(c._get_option('d.x.size'), n.d.x.size)
def test_output_summary(self):
"""test_output_summary: the output from help"""
n = config_manager.Namespace()
n.add_option('aaa', False, 'the a', short_form='a')
n.add_option('bee', True)
n.b = 17
n.c = config_manager.Namespace()
n.c.add_option('fred', doc='husband from Flintstones')
n.d = config_manager.Namespace()
n.d.add_option('fred', doc='male neighbor from I Love Lucy')
n.d.x = config_manager.Namespace()
n.d.x.add_option('size', 100, 'how big in tons', short_form='s')
n.d.x.add_option('password', 'secrets', 'the password')
c = config_manager.ConfigurationManager([n],
use_admin_controls=True,
#use_config_files=False,
use_auto_help=False,
argv_source=[],
#app_name='foo',
#app_version='1.0',
#app_description='This app is cool.'
)
s = StringIO()
c.output_summary(output_stream=s)
r = s.getvalue()
self.assertTrue('Options:\n' in r)
options = r.split('Options:\n')[1]
s.close()
expect = [
('-a, --aaa', 'the a (default: False)'),
('--b', '(default: 17)'),
('--bee', '(default: True)'),
('--c.fred', 'husband from Flintstones'),
('--d.fred', 'male neighbor from I Love Lucy'),
('--d.x.password', 'the password (default: *********)'),
('-s, --d.x.size', 'how big in tons (default: 100)'),
]
point = -1 # used to assert the sort order
for i, (start, end) in enumerate(expect):
self.assertTrue(point < options.find(start + ' ')
< options.find(' ' + end))
point = options.find(end)
def test_output_summary_header(self):
"""a config with an app_name, app_version and app_description is
printed on the output summary.
"""
n = config_manager.Namespace()
n.add_option('aaa', False, 'the a', short_form='a')
c = config_manager.ConfigurationManager(n,
use_admin_controls=True,
use_auto_help=False,
argv_source=[],
)
def get_output(conf):
s = StringIO()
conf.output_summary(output_stream=s)
return s.getvalue()
output = get_output(c)
assert 'Options:' in output
self.assertTrue('Application:' not in output)
c.app_name = 'foobar'
output = get_output(c)
assert 'Options:' in output
self.assertTrue('Application: foobar' in output)
c.app_version = '1.0'
output = get_output(c)
assert 'Options:' in output
self.assertTrue('Application: foobar 1.0' in output)
c.app_description = "This ain't your mama's app"
output = get_output(c)
assert 'Options:' in output
self.assertTrue('Application: foobar 1.0\n' in output)
self.assertTrue("This ain't your mama's app\n\n" in output)
def test_eval_as_converter(self):
"""does eval work as a to string converter on an Option object?"""
n = config_manager.Namespace()
n.add_option('aaa', doc='the a', default='', short_form='a')
self.assertEqual(n.aaa.value, '')
def test_RequiredConfig_get_required_config(self):
class Foo:
required_config = {'foo': True}
class Bar:
required_config = {'bar': False}
class Poo:
pass
class Combined(config_manager.RequiredConfig, Foo, Poo, Bar):
pass
result = Combined.get_required_config()
self.assertEqual(result.foo.value, True)
self.assertEqual(result.bar.value, False)
c = Combined()
c.config_assert({'foo': True, 'bar': False})
self.assertRaises(AssertionError, c.config_assert, ({},))
def test_app_name_from_app_obj(self):
class MyApp(config_manager.RequiredConfig):
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
def __init__(inner_self, config):
inner_self.config = config
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
c = config_manager.ConfigurationManager([n],
use_admin_controls=True,
use_auto_help=False,
argv_source=[])
self.assertEqual(c.app_name, MyApp.app_name)
self.assertEqual(c.app_version, MyApp.app_version)
self.assertEqual(c.app_description, MyApp.app_description)
def test_help_out(self):
class MyApp(config_manager.RequiredConfig):
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
required_config = config_manager.Namespace()
required_config.add_option('password', 'fred', 'the password')
def __init__(inner_self, config):
inner_self.config = config
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
class MyConfigManager(config_manager.ConfigurationManager):
def output_summary(inner_self):
output_stream = StringIO()
r = super(MyConfigManager, inner_self).output_summary(
output_stream=output_stream,
block_password=False)
r = output_stream.getvalue()
output_stream.close()
self.assertTrue('Application: fred 1.0' in r)
self.assertTrue('my app\n\n' in r)
self.assertTrue('Options:\n' in r)
self.assertTrue(' --help' in r and 'print this' in r)
self.assertTrue('print this (default: True)' not in r)
self.assertTrue(' --password' in r)
self.assertTrue('the password (default: *********)' in r)
self.assertTrue(' --admin.application' not in r)
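        # use_auto_help=True makes the manager print the summary and call
        # sys.exit() when --help is given; stub sys.exit so the test continues.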
def my_exit():
pass
old_sys_exit = sys.exit
sys.exit = my_exit
try:
MyConfigManager(n,
[getopt],
use_admin_controls=True,
use_auto_help=True,
argv_source=['--password=wilma', '--help'])
finally:
sys.exit = old_sys_exit
def test_write_gets_called(self):
class MyApp(config_manager.RequiredConfig):
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
required_config = config_manager.Namespace()
required_config.add_option('password', 'fred', 'the password')
def __init__(inner_self, config):
inner_self.config = config
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
class MyConfigManager(config_manager.ConfigurationManager):
def __init__(inner_self, *args, **kwargs):
inner_self.write_called = False
super(MyConfigManager, inner_self).__init__(*args, **kwargs)
def dump_conf(inner_self):
inner_self.dump_conf_called = True
def my_exit():
pass
old_sys_exit = sys.exit
sys.exit = my_exit
try:
c = MyConfigManager(n,
[getopt],
use_admin_controls=True,
use_auto_help=True,
argv_source=['--password=wilma',
'--admin.dump_conf=x.ini'])
self.assertEqual(c.dump_conf_called, True)
finally:
sys.exit = old_sys_exit
def test_get_options(self):
class MyApp(config_manager.RequiredConfig):
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
required_config = config_manager.Namespace()
required_config.add_option('password', 'fred', 'the password')
required_config.sub = config_manager.Namespace()
required_config.sub.add_option('name',
'ethel',
'the name')
def __init__(inner_self, config):
inner_self.config = config
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
c = config_manager.ConfigurationManager(n,
use_admin_controls=True,
use_auto_help=False,
argv_source=[])
r = c._get_options()
e = (
('admin.print_conf', 'print_conf', None),
('admin.application', 'application', MyApp),
('admin.dump_conf', 'dump_conf', ''),
('admin.conf', 'conf', './config.ini'),
('password', 'password', 'fred'),
('sub.name', 'name', 'ethel'))
for expected, result in zip(e, r):
expected_key, expected_name, expected_default = expected
result_key, result_option = result
self.assertEqual(expected_key, result_key)
self.assertEqual(expected_name, result_option.name)
self.assertEqual(expected_default, result_option.default)
def test_log_config(self):
class MyApp(config_manager.RequiredConfig):
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
required_config = config_manager.Namespace()
required_config.add_option('password', 'fred', 'the password')
required_config.sub = config_manager.Namespace()
required_config.sub.add_option('name',
'ethel',
'the name')
def __init__(inner_self, config):
inner_self.config = config
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
c = config_manager.ConfigurationManager(n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
argv_source=['--sub.name=wilma'])
class FakeLogger(object):
def __init__(self):
self.log = []
def info(self, *args):
self.log.append(args[0] % args[1:])
fl = FakeLogger()
c.log_config(fl)
e = ["app_name: fred",
"app_version: 1.0",
"current configuration:",
"password: *********",
"sub.name: wilma"]
for expected, received in zip(e, fl.log):
self.assertEqual(expected, received)
def test_extra_commandline_parameters(self):
class MyApp(config_manager.RequiredConfig):
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
required_config = config_manager.Namespace()
required_config.add_option('password', 'fred', 'the password')
required_config.sub = config_manager.Namespace()
required_config.sub.add_option('name',
'ethel',
'the name')
def __init__(inner_self, config):
inner_self.config = config
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
c = config_manager.ConfigurationManager(n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
argv_source=['--sub.name=wilma',
'argument 1',
'argument 2',
'argument 3'])
expected = ['argument 1',
'argument 2',
'argument 3']
self.assertEqual(c.args, expected)
def test_print_conf_called(self):
class MyApp(config_manager.RequiredConfig):
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
required_config = config_manager.Namespace()
required_config.add_option('password', 'fred', 'the password')
required_config.sub = config_manager.Namespace()
required_config.sub.add_option('name',
'ethel',
'the name')
def __init__(inner_self, config):
inner_self.config = config
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
class MyConfigManager(config_manager.ConfigurationManager):
def __init__(inner_self, *args, **kwargs):
inner_self.write_called = False
super(MyConfigManager, inner_self).__init__(*args, **kwargs)
def print_conf(inner_self):
inner_self.print_conf_called = True
c = MyConfigManager(n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
quit_after_admin=False,
argv_source=['--admin.print_conf=ini',
'argument 1',
'argument 2',
'argument 3'])
self.assertEqual(c.print_conf_called, True)
def test_non_compliant_app_object(self):
# the MyApp class doesn't define required config
class MyApp():
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
def __init__(inner_self, config):
inner_self.config = config
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
c = config_manager.ConfigurationManager(n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
argv_source=['argument 1',
'argument 2',
'argument 3'])
conf = c.get_config()
self.assertEqual(conf.keys(), ['admin']) # there should be nothing but
# the admin key
def test_print_conf(self):
n = config_manager.Namespace()
class MyConfigManager(config_manager.ConfigurationManager):
def __init__(inner_self, *args, **kwargs):
inner_self.write_called = False
super(MyConfigManager, inner_self).__init__(*args, **kwargs)
def print_conf(self):
temp_stdout = sys.stdout
sys.stdout = 17
try:
super(MyConfigManager, self).print_conf()
finally:
sys.stdout = temp_stdout
def write_conf(inner_self, file_type, opener, skip_keys=None):
self.assertEqual(file_type, 'ini')
with opener() as f:
self.assertEqual(f, 17)
MyConfigManager(
n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
quit_after_admin=False,
argv_source=['--admin.print_conf=ini',
'argument 1',
'argument 2',
'argument 3'],
config_pathname='fred')
def test_dump_conf(self):
n = config_manager.Namespace()
class MyConfigManager(config_manager.ConfigurationManager):
def __init__(inner_self, *args, **kwargs):
inner_self.write_called = False
super(MyConfigManager, inner_self).__init__(*args, **kwargs)
def write_conf(inner_self, file_type, opener, skip_keys=None):
self.assertEqual(file_type, 'ini')
self.assertEqual(opener.args, ('fred.ini', 'w'))
MyConfigManager(
n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
quit_after_admin=False,
argv_source=['--admin.dump_conf=fred.ini',
'argument 1',
'argument 2',
'argument 3'],
config_pathname='fred'
)
def test_print_conf_some_options_excluded(self):
n = config_manager.Namespace()
n.add_option('gender',
default='Male',
doc='What kind of genitalia?')
n.add_option('salary',
default=10000,
doc='How much do you earn?',
exclude_from_print_conf=True
)
old_stdout = sys.stdout
temp_output = StringIO()
sys.stdout = temp_output
try:
config_manager.ConfigurationManager(
n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
quit_after_admin=False,
argv_source=['--admin.print_conf=ini'],
config_pathname='fred'
)
finally:
sys.stdout = old_stdout
printed = temp_output.getvalue()
self.assertTrue('gender' in printed)
self.assertTrue('salary' not in printed)
def test_dump_conf_some_options_excluded(self):
n = config_manager.Namespace()
n.add_option('gender',
default='Male',
doc='What kind of genitalia?',
exclude_from_print_conf=True)
n.add_option('salary',
default=10000,
doc='How much do you earn?',
exclude_from_dump_conf=True
)
try:
config_manager.ConfigurationManager(
n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
quit_after_admin=False,
argv_source=['--admin.dump_conf=foo.conf'],
config_pathname='fred'
)
printed = open('foo.conf').read()
self.assertTrue('gender' in printed)
self.assertTrue('salary' not in printed)
finally:
if os.path.isfile('foo.conf'):
os.remove('foo.conf')
def test_config_pathname_set(self):
class MyConfigManager(config_manager.ConfigurationManager):
def __init__(inner_self, *args, **kwargs):
inner_self.write_called = False
super(MyConfigManager, inner_self).__init__(*args, **kwargs)
def get_config_pathname(self):
temp_fn = os.path.isdir
os.path.isdir = lambda x: False
try:
r = super(MyConfigManager, self)._get_config_pathname()
finally:
os.path.isdir = temp_fn
return r
self.assertRaises(AllHandlersFailedException,
MyConfigManager,
use_admin_controls=True,
use_auto_help=False,
quit_after_admin=False,
argv_source=['argument 1',
'argument 2',
'argument 3'],
config_pathname='fred')
def test_ConfigurationManager_block_password(self):
function = config_manager.ConfigurationManager._block_password
self.assertEqual(function('foo', 'bar', 'peter', block_password=False),
('foo', 'bar', 'peter'))
self.assertEqual(function('foo', 'bar', 'peter', block_password=True),
('foo', 'bar', 'peter'))
self.assertEqual(function('foo', 'password', 'peter',
block_password=True),
('foo', 'password', '*********'))
self.assertEqual(function('foo', 'my_password', 'peter',
block_password=True),
('foo', 'my_password', '*********'))
def test_do_aggregations(self):
def aggregation_test(all_config, local_namespace, args):
self.assertTrue('password' in all_config)
self.assertTrue('sub1' in all_config)
self.assertTrue('name' in all_config.sub1)
self.assertTrue('name' in local_namespace)
self.assertTrue('spouse' in local_namespace)
self.assertEqual(len(args), 2)
return ('%s married %s using password %s but '
'divorced because of %s.' % (local_namespace.name,
local_namespace.spouse,
all_config.password,
args[1]))
class MyApp(config_manager.RequiredConfig):
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
required_config = config_manager.Namespace()
required_config.add_option('password', '@$*$&26Ht', 'the password')
required_config.namespace('sub1')
required_config.sub1.add_option('name', 'ethel', 'the name')
required_config.sub1.add_option('spouse', 'fred', 'the spouse')
required_config.sub1.add_aggregation('statement', aggregation_test)
def __init__(inner_self, config):
inner_self.config = config
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
c = config_manager.ConfigurationManager(n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
argv_source=['--sub1.name=wilma',
'arg1',
'arg2'])
config = c.get_config()
self.assertEqual(config.sub1.statement,
'wilma married fred using password @$*$&26Ht '
'but divorced because of arg2.')
def test_context(self):
class AggregatedValue(object):
def __init__(self, value):
self.value = value
def close(self):
self.value = None
def aggregation_test(all_config, local_namespace, args):
self.assertTrue('password' in all_config)
self.assertTrue('sub1' in all_config)
self.assertTrue('name' in all_config.sub1)
self.assertTrue('name' in local_namespace)
self.assertTrue('spouse' in local_namespace)
self.assertEqual(len(args), 2)
return AggregatedValue('%s married %s using password %s but '
'divorced because of %s.' %
(local_namespace.name,
local_namespace.spouse,
all_config.password,
args[1]))
class MyApp(config_manager.RequiredConfig):
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
required_config = config_manager.Namespace()
required_config.add_option('password', '@$*$&26Ht', 'the password')
required_config.namespace('sub1')
required_config.sub1.add_option('name', 'ethel', 'the name')
required_config.sub1.add_option('spouse', 'fred', 'the spouse')
required_config.sub1.add_aggregation('statement', aggregation_test)
def __init__(inner_self, config):
inner_self.config = config
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
c = config_manager.ConfigurationManager(n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
argv_source=['--sub1.name=wilma',
'arg1',
'arg2'])
with c.context() as config:
statement = config.sub1.statement
self.assertEqual(statement.value,
'wilma married fred using password @$*$&26Ht '
'but divorced because of arg2.')
self.assertTrue(statement.value is None)
def test_failing_aggregate_error_bubbling(self):
"""Reproduces and assures this issue
https://github.com/mozilla/configman/issues/21
"""
class AggregatedValue(object):
def __init__(self, value):
self.value = value
def close(self):
self.value = None
class SomeException(Exception):
pass
def aggregation_test(all_config, local_namespace, args):
# the aggregator might be broken
raise SomeException('anything')
class MyApp(config_manager.RequiredConfig):
app_name = 'fred'
app_version = '1.0'
app_description = "my app"
required_config = config_manager.Namespace()
required_config.add_aggregation('statement', aggregation_test)
n = config_manager.Namespace()
n.admin = config_manager.Namespace()
n.admin.add_option('application',
MyApp,
'the app object class')
c = config_manager.ConfigurationManager(n,
[getopt],
use_admin_controls=True,
use_auto_help=False,
argv_source=[])
contextmanager_ = c.context()
self.assertRaises(SomeException, contextmanager_.__enter__)
| peterbe/configman | configman/tests/test_config_manager.py | Python | bsd-3-clause | 56,350 |
#!/usr/bin/env python
from amino import *
print Quat(XAngle(3.14)) * Quat(YAngle(3.14))
# print Quat((1,2,3,4))
# print Quat(RotMat(YAngle(3.14)))
# print Quat(RotMat(RotMat(YAngle(3.14))))
# print TfMat( (XAngle(3.14), (0,0,1)) ).translation()
# print aa.RotMat(aa.YAngle(3.14)).cx
# print aa.RotMat(aa.YAngle(3.14)).cy
# print aa.RotMat(aa.YAngle(3.14)).cz
# print aa.RotMat(1).cx
# print aa.RotMat(1).cy
# print aa.RotMat(1).cz
print "end"
| golems/amino | demo/cpython/tf.py | Python | bsd-3-clause | 453 |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("AdaBoostClassifier" , "FourClass_10" , "sqlite")
| antoinecarme/sklearn2sql_heroku | tests/classification/FourClass_10/ws_FourClass_10_AdaBoostClassifier_sqlite_code_gen.py | Python | bsd-3-clause | 146 |
import resource
import numpy as np
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, \
UnParametrizedHyperparameter
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class LibSVM_SVR(AutoSklearnRegressionAlgorithm):
def __init__(self, kernel, C, epsilon, tol, shrinking, gamma=0.0,
degree=3, coef0=0.0, verbose=False,
max_iter=-1, random_state=None):
self.kernel = kernel
self.C = C
self.epsilon = epsilon
self.tol = tol
self.shrinking = shrinking
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
        self.estimator = None
        self.scaler = None
def fit(self, X, Y):
        import sklearn.svm
        import sklearn.preprocessing
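        # Derive the libsvm kernel cache size from the soft address-space limit:
        # convert RLIMIT_AS (bytes) to MB, subtract the current resident set
        # size, and leave some headroom; fall back to 200 MB when unbounded.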
try:
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
if soft > 0:
soft /= 1024 * 1024
maxrss = resource.getrusage(resource.RUSAGE_SELF)[2] / 1024
cache_size = (soft - maxrss) / 1.5
else:
cache_size = 200
except Exception:
cache_size = 200
self.C = float(self.C)
self.epsilon = float(self.epsilon)
self.tol = float(self.tol)
self.shrinking = self.shrinking == 'True'
self.degree = int(self.degree)
self.gamma = float(self.gamma)
if self.coef0 is None:
self.coef0 = 0.0
else:
self.coef0 = float(self.coef0)
self.verbose = int(self.verbose)
self.max_iter = int(self.max_iter)
self.estimator = sklearn.svm.SVR(
kernel=self.kernel,
C=self.C,
epsilon=self.epsilon,
tol=self.tol,
shrinking=self.shrinking,
degree=self.degree,
gamma=self.gamma,
coef0=self.coef0,
cache_size=cache_size,
verbose=self.verbose,
max_iter=self.max_iter
)
self.scaler = sklearn.preprocessing.StandardScaler(copy=True)
self.scaler.fit(Y)
Y_scaled = self.scaler.transform(Y)
self.estimator.fit(X, Y_scaled)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
if self.scaler is None:
raise NotImplementedError
Y_pred = self.estimator.predict(X)
return self.scaler.inverse_transform(Y_pred)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'SVR',
'name': 'Support Vector Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'prefers_data_normalized': True,
'is_deterministic': True,
'input': (SPARSE, DENSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
# Copied from libsvm_c
C = UniformFloatHyperparameter(
name="C", lower=0.03125, upper=32768, log=True, default=1.0)
kernel = CategoricalHyperparameter(
name="kernel", choices=['linear', 'poly', 'rbf', 'sigmoid'],
default="rbf")
degree = UniformIntegerHyperparameter(
name="degree", lower=1, upper=5, default=3)
# Changed the gamma value to 0.0 (is 0.1 for classification)
gamma = UniformFloatHyperparameter(
name="gamma", lower=3.0517578125e-05, upper=8, log=True, default=0.1)
# TODO this is totally ad-hoc
coef0 = UniformFloatHyperparameter(
name="coef0", lower=-1, upper=1, default=0)
        # probability is not a hyperparameter, but an argument to the SVM algorithm
shrinking = CategoricalHyperparameter(
name="shrinking", choices=["True", "False"], default="True")
tol = UniformFloatHyperparameter(
name="tol", lower=1e-5, upper=1e-1, default=1e-3, log=True)
max_iter = UnParametrizedHyperparameter("max_iter", -1)
# Random Guess
epsilon = UniformFloatHyperparameter(name="epsilon", lower=0.001,
upper=1, default=0.1, log=True)
cs = ConfigurationSpace()
cs.add_hyperparameter(C)
cs.add_hyperparameter(kernel)
cs.add_hyperparameter(degree)
cs.add_hyperparameter(gamma)
cs.add_hyperparameter(coef0)
cs.add_hyperparameter(shrinking)
cs.add_hyperparameter(tol)
cs.add_hyperparameter(max_iter)
cs.add_hyperparameter(epsilon)
degree_depends_on_kernel = InCondition(child=degree, parent=kernel,
values=('poly', 'rbf', 'sigmoid'))
gamma_depends_on_kernel = InCondition(child=gamma, parent=kernel,
values=('poly', 'rbf'))
coef0_depends_on_kernel = InCondition(child=coef0, parent=kernel,
values=('poly', 'sigmoid'))
cs.add_condition(degree_depends_on_kernel)
cs.add_condition(gamma_depends_on_kernel)
cs.add_condition(coef0_depends_on_kernel)
return cs
| hmendozap/auto-sklearn | autosklearn/pipeline/components/regression/libsvm_svr.py | Python | bsd-3-clause | 5,653 |
from django.conf import settings
from paymentexpress.gateway import (
AUTH, COMPLETE, PURCHASE, REFUND, VALIDATE, Gateway
)
from paymentexpress.models import OrderTransaction
from oscar.apps.payment.exceptions import (UnableToTakePayment,
InvalidGatewayRequestError)
import random
class Facade(object):
"""
A bridge between oscar's objects and the core gateway object
"""
def __init__(self):
self.gateway = Gateway(
settings.PAYMENTEXPRESS_POST_URL,
settings.PAYMENTEXPRESS_USERNAME,
settings.PAYMENTEXPRESS_PASSWORD,
getattr(settings, 'PAYMENTEXPRESS_CURRENCY', 'AUD')
)
def _check_amount(self, amount):
if amount == 0 or amount is None:
raise UnableToTakePayment("Order amount must be non-zero")
def _get_merchant_reference(self, order_number, txn_type):
num_previous = OrderTransaction.objects.filter(
order_number=order_number,
txn_type=txn_type).count()
        # Get a random number to append to the end. This solves the problem
        # where a previous request crashed out without saving a model instance,
        # which would otherwise lead to a clash of merchant references.
rand = "%04.f" % (random.random() * 10000)
return u'%s_%s_%d_%s' % (
order_number, txn_type.upper(), num_previous + 1, rand
)
def _get_friendly_decline_message(self):
return ('The transaction was declined by your bank - ' +
'please check your bankcard details and try again')
def _handle_response(self, txn_type, order_number, amount, response):
OrderTransaction.objects.create(
order_number=order_number,
txn_type=txn_type,
txn_ref=response['dps_txn_ref'],
amount=amount,
response_code=response['response_code'],
response_message=response.get_message(),
request_xml=response.request_xml,
response_xml=response.response_xml
)
if response.is_successful():
return {
'txn_reference': response['dps_txn_ref'],
'partner_reference': response['dps_billing_id'],
}
elif response.is_declined():
raise UnableToTakePayment(self._get_friendly_decline_message())
else:
raise InvalidGatewayRequestError(response.get_message())
def _format_card_date(self, str_date):
# Dirty hack so that Oscar's BankcardForm doesn't need to be overridden
if str_date is None:
return None
return str_date.replace('/', '')
def authorise(self, order_number, amount, bankcard):
"""
Authorizes a transaction.
Must be completed within 7 days using the "Complete" TxnType
"""
self._check_amount(amount)
card_issue_date = self._format_card_date(bankcard.start_date)
card_expiry_date = self._format_card_date(bankcard.expiry_date)
merchant_ref = self._get_merchant_reference(order_number, AUTH)
res = self.gateway.authorise(card_holder=bankcard.card_holder_name,
card_number=bankcard.card_number,
card_issue_date=card_issue_date,
card_expiry=card_expiry_date,
cvc2=bankcard.cvv,
amount=amount,
merchant_ref=merchant_ref)
return self._handle_response(AUTH, order_number, amount, res)
def complete(self, order_number, amount, dps_txn_ref):
"""
Completes (settles) a pre-approved Auth Transaction.
The DpsTxnRef value returned by the original approved Auth transaction
must be supplied.
"""
self._check_amount(amount)
merchant_ref = self._get_merchant_reference(order_number, COMPLETE)
res = self.gateway.complete(amount=amount,
dps_txn_ref=dps_txn_ref,
merchant_ref=merchant_ref)
return self._handle_response(COMPLETE, order_number, amount, res)
def purchase(self, order_number, amount, billing_id=None, bankcard=None):
"""
Purchase - Funds are transferred immediately.
"""
self._check_amount(amount)
res = None
merchant_ref = self._get_merchant_reference(order_number, PURCHASE)
if billing_id:
res = self.gateway.purchase(amount=amount,
dps_billing_id=billing_id,
merchant_ref=merchant_ref)
elif bankcard:
card_issue_date = self._format_card_date(bankcard.start_date)
card_expiry_date = self._format_card_date(bankcard.expiry_date)
res = self.gateway.purchase(amount=amount,
card_holder=bankcard.card_holder_name,
card_number=bankcard.card_number,
card_issue_date=card_issue_date,
card_expiry=card_expiry_date,
cvc2=bankcard.cvv,
merchant_ref=merchant_ref,
enable_add_bill_card=1)
else:
raise ValueError("You must specify either a billing id or " +
"a merchant reference")
return self._handle_response(PURCHASE, order_number, amount, res)
def refund(self, order_number, amount, dps_txn_ref):
"""
Refund - Funds transferred immediately.
Must be enabled as a special option.
"""
self._check_amount(amount)
merchant_ref = self._get_merchant_reference(order_number, REFUND)
res = self.gateway.refund(amount=amount,
dps_txn_ref=dps_txn_ref,
merchant_ref=merchant_ref)
return self._handle_response(REFUND, order_number, amount, res)
def validate(self, bankcard):
"""
Validation Transaction.
Effects a $1.00 Auth to validate card details including expiry date.
Often utilised with the EnableAddBillCard property set to 1 to
automatically add to Billing Database if the transaction is approved.
"""
amount = 1.00
card_issue_date = self._format_card_date(bankcard.start_date)
card_expiry_date = self._format_card_date(bankcard.expiry_date)
res = self.gateway.validate(amount=amount,
card_holder=bankcard.card_holder_name,
card_number=bankcard.card_number,
card_issue_date=card_issue_date,
card_expiry=card_expiry_date,
cvc2=bankcard.cvv,
enable_add_bill_card=1)
return self._handle_response(VALIDATE, None, amount, res)
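# Illustrative usage sketch (not part of the original module). The order number,
# amount and `bankcard` object below are placeholders supplied by the checkout
# flow; they are only assumed to satisfy the interfaces used above.
#
#     facade = Facade()
#     result = facade.purchase(order_number='100001', amount=25.00,
#                              bankcard=bankcard)
#     # result['txn_reference'] / result['partner_reference'] identify the
#     # transaction at Payment Express.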
| django-oscar/django-oscar-paymentexpress | paymentexpress/facade.py | Python | bsd-3-clause | 7,203 |
from django.apps import AppConfig
class WagtailLinkchekerAppConfig(AppConfig):
name = 'wagtaillinkchecker'
verbose_name = "Wagtail Link Checker"
| takeflight/wagtail-linkchecker | wagtaillinkchecker/apps.py | Python | bsd-3-clause | 155 |
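# Note: this notebook solution snippet assumes `plt` (matplotlib.pyplot) and the
# `count_weekday_years` DataFrame of per-weekday counts are defined earlier in
# the notebook.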
fig, ax = plt.subplots()
count_weekday_years.median(axis=0).plot(kind='barh', ax=ax, color='#66b266')
xticks = ax.set_yticklabels(['Monday', 'Tuesday', 'Wednesday', "Thursday", "Friday", "Saturday", "Sunday"])
| jorisvandenbossche/DS-python-data-analysis | notebooks/_solutions/case2_biodiversity_analysis41.py | Python | bsd-3-clause | 209 |
# -*- coding: utf-8 -*-
"""STOMP client
.. module:: network.jms.stomp_client
:platform: Unix
:synopsis: STOMP client
.. moduleauthor:: Petr Rašek <[email protected]>
"""
"""
Events:
-------
jms_before_connect
jms_after_connect
jms_before_send
jms_after_send
jms_before_receive
jms_after_receive
jms_before_browse
jms_after_browse
"""
from hydratk.core.masterhead import MasterHead
from hydratk.core import event
from logging import basicConfig, getLogger, DEBUG, CRITICAL
from stompest.config import StompConfig
from stompest.sync import Stomp
from stompest.protocol import StompSpec
from stompest.error import StompError
from sys import version_info
getLogger('stompest.sync.client').setLevel(CRITICAL)
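# Translation table between JMS-style header names and the STOMP frame headers
# understood by the broker.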
mapping = {
'JMSCorrelationID': 'correlation-id',
'JMSExpiration': 'expires',
'JMSDeliveryMode': 'persistent',
'JMSPriority': 'priority',
'JMSReplyTo': 'reply-to',
'JMSType': 'type',
'JMSMessageID': 'message-id',
'JMSDestination': 'destination',
'JMSTimestamp': 'timestamp',
'JMSRedelivered': 'redelivered'
}
class JMSClient(object):
"""Class JMSClient
"""
_mh = None
_client = None
_host = None
_port = None
_user = None
_passw = None
_verbose = None
_is_connected = None
def __init__(self, verbose=False):
"""Class constructor
Called when the object is initialized
Args:
verbose (bool): verbose mode
"""
try:
self._mh = MasterHead.get_head()
self._verbose = verbose
if (self._verbose):
basicConfig()
getLogger().setLevel(DEBUG)
except StompError as ex:
self._mh.demsg('htk_on_error', ex, self._mh.fromhere())
@property
def client(self):
""" STOMP client property getter """
return self._client
@property
def host(self):
""" server host property getter """
return self._host
@property
def port(self):
""" server port property getter """
return self._port
@property
def user(self):
""" username property getter """
return self._user
@property
def passw(self):
""" user password property getter """
return self._passw
@property
def verbose(self):
""" verbose mode property getter """
return self._verbose
@property
def is_connected(self):
""" is_connected property getter """
return self._is_connected
def connect(self, host, port=61613, user=None, passw=None, timeout=10):
"""Method connects to server
Args:
host (str): hostname
port (str): port
user (str): username
passw (str): password
timeout (int): timeout
Returns:
bool: result
Raises:
event: jms_before_connect
            event: jms_after_connect
"""
try:
msg = 'host:{0}, port:{1}, user:{2}, passw:{3}, timeout:{4}'.format(
host, port, user, passw, timeout)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_jms_connecting', msg), self._mh.fromhere())
ev = event.Event(
'jms_before_connect', host, port, user, passw, timeout)
if (self._mh.fire_event(ev) > 0):
host = ev.argv(0)
port = ev.argv(1)
user = ev.argv(2)
passw = ev.argv(3)
timeout = ev.argv(4)
self._host = host
self._port = port
self._user = user
self._passw = passw
if (ev.will_run_default()):
self._client = Stomp(StompConfig('tcp://{0}:{1}'.format(self._host, self._port),
login=self._user, passcode=self._passw))
self._client.connect(
connectTimeout=timeout, connectedTimeout=timeout)
self._is_connected = True
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_jms_connected'), self._mh.fromhere())
ev = event.Event('jms_after_connect')
self._mh.fire_event(ev)
return True
except StompError as ex:
self._mh.demsg('htk_on_error', ex, self._mh.fromhere())
return False
def disconnect(self):
"""Method disconnects from server
Args:
none
Returns:
bool: result
"""
try:
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_jms_disconnecting'), self._mh.fromhere())
if (not self._is_connected):
self._mh.demsg('htk_on_warning', self._mh._trn.msg(
'htk_jms_not_connected'), self._mh.fromhere())
return False
else:
self._client.disconnect()
self._client.close()
self._is_connected = False
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_jms_disconnected'), self._mh.fromhere())
return True
except StompError as ex:
self._mh.demsg('htk_on_error', ex, self._mh.fromhere())
return False
def send(self, destination_name, message, destination_type='queue', headers={}):
"""Method sends message
JMS headers - JMSCorrelationID, JMSExpiration, JMSDeliveryMode, JMSPriority,
JMSReplyTo, JMSType
Args:
destination_name (str): queue|topic name
message (str): message
destination_type (str): queue|topic
headers (dict): JMS headers, key - title, value - string
Returns:
bool: result
Raises:
event: jms_before_send
event: jms_after_send
"""
try:
msg = 'destination_name:{0}, message:{1}, destination_type:{2}, headers:{3}'.format(
destination_name, message, destination_type, headers)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_jms_sending_msg', msg), self._mh.fromhere())
if (not self._is_connected):
self._mh.demsg('htk_on_warning', self._mh._trn.msg(
'htk_jms_not_connected'), self._mh.fromhere())
return False
ev = event.Event(
'jms_before_send', destination_name, message, destination_type, headers)
if (self._mh.fire_event(ev) > 0):
destination_name = ev.argv(0)
message = ev.argv(1)
destination_type = ev.argv(2)
headers = ev.argv(3)
if (ev.will_run_default()):
headers_new = {}
for key, value in headers.items():
if (key in mapping):
headers_new[mapping[key]] = value
self._client.send('/{0}/{1}'.format(destination_type, destination_name), message if (
version_info[0] == 2) else message.encode('utf-8'), headers_new)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_jms_msg_sent'), self._mh.fromhere())
ev = event.Event('jms_after_send')
self._mh.fire_event(ev)
return True
except StompError as ex:
self._mh.demsg('htk_on_error', ex, self._mh.fromhere())
return False
def receive(self, destination_name, cnt=1):
"""Method receives messages
Args:
destination_name (str): queue name
cnt (int): count of messages
Returns:
list: messages as dictionary {'message', JMS headers}
Raises:
event: jms_before_receive
event: jms_after_receive
"""
try:
msg = 'destination_name:{0}, count:{1}'.format(
destination_name, cnt)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_jms_receiving_msg', msg), self._mh.fromhere())
if (not self._is_connected):
self._mh.demsg('htk_on_warning', self._mh._trn.msg(
'htk_jms_not_connected'), self._mh.fromhere())
return None
ev = event.Event('jms_before_receive', destination_name, cnt)
if (self._mh.fire_event(ev) > 0):
destination_name = ev.argv(0)
cnt = ev.argv(1)
if (ev.will_run_default()):
token = self._client.subscribe('/queue/{0}'.format(destination_name),
{StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL})
msgs = []
i = 0
while (i < cnt and self._client.canRead(1)):
frame = self._client.receiveFrame()
if (frame.command != 'MESSAGE'):
break
self._client.ack(frame)
msgs.append(frame)
i = i + 1
self._client.unsubscribe(token)
messages = []
for msg in msgs:
message = {}
message['message'] = msg.body.decode()
for header in msg.rawHeaders:
if (header[0] in mapping.values()):
message[
list(mapping.keys())[list(mapping.values()).index(header[0])]] = header[1]
messages.append(message)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_jms_msg_received', len(messages)), self._mh.fromhere())
ev = event.Event('jms_after_receive')
self._mh.fire_event(ev)
return messages
except StompError as ex:
self._mh.demsg('htk_on_error', ex, self._mh.fromhere())
return None
def browse(self, destination_name, cnt=100, jms_correlation_id=None, jms_type=None):
"""Method browses queue
Args:
destination_name (str): queue name
cnt (int): count of messages
jms_correlation_id (str): requested JMSCorrelationID
jms_type (str): requested JMSType
Returns:
list: messages as dictionary {'message', JMS headers}
Raises:
event: jms_before_browse
event: jms_after_browse
"""
try:
msg = 'destination_name:{0}, count:{1}, jms_correlation_id:{2}, jms_type:{3}'.format(
destination_name, cnt, jms_correlation_id, jms_type)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_jms_browsing', msg), self._mh.fromhere())
if (not self._is_connected):
self._mh.demsg('htk_on_warning', self._mh._trn.msg(
'htk_jms_not_connected'), self._mh.fromhere())
return None
ev = event.Event(
'jms_before_browse', destination_name, cnt, jms_correlation_id, jms_type)
if (self._mh.fire_event(ev) > 0):
destination_name = ev.argv(0)
cnt = ev.argv(1)
jms_correlation_id = ev.argv(2)
jms_type = ev.argv(3)
if (ev.will_run_default()):
token = self._client.subscribe('/queue/{0}'.format(destination_name),
{StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL})
msgs = []
i = 0
while (i < cnt and self._client.canRead(1)):
frame = self._client.receiveFrame()
correlation_id = None
type = None
for header in frame.rawHeaders:
if (header[0] == 'correlation-id'):
correlation_id = header[1]
elif (header[0] == 'type'):
type = header[1]
if ((jms_correlation_id == None or jms_correlation_id == correlation_id) and
(jms_type == None or jms_type == type)):
msgs.append(frame)
i = i + 1
self._client.unsubscribe(token)
messages = []
for msg in msgs:
message = {}
message['message'] = msg.body.decode()
for header in msg.rawHeaders:
if (header[0] in mapping.values()):
message[
list(mapping.keys())[list(mapping.values()).index(header[0])]] = header[1]
messages.append(message)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_jms_msg_received', len(messages)), self._mh.fromhere())
ev = event.Event('jms_after_browse')
self._mh.fire_event(ev)
return messages
except StompError as ex:
self._mh.demsg('htk_on_error', ex, self._mh.fromhere())
return None
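# Illustrative usage sketch (not part of the original module); the broker host,
# port, credentials and queue name below are placeholders.
#
#     client = JMSClient()
#     if client.connect('localhost', 61613, 'admin', 'admin'):
#         client.send('test_queue', 'hello', headers={'JMSType': 'greeting'})
#         for msg in client.receive('test_queue', cnt=1) or []:
#             print(msg['message'])
#         client.disconnect()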
| hydratk/hydratk-lib-network | src/hydratk/lib/network/jms/stomp_client.py | Python | bsd-3-clause | 13,434 |
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Lin, Wanming <[email protected]>
import unittest
import os
import sys
import commands
import comm
import time
class TestWebAppFunctions(unittest.TestCase):
def test_close(self):
comm.setUp()
app_name = "helloworld"
pkg_name = "com.example." + app_name.lower()
if not comm.check_app_installed(pkg_name, self):
comm.app_install(app_name, pkg_name, self)
if not comm.check_app_launched(pkg_name, self):
print "Close app ---------------->%s App haven't launched, need to launch it!" % app_name
comm.app_launch(app_name, pkg_name, self)
time.sleep(1)
comm.app_stop(pkg_name, self)
if __name__ == '__main__':
unittest.main()
| jiajiax/crosswalk-test-suite | cordova/cordova-webapp-android-tests/webapp/webapp_close.py | Python | bsd-3-clause | 2,249 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.db import transaction
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from greenmine.core.generic import GenericView
from greenmine.core.decorators import login_required, staff_required
from greenmine import models
from greenmine.wiki import models as wiki_models
import datetime
import subprocess
import shutil
import pickle
import base64
import zlib
import copy
import sys
import os
import re
import io
class BinaryFile(object):
def __init__(self, name):
self.name = name
def __enter__(self):
self._file = io.open(self.name, mode='w+b')
return self._file
def __exit__(self, exc_type, exc_value, traceback):
self._file.flush()
self._file.close()
class ProjectExportView(GenericView):
template_path = 'config/project-export.html'
menu = ['settings', 'export']
@login_required
def get(self, request, pslug):
project = get_object_or_404(models.Project, slug=pslug)
context = {
'project': project,
'flist': models.ExportDirectoryCache.objects.all()
}
return self.render_to_response(self.template_path, context)
class RehashExportsDirectory(GenericView):
def backup_path_list(self):
for path in os.listdir(settings.BACKUP_PATH):
if os.path.splitext(path)[1] != '.xz':
continue
yield os.path.join(settings.BACKUP_PATH, path)
def backup_file_list(self):
for path in self.backup_path_list():
yield path, os.path.basename(path), os.path.getsize(path)
@login_required
def get(self, request, pslug):
project = get_object_or_404(models.Project, slug=pslug)
models.ExportDirectoryCache.objects.all().delete()
for path, name, size in self.backup_file_list():
models.ExportDirectoryCache.objects.create(
path = name,
size = size,
)
return self.redirect_referer(_(u"Now rehashed"))
class PerojectImportNow(GenericView):
@login_required
    def get(self, request, pslug, iid):
project = get_object_or_404(models.Project, slug=pslug)
class ProjectExportNow(GenericView):
def _clean_copy(self, obj):
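        # Deep-copy the model's __dict__ and drop Django's internal "_state"
        # entry so the resulting plain dict can be pickled safely.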
new_object = copy.deepcopy(obj)
if "_state" in new_object:
del new_object["_state"]
return new_object
def create_tempdir_for_project(self, project):
self.dirname = u"{0}_backup".format(project.slug)
self.path = os.path.join(settings.BACKUP_PATH, self.dirname)
if os.path.exists(self.path):
shutil.rmtree(self.path)
os.mkdir(self.path)
def _backup_project_data(self, project):
filename = "project-data.data"
filepath = os.path.join(self.path, filename)
with io.open(filepath, 'w+b') as f:
obj = self._clean_copy(project.__dict__)
pickle.dump(obj, f, -1)
filename = 'project-owner.data'
filepath = os.path.join(self.path, filename)
with io.open(filepath, 'w+b') as f:
obj = self._clean_copy(project.owner.__dict__)
pickle.dump(obj, f, -1)
def _backup_user_roles(self, project):
directory_pathname = "user_roles"
path = os.path.join(self.path, directory_pathname)
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
for pur in models.ProjectUserRole.objects.filter(project=project):
obj = self._clean_copy(pur.__dict__)
filename = "{0}_{1}.data".format(pur.id, project.id)
filepath = os.path.join(path, filename)
with BinaryFile(filepath) as f:
pickle.dump(obj, f, -1)
def _backup_milestones(self, project):
directory_pathname = "milestones"
path = os.path.join(self.path, directory_pathname)
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
for milestone in project.milestones.all():
obj = self._clean_copy(milestone.__dict__)
filename = "{0}_{1}.data".format(milestone.id, project.id)
filepath = os.path.join(path, filename)
with BinaryFile(filepath) as f:
pickle.dump(obj, f, -1)
def _backup_user_story(self, project):
directory_pathname = "user_stories"
path = os.path.join(self.path, directory_pathname)
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
for user_story in project.user_stories.all():
obj = self._clean_copy(user_story.__dict__)
obj['watchers'] = [o.id for o in user_story.watchers.all().distinct()]
filename = "{0}_{1}.data".format(user_story.id, project.id)
filepath = os.path.join(path, filename)
with BinaryFile(filepath) as f:
pickle.dump(obj, f, -1)
def _backup_tasks(self, project):
directory_pathname = "tasks"
path = os.path.join(self.path, directory_pathname)
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
for task in project.tasks.all():
obj = self._clean_copy(task.__dict__)
obj['watchers'] = [o.id for o in task.watchers.all()]
filename = "task_{0}_{1}.data".format(task.id, project.id)
filepath = os.path.join(path, filename)
with BinaryFile(filepath) as f:
pickle.dump(obj, f, -1)
#for response in models.TaskResponse.objects.filter(task__in=project.tasks.all()):
# obj = self._clean_copy(response.__dict__)
# obj['watchers'] = [o.id for o in task.watchers.all()]
# filename = "response_{0}_{1}.data".format(response.id, project.id)
# filepath = os.path.join(path, filename)
# with BinaryFile(filepath) as f:
# pickle.dump(obj, f, -1)
#
#for res_file in models.TaskAttachedFile.objects.filter(task__in=project.tasks.all()):
# obj = self._clean_copy(res_file.__dict__)
# raw_file_data = res_file.attached_file.read()
# raw_file_data = zlib.compress(raw_file_data, 9)
# raw_file_data = base64.b64encode(raw_file_data)
# obj['__raw_file_data'] = raw_file_data
# filename = "file_response_{0}_{1}.data".format(res_file.id, project.id)
# filepath = os.path.join(path, filename)
# with BinaryFile(filepath) as f:
# pickle.dump(obj, f, -1)
def _backup_questions(self, project):
directory_pathname = "questions"
path = os.path.join(self.path, directory_pathname)
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
for question in project.questions.all():
obj = self._clean_copy(question.__dict__)
obj['watchers'] = [o.id for o in question.watchers.all()]
filename = "{0}_{1}.data".format(question.id, project.id)
filepath = os.path.join(path, filename)
with BinaryFile(filepath) as f:
pickle.dump(obj, f, -1)
for response in models.QuestionResponse.objects\
.filter(question__in=project.questions.all()):
            obj = self._clean_copy(response.__dict__)
raw_file_data = response.attached_file.read()
raw_file_data = zlib.compress(raw_file_data, 9)
raw_file_data = base64.b64encode(raw_file_data)
obj['__raw_file_data'] = raw_file_data
filename = "file_response_{0}_{1}.data".format(response.id, project.id)
filepath = os.path.join(path, filename)
with BinaryFile(filepath) as f:
pickle.dump(obj, f, -1)
def _backup_wiki(self, project):
directory_pathname = "wiki"
path = os.path.join(self.path, directory_pathname)
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
for wikipage in project.wiki_pages.all():
obj = self._clean_copy(wikipage.__dict__)
obj['watchers'] = [o.id for o in wikipage.watchers.all()]
filename = "{0}_{1}.data".format(wikipage.id, project.id)
filepath = os.path.join(path, filename)
with BinaryFile(filepath) as f:
pickle.dump(obj, f, -1)
for fattached in wiki_models.WikiPageAttachment.objects\
.filter(wikipage__in=project.wiki_pages.all()):
obj = self._clean_copy(fattached.__dict__)
raw_file_data = fattached.attached_file.read()
raw_file_data = zlib.compress(raw_file_data, 9)
raw_file_data = base64.b64encode(raw_file_data)
obj['__raw_file_data'] = raw_file_data
filename = "file_response_{0}_{1}.data".format(fattached.id, project.id)
filepath = os.path.join(path, filename)
with BinaryFile(filepath) as f:
pickle.dump(obj, f, -1)
def _create_tarball(self, project):
        current_date = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
filename = "{0}-{1}.tar.xz".format(project.slug, current_date)
current_pwd = os.getcwd()
os.chdir(settings.BACKUP_PATH)
command = "tar cvJf {0} {1}".format(filename, self.dirname)
p = subprocess.Popen(command.split(), stdout=sys.stdout)
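        # Note: the tar process is started asynchronously and never waited on,
        # so the archive may still be being written when this view returns.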
os.chdir(current_pwd)
@login_required
def get(self, request, pslug):
project = get_object_or_404(models.Project, slug=pslug)
self.create_tempdir_for_project(project)
self._backup_project_data(project)
self._backup_user_roles(project)
self._backup_milestones(project)
self._backup_user_story(project)
self._backup_tasks(project)
self._backup_questions(project)
self._backup_wiki(project)
self._create_tarball(project)
return self.redirect_referer("Now exported, rehash directory!")
| niwinz/Green-Mine | src/greenmine/base/views/export.py | Python | bsd-3-clause | 10,254 |
# -*- coding: utf-8 -*-
import json
import os
from datetime import datetime, timedelta
from urllib.parse import urlencode
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.test import RequestFactory
from django.urls import reverse
from django.utils.encoding import force_str
from django.utils.translation import trim_whitespace
from unittest import mock
import pytest
import responses
from pyquery import PyQuery as pq
from waffle.testutils import override_switch
from olympia import amo, core
from olympia.accounts.views import API_TOKEN_COOKIE
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon, AddonCategory, AddonUser
from olympia.amo.storage_utils import copy_stored_file
from olympia.amo.templatetags.jinja_helpers import (
format_date,
url as url_reverse,
urlparams,
)
from olympia.amo.tests import TestCase, addon_factory, user_factory, version_factory
from olympia.amo.tests.test_helpers import get_image_path
from olympia.api.models import SYMMETRIC_JWT_TYPE, APIKey, APIKeyConfirmation
from olympia.applications.models import AppVersion
from olympia.constants.promoted import RECOMMENDED
from olympia.devhub.decorators import dev_required
from olympia.devhub.models import BlogPost
from olympia.devhub.views import get_next_version_number
from olympia.files.models import FileUpload
from olympia.files.tests.test_models import UploadTest as BaseUploadTest
from olympia.ratings.models import Rating
from olympia.translations.models import Translation, delete_translation
from olympia.users.models import IPNetworkUserRestriction, UserProfile
from olympia.users.tests.test_views import UserViewBase
from olympia.versions.models import ApplicationsVersions, Version, VersionPreview
from olympia.zadmin.models import set_config
class HubTest(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(HubTest, self).setUp()
self.url = reverse('devhub.index')
assert self.client.login(email='[email protected]')
assert self.client.get(self.url).status_code == 200
self.user_profile = UserProfile.objects.get(id=999)
not_their_addon = addon_factory(users=[user_factory()])
AddonUser.unfiltered.create(
addon=not_their_addon, user=self.user_profile, role=amo.AUTHOR_ROLE_DELETED
)
def clone_addon(self, num, addon_id=3615):
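        # Create `num` clones of the source add-on (same type and status),
        # owned by the test user profile, and return them.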
addons = []
source = Addon.objects.get(id=addon_id)
for i in range(num):
data = {
'type': source.type,
'status': source.status,
'name': 'cloned-addon-%s-%s' % (addon_id, i),
'users': [self.user_profile],
}
addons.append(addon_factory(**data))
return addons
class TestDashboard(HubTest):
def setUp(self):
super(TestDashboard, self).setUp()
self.url = reverse('devhub.addons')
self.themes_url = reverse('devhub.themes')
assert self.client.get(self.url).status_code == 200
self.addon = Addon.objects.get(pk=3615)
self.addon.addonuser_set.create(user=self.user_profile)
def test_addons_layout(self):
doc = pq(self.client.get(self.url).content)
assert doc('title').text() == (
'Manage My Submissions :: Developer Hub :: Add-ons for Firefox'
)
assert doc('.links-footer').length == 1
assert doc('#copyright').length == 1
assert doc('#footer-links .mobile-link').length == 0
def get_action_links(self, addon_id):
response = self.client.get(self.url)
doc = pq(response.content)
selector = '.item[data-addonid="%s"] .item-actions li > a' % addon_id
links = [a.text.strip() for a in doc(selector)]
return links
def test_no_addons(self):
"""Check that no add-ons are displayed for this user."""
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.item item').length == 0
def test_addon_pagination(self):
"""Check that the correct info. is displayed for each add-on:
namely, that add-ons are paginated at 10 items per page, and that
when there is more than one page, the 'Sort by' header and pagination
footer appear.
"""
        # Create 10 add-ons. We're going to turn the existing one from setUp
        # into a static theme, which shouldn't show up as an add-on in this list.
addons = self.clone_addon(10)
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url)
doc = pq(response.content)
assert len(doc('.item .item-info')) == 10
assert len(doc('.item .info.extension')) == 10
assert doc('nav.paginator').length == 0
for addon in addons:
assert addon.get_icon_url(64) in doc('.item .info h3 a').html()
        # Create 5 more add-ons - we have to change self.addon back to an
        # extension so the clones are extensions too.
self.addon.update(type=amo.ADDON_EXTENSION)
self.clone_addon(5)
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url, {'page': 2})
doc = pq(response.content)
assert len(doc('.item .item-info')) == 5
assert doc('nav.paginator').length == 1
def test_themes(self):
"""Check themes show on dashboard."""
# Create 2 themes.
staticthemes = []
for x in range(2):
addon = addon_factory(type=amo.ADDON_STATICTHEME, users=[self.user_profile])
VersionPreview.objects.create(version=addon.current_version)
staticthemes.append(addon)
response = self.client.get(self.themes_url)
doc = pq(response.content)
assert len(doc('.item .item-info')) == 2
assert len(doc('.item .info.statictheme')) == 2
for addon in staticthemes:
assert addon.current_previews[0].thumbnail_url in [
img.attrib['src'] for img in doc('.info.statictheme h3 img')
]
def test_show_hide_statistics_and_new_version_for_disabled(self):
# Not disabled: show statistics and new version links.
self.addon.update(disabled_by_user=False)
links = self.get_action_links(self.addon.pk)
assert 'Statistics' in links, 'Unexpected: %r' % links
assert 'New Version' in links, 'Unexpected: %r' % links
# Disabled (user): hide new version link.
self.addon.update(disabled_by_user=True)
links = self.get_action_links(self.addon.pk)
assert 'New Version' not in links, 'Unexpected: %r' % links
# Disabled (admin): hide statistics and new version links.
self.addon.update(disabled_by_user=False, status=amo.STATUS_DISABLED)
links = self.get_action_links(self.addon.pk)
assert 'Statistics' not in links, 'Unexpected: %r' % links
assert 'New Version' not in links, 'Unexpected: %r' % links
def test_public_addon(self):
assert self.addon.status == amo.STATUS_APPROVED
doc = pq(self.client.get(self.url).content)
item = doc('.item[data-addonid="%s"]' % self.addon.id)
assert item.find('h3 a').attr('href') == self.addon.get_dev_url()
assert item.find('p.downloads'), 'Expected weekly downloads'
assert item.find('p.users'), 'Expected ADU'
assert item.find('.item-details'), 'Expected item details'
assert not item.find(
'p.incomplete'
), 'Unexpected message about incomplete add-on'
appver = self.addon.current_version.apps.all()[0]
appver.delete()
def test_dev_news(self):
for i in range(7):
bp = BlogPost(
title='hi %s' % i, date_posted=datetime.now() - timedelta(days=i)
)
bp.save()
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.blog-posts').length == 1
assert doc('.blog-posts li').length == 5
assert doc('.blog-posts li a').eq(0).text() == 'hi 0'
assert doc('.blog-posts li a').eq(4).text() == 'hi 4'
def test_sort_created_filter(self):
response = self.client.get(self.url + '?sort=created')
doc = pq(response.content)
assert doc('.item-details').length == 1
elm = doc('.item-details .date-created')
assert elm.length == 1
assert elm.remove('strong').text() == (format_date(self.addon.created))
def test_sort_updated_filter(self):
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.item-details').length == 1
elm = doc('.item-details .date-updated')
assert elm.length == 1
assert elm.remove('strong').text() == (
trim_whitespace(format_date(self.addon.last_updated))
)
def test_purely_unlisted_addon_are_not_shown_as_incomplete(self):
self.make_addon_unlisted(self.addon)
assert self.addon.has_complete_metadata()
response = self.client.get(self.url)
doc = pq(response.content)
# It should not be considered incomplete despite having STATUS_NULL,
# since it's purely unlisted.
assert not doc('.incomplete')
# Rest of the details should be shown, but not the AMO-specific stuff.
assert not doc('.item-info')
assert doc('.item-details')
def test_mixed_versions_addon_with_incomplete_metadata(self):
self.make_addon_unlisted(self.addon)
version = version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED)
version.update(license=None)
self.addon.reload()
assert not self.addon.has_complete_metadata()
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.incomplete').text() == (
'This add-on is missing some required information before it can be'
' submitted for publication.'
)
assert doc('form.resume').attr('action') == (
url_reverse('devhub.request-review', self.addon.slug)
)
assert doc('button.link').text() == 'Resume'
def test_no_versions_addon(self):
self.addon.current_version.delete()
response = self.client.get(self.url)
doc = pq(response.content)
assert doc('.incomplete').text() == ("This add-on doesn't have any versions.")
class TestUpdateCompatibility(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super().setUp()
assert self.client.login(email='[email protected]')
self.url = reverse('devhub.addons')
# These aren't realistic but work with existing tests and the 3615 addon
self.create_appversion('android', '3.7a1pre')
self.create_appversion('android', '4.0')
def create_appversion(self, name, version):
return AppVersion.objects.get_or_create(
application=amo.APPS[name].id, version=version
)
def test_no_compat(self):
addon = Addon.objects.get(pk=3615)
addon.update(type=amo.ADDON_DICT)
self.client.logout()
assert self.client.login(email='[email protected]')
response = self.client.get(self.url)
doc = pq(response.content)
assert not doc('.item[data-addonid="3615"] li.compat')
response = self.client.get(
reverse(
'devhub.ajax.compat.update', args=[addon.slug, addon.current_version.id]
)
)
assert response.status_code == 404
response = self.client.get(
reverse('devhub.ajax.compat.status', args=[addon.slug])
)
assert response.status_code == 404
def test_compat(self):
addon = Addon.objects.get(pk=3615)
response = self.client.get(self.url)
doc = pq(response.content)
cu = doc('.item[data-addonid="3615"] .tooltip.compat-update')
assert not cu
addon.current_version.files.update(strict_compatibility=True)
response = self.client.get(self.url)
doc = pq(response.content)
cu = doc('.item[data-addonid="3615"] .tooltip.compat-update')
assert cu
update_url = reverse(
'devhub.ajax.compat.update', args=[addon.slug, addon.current_version.id]
)
assert cu.attr('data-updateurl') == update_url
status_url = reverse('devhub.ajax.compat.status', args=[addon.slug])
selector = '.item[data-addonid="3615"] li.compat'
assert doc(selector).attr('data-src') == status_url
assert doc('.item[data-addonid="3615"] .compat-update-modal')
def test_incompat_firefox(self):
addon = Addon.objects.get(pk=3615)
addon.current_version.files.update(strict_compatibility=True)
versions = ApplicationsVersions.objects.all()[0]
versions.max = AppVersion.objects.get(version='2.0')
versions.save()
doc = pq(self.client.get(self.url).content)
assert doc('.item[data-addonid="3615"] .tooltip.compat-error')
def test_incompat_android(self):
addon = Addon.objects.get(pk=3615)
addon.current_version.files.update(strict_compatibility=True)
appver = AppVersion.objects.get(version='2.0')
appver.update(application=amo.ANDROID.id)
av = ApplicationsVersions.objects.all()[0]
av.application = amo.ANDROID.id
av.max = appver
av.save()
doc = pq(self.client.get(self.url).content)
assert doc('.item[data-addonid="3615"] .tooltip.compat-error')
class TestDevRequired(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestDevRequired, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.edit_page_url = self.addon.get_dev_url('edit')
self.get_url = self.addon.get_dev_url('versions')
self.post_url = self.addon.get_dev_url('delete')
assert self.client.login(email='[email protected]')
self.au = self.addon.addonuser_set.get(user__email='[email protected]')
assert self.au.role == amo.AUTHOR_ROLE_OWNER
def test_anon(self):
self.client.logout()
self.assertLoginRedirects(self.client.get(self.get_url), self.get_url)
self.assertLoginRedirects(
self.client.get(self.edit_page_url), self.edit_page_url
)
def test_dev_get(self):
assert self.client.get(self.get_url).status_code == 200
assert self.client.get(self.edit_page_url).status_code == 200
def test_dev_post(self):
self.assert3xx(self.client.post(self.post_url), self.get_url)
def test_disabled_post_dev(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert self.client.post(self.get_url).status_code == 403
def test_disabled_post_admin(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert self.client.login(email='[email protected]')
self.assert3xx(self.client.post(self.post_url), self.get_url)
class TestVersionStats(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestVersionStats, self).setUp()
assert self.client.login(email='[email protected]')
def test_counts(self):
addon = Addon.objects.get(id=3615)
version = addon.current_version
user = UserProfile.objects.get(email='[email protected]')
for _ in range(10):
Rating.objects.create(addon=addon, user=user, version=addon.current_version)
url = reverse('devhub.versions.stats', args=[addon.slug])
data = json.loads(force_str(self.client.get(url).content))
exp = {
str(version.id): {
'reviews': 10,
'files': 1,
'version': version.version,
'id': version.id,
}
}
self.assertDictEqual(data, exp)
class TestDelete(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestDelete, self).setUp()
self.get_addon = lambda: Addon.objects.filter(id=3615)
assert self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.get_url = lambda: self.get_addon()[0].get_dev_url('delete')
def test_post_not(self):
response = self.client.post(self.get_url(), follow=True)
assert pq(response.content)('.notification-box').text() == (
'URL name was incorrect. Add-on was not deleted.'
)
assert self.get_addon().exists()
self.assert3xx(response, self.get_addon()[0].get_dev_url('versions'))
def test_post(self):
self.get_addon().get().update(slug='addon-slug')
response = self.client.post(self.get_url(), {'slug': 'addon-slug'}, follow=True)
assert pq(response.content)('.notification-box').text() == ('Add-on deleted.')
assert not self.get_addon().exists()
self.assert3xx(response, reverse('devhub.addons'))
def test_post_wrong_slug(self):
self.get_addon().get().update(slug='addon-slug')
response = self.client.post(self.get_url(), {'slug': 'theme-slug'}, follow=True)
assert pq(response.content)('.notification-box').text() == (
'URL name was incorrect. Add-on was not deleted.'
)
assert self.get_addon().exists()
self.assert3xx(response, self.get_addon()[0].get_dev_url('versions'))
def test_post_statictheme(self):
theme = addon_factory(
name='xpi name',
type=amo.ADDON_STATICTHEME,
slug='stheme-slug',
users=[self.user],
)
response = self.client.post(
theme.get_dev_url('delete'), {'slug': 'stheme-slug'}, follow=True
)
assert pq(response.content)('.notification-box').text() == ('Theme deleted.')
assert not Addon.objects.filter(id=theme.id).exists()
self.assert3xx(response, reverse('devhub.themes'))
def test_post_statictheme_wrong_slug(self):
theme = addon_factory(
name='xpi name',
type=amo.ADDON_STATICTHEME,
slug='stheme-slug',
users=[self.user],
)
response = self.client.post(
theme.get_dev_url('delete'), {'slug': 'foo-slug'}, follow=True
)
assert pq(response.content)('.notification-box').text() == (
'URL name was incorrect. Theme was not deleted.'
)
assert Addon.objects.filter(id=theme.id).exists()
self.assert3xx(response, theme.get_dev_url('versions'))
class TestHome(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestHome, self).setUp()
assert self.client.login(email='[email protected]')
self.url = reverse('devhub.index')
self.addon = Addon.objects.get(pk=3615)
def get_pq(self):
response = self.client.get(self.url)
assert response.status_code == 200
return pq(response.content)
def test_basic_logged_out(self):
self.client.logout()
response = self.client.get(self.url)
assert response.status_code == 200
self.assertTemplateUsed(response, 'devhub/index.html')
assert b'Customize Firefox' in response.content
def test_default_lang_selected(self):
self.client.logout()
doc = self.get_pq()
selected_value = doc('#language option:selected').attr('value')
assert selected_value == 'en-us'
def test_basic_logged_in(self):
response = self.client.get(self.url)
assert response.status_code == 200
self.assertTemplateUsed(response, 'devhub/index.html')
assert b'My Add-ons' in response.content
def test_my_addons_addon_versions_link(self):
assert self.client.login(email='[email protected]')
doc = self.get_pq()
addon_list = doc('.DevHub-MyAddons-list')
href = addon_list.find('.DevHub-MyAddons-item-versions a').attr('href')
assert href == self.addon.get_dev_url('versions')
def test_my_addons(self):
statuses = [
(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW, 'Awaiting Review'),
(amo.STATUS_APPROVED, amo.STATUS_AWAITING_REVIEW, 'Approved'),
(amo.STATUS_DISABLED, amo.STATUS_APPROVED, 'Disabled by Mozilla'),
]
latest_version = self.addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED)
latest_file = latest_version.files.all()[0]
for addon_status, file_status, status_str in statuses:
latest_file.update(status=file_status)
self.addon.update(status=addon_status)
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert addon_item.find('.DevHub-MyAddons-item-edit').attr(
'href'
) == self.addon.get_dev_url('edit')
if self.addon.type != amo.ADDON_STATICTHEME:
assert self.addon.get_icon_url(64) in addon_item.html()
else:
assert self.addon.current_previews[0].thumbnail_url in (
addon_item.html()
)
assert (
status_str == addon_item.find('.DevHub-MyAddons-VersionStatus').text()
)
Addon.objects.all().delete()
assert self.get_pq()('.DevHub-MyAddons-list .DevHub-MyAddons-item').length == 0
def test_my_addons_recommended(self):
self.make_addon_promoted(self.addon, RECOMMENDED, approve_version=True)
latest_version = self.addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED)
latest_file = latest_version.files.all()[0]
statuses = [
(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW, 'Awaiting Review'),
(
amo.STATUS_APPROVED,
amo.STATUS_AWAITING_REVIEW,
'Approved and Recommended',
),
(amo.STATUS_DISABLED, amo.STATUS_APPROVED, 'Disabled by Mozilla'),
]
for addon_status, file_status, status_str in statuses:
latest_file.update(status=file_status)
self.addon.update(status=addon_status)
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert addon_item.find('.DevHub-MyAddons-item-edit').attr(
'href'
) == self.addon.get_dev_url('edit')
if self.addon.type != amo.ADDON_STATICTHEME:
assert self.addon.get_icon_url(64) in addon_item.html()
else:
assert self.addon.current_previews[0].thumbnail_url in (
addon_item.html()
)
assert (
status_str == addon_item.find('.DevHub-MyAddons-VersionStatus').text()
)
Addon.objects.all().delete()
assert self.get_pq()('.DevHub-MyAddons-list .DevHub-MyAddons-item').length == 0
def test_my_addons_with_static_theme(self):
self.addon.update(type=amo.ADDON_STATICTHEME)
VersionPreview.objects.create(version=self.addon.current_version)
self.test_my_addons()
def test_my_addons_incomplete(self):
self.addon.update(status=amo.STATUS_NULL)
# Make add-on incomplete
AddonCategory.objects.filter(addon=self.addon).delete()
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert addon_item.find('.DevHub-MyAddons-item-edit').attr(
'href'
) == self.addon.get_dev_url('edit')
def test_my_addons_no_disabled_or_deleted(self):
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=True)
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert addon_item.find('.DevHub-MyAddons-VersionStatus').text() == 'Invisible'
class TestActivityFeed(TestCase):
fixtures = ('base/users', 'base/addon_3615')
def setUp(self):
super(TestActivityFeed, self).setUp()
assert self.client.login(email='[email protected]')
self.addon = Addon.objects.get(id=3615)
self.version = self.addon.versions.first()
self.action_user = UserProfile.objects.get(email='[email protected]')
ActivityLog.objects.all().delete()
def test_feed_for_all(self):
response = self.client.get(reverse('devhub.feed_all'))
assert response.status_code == 200
doc = pq(response.content)
assert doc('header h2').text() == 'Recent Activity for My Add-ons'
def test_feed_for_addon(self):
response = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
assert response.status_code == 200
doc = pq(response.content)
assert doc('header h2').text() == ('Recent Activity for %s' % self.addon.name)
def test_feed_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
response = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
assert response.status_code == 200
def test_feed_disabled_anon(self):
self.client.logout()
response = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
assert response.status_code == 302
def add_log(self, action=amo.LOG.ADD_RATING):
core.set_user(self.action_user)
ActivityLog.create(action, self.addon, self.version)
def add_hidden_log(self, action=amo.LOG.COMMENT_VERSION):
self.add_log(action=action)
def test_feed_hidden(self):
self.add_hidden_log()
self.add_hidden_log(amo.LOG.OBJECT_ADDED)
res = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(res.content)
assert len(doc('#recent-activity li.item')) == 0
def test_addons_hidden(self):
self.add_hidden_log()
self.add_hidden_log(amo.LOG.OBJECT_ADDED)
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
assert len(doc('.recent-activity li.item')) == 0
def test_unlisted_addons_dashboard(self):
"""Unlisted addons are displayed in the feed on the dashboard page."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
assert len(doc('.recent-activity li.item')) == 2
def test_unlisted_addons_feed_sidebar(self):
"""Unlisted addons are displayed in the left side in the feed page."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.feed_all'))
doc = pq(res.content)
# First li is "All My Add-ons".
assert len(doc('#refine-addon li')) == 2
def test_unlisted_addons_feed(self):
"""Unlisted addons are displayed in the feed page."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.feed_all'))
doc = pq(res.content)
assert len(doc('#recent-activity .item')) == 2
def test_unlisted_addons_feed_filter(self):
"""Feed page can be filtered on unlisted addon."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(res.content)
assert len(doc('#recent-activity .item')) == 2
def test_reviewer_name_is_used_for_reviewer_actions(self):
self.action_user.update(display_name='HîdeMe', reviewer_name='ShöwMe')
self.add_log(action=amo.LOG.APPROVE_VERSION)
response = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(response.content)
assert len(doc('#recent-activity .item')) == 1
content = force_str(response.content)
assert self.action_user.reviewer_name in content
assert self.action_user.name not in content
def test_regular_name_is_used_for_non_reviewer_actions(self):
# Fields are inverted compared to the test above.
self.action_user.update(reviewer_name='HîdeMe', display_name='ShöwMe')
self.add_log(action=amo.LOG.ADD_RATING) # not a reviewer action.
response = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(response.content)
assert len(doc('#recent-activity .item')) == 1
content = force_str(response.content)
# Assertions are inverted compared to the test above.
assert self.action_user.reviewer_name not in content
assert self.action_user.name in content
def test_addons_dashboard_name(self):
self.add_log()
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
timestamp = doc('.recent-activity li.item span.activity-timestamp')
assert len(timestamp) == 1
assert self.action_user.name
assert self.action_user.name in timestamp.html()
assert '<a href=' not in timestamp.html()
def test_addons_dashboard_reviewer_name(self):
self.action_user.update(reviewer_name='bob')
self.add_log(action=amo.LOG.APPROVE_VERSION)
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
timestamp = doc('.recent-activity li.item span.activity-timestamp')
assert len(timestamp) == 1
assert self.action_user.name
assert self.action_user.name not in timestamp.html()
assert self.action_user.reviewer_name in timestamp.html()
assert '<a href=' not in timestamp.html()
class TestAPIAgreement(TestCase):
fixtures = ['base/addon_3615', 'base/addon_5579', 'base/users']
def setUp(self):
super(TestAPIAgreement, self).setUp()
assert self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.user.update(last_login_ip='192.168.1.1')
def test_agreement_read(self):
self.user.update(read_dev_agreement=self.days_ago(0))
response = self.client.get(reverse('devhub.api_key_agreement'))
self.assert3xx(response, reverse('devhub.api_key'))
def test_agreement_unread_captcha_inactive(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
form = response.context['agreement_form']
assert 'recaptcha' not in form.fields
doc = pq(response.content)
assert doc('.g-recaptcha') == []
@override_switch('developer-agreement-captcha', active=True)
def test_agreement_unread_captcha_active(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
form = response.context['agreement_form']
assert 'recaptcha' in form.fields
doc = pq(response.content)
assert doc('.g-recaptcha')
def test_agreement_submit_success(self):
self.user.update(read_dev_agreement=None)
response = self.client.post(
reverse('devhub.api_key_agreement'),
data={
'distribution_agreement': 'on',
'review_policy': 'on',
},
)
assert response.status_code == 302
assert response['Location'] == reverse('devhub.api_key')
self.user.reload()
self.assertCloseToNow(self.user.read_dev_agreement)
@override_switch('developer-agreement-captcha', active=True)
def test_agreement_submit_captcha_active_error(self):
self.user.update(read_dev_agreement=None)
response = self.client.post(reverse('devhub.api_key_agreement'))
# Captcha is properly rendered
doc = pq(response.content)
assert doc('.g-recaptcha')
assert 'recaptcha' in response.context['agreement_form'].errors
@override_switch('developer-agreement-captcha', active=True)
def test_agreement_submit_captcha_active_success(self):
self.user.update(read_dev_agreement=None)
verify_data = urlencode(
{
'secret': '',
'remoteip': '127.0.0.1',
'response': 'test',
}
)
responses.add(
responses.GET,
'https://www.google.com/recaptcha/api/siteverify?' + verify_data,
json={'error-codes': [], 'success': True},
)
response = self.client.post(
reverse('devhub.api_key_agreement'),
data={
'g-recaptcha-response': 'test',
'distribution_agreement': 'on',
'review_policy': 'on',
},
)
assert response.status_code == 302
assert response['Location'] == reverse('devhub.api_key')
self.user.reload()
self.assertCloseToNow(self.user.read_dev_agreement)
def test_agreement_read_but_too_long_ago(self):
set_config('last_dev_agreement_change_date', '2018-01-01 12:00')
before_agreement_last_changed = datetime(2018, 1, 1, 12, 0) - timedelta(days=1)
self.user.update(read_dev_agreement=before_agreement_last_changed)
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
@mock.patch('olympia.addons.utils.RestrictionChecker.is_submission_allowed')
def test_cant_submit_agreement_if_restricted(self, is_submission_allowed_mock):
is_submission_allowed_mock.return_value = False
self.user.update(read_dev_agreement=None)
response = self.client.post(
reverse('devhub.api_key_agreement'),
data={
'distribution_agreement': 'on',
'review_policy': 'on',
},
)
assert response.status_code == 200
assert response.context['agreement_form'].is_valid() is False
self.user.reload()
assert self.user.read_dev_agreement is None
assert is_submission_allowed_mock.call_count == 2
# First call is from the form, and it's not checking the agreement,
# it's just to see if the user is restricted.
assert is_submission_allowed_mock.call_args_list[0] == (
(),
{'check_dev_agreement': False},
)
# Second call is from the view itself, no arguments
assert is_submission_allowed_mock.call_args_list[1] == ((), {})
def test_cant_submit_agreement_if_restricted_functional(self):
# Like test_cant_submit_agreement_if_restricted() but with no mocks,
# picking a single restriction and making sure it's working properly.
IPNetworkUserRestriction.objects.create(network='127.0.0.1/32')
self.user.update(read_dev_agreement=None)
response = self.client.post(
reverse('devhub.api_key_agreement'),
data={
'distribution_agreement': 'on',
'review_policy': 'on',
},
)
assert response.status_code == 200
assert response.context['agreement_form'].is_valid() is False
doc = pq(response.content)
assert doc('.addon-submission-process').text() == (
'Multiple add-ons violating our policies have been submitted '
'from your location. The IP address has been blocked.\n'
'More information on Developer Accounts'
)
@mock.patch('olympia.addons.utils.RestrictionChecker.is_submission_allowed')
def test_agreement_page_shown_if_restricted(self, is_submission_allowed_mock):
# Like test_agreement_read() above, but with a restricted user: they
# are shown the agreement page again instead of redirecting to the
# api keys page.
is_submission_allowed_mock.return_value = False
self.user.update(read_dev_agreement=self.days_ago(0))
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
class TestAPIKeyPage(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestAPIKeyPage, self).setUp()
self.url = reverse('devhub.api_key')
assert self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.user.update(last_login_ip='192.168.1.1')
def test_key_redirect(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.api_key'))
self.assert3xx(response, reverse('devhub.api_key_agreement'))
def test_redirect_if_restricted(self):
IPNetworkUserRestriction.objects.create(network='127.0.0.1/32')
response = self.client.get(reverse('devhub.api_key'))
self.assert3xx(response, reverse('devhub.api_key_agreement'))
def test_view_without_credentials_not_confirmed_yet(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
submit = doc('#generate-key')
assert submit.text() == 'Generate new credentials'
inputs = doc('.api-input input')
assert len(inputs) == 0, 'Inputs should be absent before keys exist'
assert not doc('input[name=confirmation_token]')
def test_view_with_credentials(self):
APIKey.objects.create(
user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret',
)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
submit = doc('#generate-key')
assert submit.text() == 'Revoke and regenerate credentials'
assert doc('#revoke-key').text() == 'Revoke'
key_input = doc('.key-input input').val()
assert key_input == 'some-jwt-key'
def test_view_without_credentials_confirmation_requested_no_token(self):
APIKeyConfirmation.objects.create(
user=self.user, token='doesnt matter', confirmed_once=False
)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
# Since confirmation has already been requested, there shouldn't be
# any buttons on the page if no token was passed in the URL - the user
# needs to follow the link in the email to continue.
assert not doc('input[name=confirmation_token]')
assert not doc('input[name=action]')
def test_view_without_credentials_confirmation_requested_with_token(self):
APIKeyConfirmation.objects.create(
user=self.user, token='secrettoken', confirmed_once=False
)
self.url += '?token=secrettoken'
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('input[name=confirmation_token]')) == 1
token_input = doc('input[name=confirmation_token]')[0]
assert token_input.value == 'secrettoken'
submit = doc('#generate-key')
assert submit.text() == 'Confirm and generate new credentials'
def test_view_no_credentials_has_been_confirmed_once(self):
APIKeyConfirmation.objects.create(
user=self.user, token='doesnt matter', confirmed_once=True
)
# Should look similar to when there are no credentials and no
# confirmation has been requested yet, the post action is where it
# will differ.
self.test_view_without_credentials_not_confirmed_yet()
def test_create_new_credentials_has_been_confirmed_once(self):
APIKeyConfirmation.objects.create(
user=self.user, token='doesnt matter', confirmed_once=True
)
patch = mock.patch('olympia.devhub.views.APIKey.new_jwt_credentials')
with patch as mock_creator:
response = self.client.post(self.url, data={'action': 'generate'})
mock_creator.assert_called_with(self.user)
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert message.to == [self.user.email]
assert message.subject == 'New API key created'
assert reverse('devhub.api_key') in message.body
self.assert3xx(response, self.url)
def test_create_new_credentials_confirming_with_token(self):
confirmation = APIKeyConfirmation.objects.create(
user=self.user, token='secrettoken', confirmed_once=False
)
patch = mock.patch('olympia.devhub.views.APIKey.new_jwt_credentials')
with patch as mock_creator:
response = self.client.post(
self.url,
data={'action': 'generate', 'confirmation_token': 'secrettoken'},
)
mock_creator.assert_called_with(self.user)
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert message.to == [self.user.email]
assert message.subject == 'New API key created'
assert reverse('devhub.api_key') in message.body
confirmation.reload()
assert confirmation.confirmed_once
self.assert3xx(response, self.url)
def test_create_new_credentials_not_confirmed_yet(self):
assert not APIKey.objects.filter(user=self.user).exists()
assert not APIKeyConfirmation.objects.filter(user=self.user).exists()
response = self.client.post(self.url, data={'action': 'generate'})
self.assert3xx(response, self.url)
        # Since there were no credentials and no confirmation yet, this should
# create a confirmation, send an email with the token, but not create
# credentials yet.
assert len(mail.outbox) == 1
message = mail.outbox[0]
assert message.to == [self.user.email]
assert not APIKey.objects.filter(user=self.user).exists()
assert APIKeyConfirmation.objects.filter(user=self.user).exists()
confirmation = APIKeyConfirmation.objects.filter(user=self.user).get()
assert confirmation.token
assert not confirmation.confirmed_once
token = confirmation.token
expected_url = (
f'http://testserver/en-US/developers/addon/api/key/?token={token}'
)
assert message.subject == 'Confirmation for developer API keys'
assert expected_url in message.body
def test_create_new_credentials_confirmation_exists_no_token_passed(self):
confirmation = APIKeyConfirmation.objects.create(
user=self.user, token='doesnt matter', confirmed_once=False
)
response = self.client.post(self.url, data={'action': 'generate'})
assert len(mail.outbox) == 0
assert not APIKey.objects.filter(user=self.user).exists()
confirmation.reload()
assert not confirmation.confirmed_once # Unchanged
self.assert3xx(response, self.url)
def test_create_new_credentials_confirmation_exists_token_is_wrong(self):
confirmation = APIKeyConfirmation.objects.create(
user=self.user, token='sometoken', confirmed_once=False
)
response = self.client.post(
self.url, data={'action': 'generate', 'confirmation_token': 'wrong'}
)
        # Nothing should have happened; the user will just be redirected to
        # the page.
assert len(mail.outbox) == 0
assert not APIKey.objects.filter(user=self.user).exists()
confirmation.reload()
assert not confirmation.confirmed_once
self.assert3xx(response, self.url)
def test_delete_and_recreate_credentials_has_been_confirmed_once(self):
APIKeyConfirmation.objects.create(
user=self.user, token='doesnt matter', confirmed_once=True
)
old_key = APIKey.objects.create(
user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret',
)
response = self.client.post(self.url, data={'action': 'generate'})
self.assert3xx(response, self.url)
old_key = APIKey.objects.get(pk=old_key.pk)
assert old_key.is_active is None
new_key = APIKey.get_jwt_key(user=self.user)
assert new_key.key != old_key.key
assert new_key.secret != old_key.secret
def test_delete_and_recreate_credentials_has_not_been_confirmed_yet(self):
old_key = APIKey.objects.create(
user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret',
)
response = self.client.post(self.url, data={'action': 'generate'})
self.assert3xx(response, self.url)
old_key = APIKey.objects.get(pk=old_key.pk)
assert old_key.is_active is None
        # Since there was no confirmation, this should create one, send an
        # email with the token, but not create credentials yet. (This would
        # happen for a user who had API keys from before we introduced the
        # confirmation mechanism but decided to regenerate.)
assert len(mail.outbox) == 2 # 2 because of key revocation email.
assert 'revoked' in mail.outbox[0].body
message = mail.outbox[1]
assert message.to == [self.user.email]
assert not APIKey.objects.filter(user=self.user, is_active=True).exists()
assert APIKeyConfirmation.objects.filter(user=self.user).exists()
confirmation = APIKeyConfirmation.objects.filter(user=self.user).get()
assert confirmation.token
assert not confirmation.confirmed_once
token = confirmation.token
expected_url = (
f'http://testserver/en-US/developers/addon/api/key/?token={token}'
)
assert message.subject == 'Confirmation for developer API keys'
assert expected_url in message.body
def test_delete_credentials(self):
old_key = APIKey.objects.create(
user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret',
)
response = self.client.post(self.url, data={'action': 'revoke'})
self.assert3xx(response, self.url)
old_key = APIKey.objects.get(pk=old_key.pk)
assert old_key.is_active is None
assert len(mail.outbox) == 1
assert 'revoked' in mail.outbox[0].body
class TestUpload(BaseUploadTest):
fixtures = ['base/users']
def setUp(self):
super(TestUpload, self).setUp()
assert self.client.login(email='[email protected]')
self.url = reverse('devhub.upload')
self.image_path = get_image_path('animated.png')
def post(self, **kwargs):
        # Has to be a binary, non-xpi file.
data = open(self.image_path, 'rb')
return self.client.post(self.url, {'upload': data}, **kwargs)
def test_login_required(self):
self.client.logout()
response = self.post()
assert response.status_code == 302
def test_create_fileupload(self):
self.post()
upload = FileUpload.objects.filter().order_by('-created').first()
assert 'animated.png' in upload.name
data = open(self.image_path, 'rb').read()
assert storage.open(upload.path).read() == data
def test_fileupload_metadata(self):
user = UserProfile.objects.get(email='[email protected]')
self.client.login(email=user.email)
self.post(REMOTE_ADDR='4.8.15.16.23.42')
upload = FileUpload.objects.get()
assert upload.user == user
assert upload.source == amo.UPLOAD_SOURCE_DEVHUB
assert upload.ip_address == '4.8.15.16.23.42'
def test_fileupload_validation(self):
self.post()
upload = FileUpload.objects.filter().order_by('-created').first()
assert upload.validation
validation = json.loads(upload.validation)
assert not validation['success']
# The current interface depends on this JSON structure:
assert validation['errors'] == 1
assert validation['warnings'] == 0
assert len(validation['messages'])
msg = validation['messages'][0]
assert msg['type'] == 'error'
assert msg['message'] == (
'Unsupported file type, please upload a supported file '
'(.crx, .xpi, .zip).'
)
assert not msg['description']
def test_redirect(self):
response = self.post()
upload = FileUpload.objects.get()
url = reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
self.assert3xx(response, url)
def test_not_an_uuid(self):
url = reverse('devhub.upload_detail', args=['garbage', 'json'])
response = self.client.get(url)
assert response.status_code == 404
@mock.patch('olympia.devhub.tasks.validate')
def test_upload_unlisted_addon(self, validate_mock):
"""Unlisted addons are validated as "self hosted" addons."""
validate_mock.return_value = json.dumps(amo.VALIDATOR_SKELETON_RESULTS)
self.url = reverse('devhub.upload_unlisted')
self.post()
# Make sure it was called with listed=False.
assert not validate_mock.call_args[1]['listed']
class TestUploadDetail(BaseUploadTest):
fixtures = ['base/appversion', 'base/users']
@classmethod
def setUpTestData(cls):
versions = {
'51.0a1',
amo.DEFAULT_WEBEXT_MIN_VERSION,
amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID,
amo.DEFAULT_WEBEXT_MAX_VERSION,
}
for version in versions:
cls.create_appversion('firefox', version)
cls.create_appversion('android', version)
def setUp(self):
super(TestUploadDetail, self).setUp()
assert self.client.login(email='[email protected]')
@classmethod
def create_appversion(cls, application_name, version):
return AppVersion.objects.create(
application=amo.APPS[application_name].id, version=version
)
def post(self):
        # Has to be a binary, non-xpi file.
data = open(get_image_path('animated.png'), 'rb')
return self.client.post(reverse('devhub.upload'), {'upload': data})
def validation_ok(self):
return {
'errors': 0,
'success': True,
'warnings': 0,
'notices': 0,
'message_tree': {},
'messages': [],
'rejected': False,
'metadata': {},
}
def upload_file(self, file, url='devhub.upload'):
addon = os.path.join(
settings.ROOT, 'src', 'olympia', 'devhub', 'tests', 'addons', file
)
with open(addon, 'rb') as f:
response = self.client.post(reverse(url), {'upload': f})
assert response.status_code == 302
def test_detail_json(self):
self.post()
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
)
assert response.status_code == 200
data = json.loads(force_str(response.content))
assert data['validation']['errors'] == 1
assert data['url'] == (
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
)
assert data['full_report_url'] == (
reverse('devhub.upload_detail', args=[upload.uuid.hex])
)
# We must have tiers
assert len(data['validation']['messages'])
msg = data['validation']['messages'][0]
assert msg['tier'] == 1
def test_upload_detail_for_version(self):
user = UserProfile.objects.get(email='[email protected]')
addon = addon_factory()
addon.addonuser_set.create(user=user)
self.post()
upload = FileUpload.objects.get()
response = self.client.get(
reverse(
'devhub.upload_detail_for_version', args=[addon.slug, upload.uuid.hex]
)
)
assert response.status_code == 200
def test_upload_detail_for_version_not_an_uuid(self):
user = UserProfile.objects.get(email='[email protected]')
addon = addon_factory()
addon.addonuser_set.create(user=user)
url = reverse('devhub.upload_detail_for_version', args=[addon.slug, 'garbage'])
response = self.client.get(url)
assert response.status_code == 404
def test_upload_detail_for_version_unlisted(self):
user = UserProfile.objects.get(email='[email protected]')
addon = addon_factory(version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
addon.addonuser_set.create(user=user)
self.post()
upload = FileUpload.objects.get()
response = self.client.get(
reverse(
'devhub.upload_detail_for_version', args=[addon.slug, upload.uuid.hex]
)
)
assert response.status_code == 200
def test_upload_detail_for_version_deleted(self):
user = UserProfile.objects.get(email='[email protected]')
addon = addon_factory()
addon.addonuser_set.create(user=user)
addon.delete()
self.post()
upload = FileUpload.objects.get()
response = self.client.get(
reverse(
'devhub.upload_detail_for_version', args=[addon.slug, upload.uuid.hex]
)
)
assert response.status_code == 404
def test_detail_view(self):
self.post()
upload = FileUpload.objects.filter().order_by('-created').first()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex])
)
assert response.status_code == 200
doc = pq(response.content)
expected = 'Validation Results for animated.png'
assert doc('header h2').text() == expected
suite = doc('#addon-validator-suite')
expected = reverse('devhub.standalone_upload_detail', args=[upload.uuid.hex])
assert suite.attr('data-validateurl') == expected
def test_not_an_uuid_standalon_upload_detail(self):
url = reverse('devhub.standalone_upload_detail', args=['garbage'])
response = self.client.get(url)
assert response.status_code == 404
def test_no_servererror_on_missing_version(self):
"""https://github.com/mozilla/addons-server/issues/3779
        addons-linter and amo-validator both add proper errors if the version
        is missing; we shouldn't fail on that, but should properly show the
        validation results.
"""
self.upload_file('valid_webextension_no_version.xpi')
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
)
data = json.loads(force_str(response.content))
message = [
(m['message'], m.get('type') == 'error')
for m in data['validation']['messages']
]
expected = [('"/version" is a required property', True)]
assert message == expected
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_not_a_valid_xpi(self, run_addons_linter_mock):
run_addons_linter_mock.return_value = json.dumps(self.validation_ok())
self.upload_file('unopenable.xpi')
# We never even reach the linter (we can't: because we're repacking
# zip files, we should raise an error if the zip is invalid before
# calling the linter, even though the linter has a perfectly good error
# message for this kind of situation).
assert not run_addons_linter_mock.called
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
)
data = json.loads(force_str(response.content))
message = [
(m['message'], m.get('fatal', False))
for m in data['validation']['messages']
]
# We do raise a specific error message explaining that the archive is
# not valid instead of a generic exception.
assert message == [
('Invalid or corrupt add-on file.', True),
]
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_experiment_xpi_allowed(self, mock_validator):
user = UserProfile.objects.get(email='[email protected]')
self.grant_permission(user, 'Experiments:submit')
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file(
'../../../files/fixtures/files/experiment_inside_webextension.xpi'
)
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
)
data = json.loads(force_str(response.content))
assert data['validation']['messages'] == []
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_experiment_xpi_not_allowed(self, mock_validator):
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file(
'../../../files/fixtures/files/experiment_inside_webextension.xpi'
)
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
)
data = json.loads(force_str(response.content))
assert data['validation']['messages'] == [
{
'tier': 1,
'message': 'You cannot submit this type of add-on',
'fatal': True,
'type': 'error',
}
]
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_system_addon_allowed(self, mock_validator):
user = user_factory()
self.grant_permission(user, 'SystemAddon:Submit')
assert self.client.login(email=user.email)
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file('../../../files/fixtures/files/mozilla_guid.xpi')
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
)
data = json.loads(force_str(response.content))
assert data['validation']['messages'] == []
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_system_addon_not_allowed_not_allowed(self, mock_validator):
user_factory(email='[email protected]')
assert self.client.login(email='[email protected]')
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file('../../../files/fixtures/files/mozilla_guid.xpi')
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
)
data = json.loads(force_str(response.content))
assert data['validation']['messages'] == [
{
'tier': 1,
'message': 'You cannot submit an add-on using an ID ending with '
'"@mozilla.com" or "@mozilla.org" or '
'"@pioneer.mozilla.org" or "@search.mozilla.org" or '
'"@shield.mozilla.com" or "@shield.mozilla.org" or '
'"@mozillaonline.com" or "@mozillafoundation.org" or '
'"@rally.mozilla.org"',
'fatal': True,
'type': 'error',
}
]
@mock.patch('olympia.devhub.tasks.run_addons_linter')
@mock.patch('olympia.files.utils.get_signer_organizational_unit_name')
def test_mozilla_signed_allowed(self, mock_get_signature, mock_validator):
user = user_factory()
assert self.client.login(email=user.email)
self.grant_permission(user, 'SystemAddon:Submit')
mock_validator.return_value = json.dumps(self.validation_ok())
mock_get_signature.return_value = 'Mozilla Extensions'
self.upload_file(
'../../../files/fixtures/files/webextension_signed_already.xpi'
)
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
)
data = json.loads(force_str(response.content))
assert data['validation']['messages'] == []
@mock.patch('olympia.files.utils.get_signer_organizational_unit_name')
def test_mozilla_signed_not_allowed_not_allowed(self, mock_get_signature):
user_factory(email='[email protected]')
assert self.client.login(email='[email protected]')
mock_get_signature.return_value = 'Mozilla Extensions'
self.upload_file(
'../../../files/fixtures/files/webextension_signed_already.xpi'
)
upload = FileUpload.objects.get()
response = self.client.get(
reverse('devhub.upload_detail', args=[upload.uuid.hex, 'json'])
)
data = json.loads(force_str(response.content))
assert data['validation']['messages'] == [
{
'tier': 1,
'message': 'You cannot submit a Mozilla Signed Extension',
'fatal': True,
'type': 'error',
}
]
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_system_addon_update_allowed(self, mock_validator):
"""Updates to system addons are allowed from anyone."""
user = user_factory(email='[email protected]')
addon = addon_factory(guid='[email protected]')
AddonUser.objects.create(addon=addon, user=user)
assert self.client.login(email='[email protected]')
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file('../../../files/fixtures/files/mozilla_guid.xpi')
upload = FileUpload.objects.get()
response = self.client.get(
reverse(
'devhub.upload_detail_for_version', args=[addon.slug, upload.uuid.hex]
)
)
data = json.loads(force_str(response.content))
assert data['validation']['messages'] == []
def test_no_redirect_for_metadata(self):
user = UserProfile.objects.get(email='[email protected]')
addon = addon_factory(status=amo.STATUS_NULL)
AddonCategory.objects.filter(addon=addon).delete()
addon.addonuser_set.create(user=user)
self.post()
upload = FileUpload.objects.get()
response = self.client.get(
reverse(
'devhub.upload_detail_for_version', args=[addon.slug, upload.uuid.hex]
)
)
assert response.status_code == 200
def assert_json_error(request, field, msg):
assert request.status_code == 400
assert request['Content-Type'] == 'application/json'
field = '__all__' if field is None else field
content = json.loads(request.content)
assert field in content, '%r not in %r' % (field, content)
assert content[field] == [msg]
def assert_json_field(request, field, msg):
assert request.status_code == 200
assert request['Content-Type'] == 'application/json'
content = json.loads(request.content)
assert field in content, '%r not in %r' % (field, content)
assert content[field] == msg
class TestQueuePosition(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestQueuePosition, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.version = self.addon.current_version
self.addon.update(guid='guid@xpi')
assert self.client.login(email='[email protected]')
self.edit_url = reverse(
'devhub.versions.edit', args=[self.addon.slug, self.version.id]
)
# Add a second one also awaiting review in each queue
addon_factory(
status=amo.STATUS_NOMINATED, file_kw={'status': amo.STATUS_AWAITING_REVIEW}
)
version_factory(
addon=addon_factory(), file_kw={'status': amo.STATUS_AWAITING_REVIEW}
)
# And some static themes that shouldn't be counted
addon_factory(
status=amo.STATUS_NOMINATED,
type=amo.ADDON_STATICTHEME,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
version_factory(
addon=addon_factory(type=amo.ADDON_STATICTHEME),
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
addon_factory(
status=amo.STATUS_NOMINATED,
type=amo.ADDON_STATICTHEME,
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
version_factory(
addon=addon_factory(type=amo.ADDON_STATICTHEME),
file_kw={'status': amo.STATUS_AWAITING_REVIEW},
)
def test_not_in_queue(self):
response = self.client.get(self.addon.get_dev_url('versions'))
assert self.addon.status == amo.STATUS_APPROVED
assert pq(response.content)('.version-status-actions .dark').length == 0
def test_in_queue(self):
statuses = [
(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW),
(amo.STATUS_APPROVED, amo.STATUS_AWAITING_REVIEW),
]
for (addon_status, file_status) in statuses:
latest_version = self.addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED)
latest_version.files.all()[0].update(status=file_status)
self.addon.update(status=addon_status)
response = self.client.get(self.addon.get_dev_url('versions'))
doc = pq(response.content)
span = doc('.queue-position')
assert span.length
assert 'Queue Position: 1 of 2' in span.text()
def test_static_themes_in_queue(self):
statuses = [
(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW),
(amo.STATUS_APPROVED, amo.STATUS_AWAITING_REVIEW),
]
self.addon.update(type=amo.ADDON_STATICTHEME)
for (addon_status, file_status) in statuses:
latest_version = self.addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED)
latest_version.files.all()[0].update(status=file_status)
self.addon.update(status=addon_status)
response = self.client.get(self.addon.get_dev_url('versions'))
doc = pq(response.content)
span = doc('.queue-position')
assert span.length
assert 'Queue Position: 1 of 3' in span.text()
class TestVersionXSS(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestVersionXSS, self).setUp()
self.version = Addon.objects.get(id=3615).current_version
assert self.client.login(email='[email protected]')
def test_unique_version_num(self):
# Can't use a "/" to close the tag, as we're doing a get_url_path on
# it, which uses addons.versions, which consumes up to the first "/"
# encountered.
self.version.update(version='<script>alert("Happy XSS-Xmas");<script>')
response = self.client.get(reverse('devhub.addons'))
assert response.status_code == 200
assert b'<script>alert' not in response.content
        assert b'&lt;script&gt;alert' in response.content
class TestDeleteAddon(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestDeleteAddon, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.url = self.addon.get_dev_url('delete')
self.client.login(email='[email protected]')
def test_bad_password(self):
response = self.client.post(self.url, {'slug': 'nope'})
self.assert3xx(response, self.addon.get_dev_url('versions'))
assert response.context['title'] == (
'URL name was incorrect. Add-on was not deleted.'
)
assert Addon.objects.count() == 1
def test_success(self):
response = self.client.post(self.url, {'slug': 'a3615'})
self.assert3xx(response, reverse('devhub.addons'))
assert response.context['title'] == 'Add-on deleted.'
assert Addon.objects.count() == 0
class TestRequestReview(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestRequestReview, self).setUp()
self.addon = addon_factory()
self.version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED
)
self.redirect_url = self.addon.get_dev_url('versions')
self.public_url = reverse('devhub.request-review', args=[self.addon.slug])
assert self.client.login(email='[email protected]')
def get_addon(self):
return Addon.objects.get(id=self.addon.id)
def get_version(self):
return Version.objects.get(pk=self.version.id)
def check_400(self, url):
response = self.client.post(url)
assert response.status_code == 400
def test_public(self):
self.addon.update(status=amo.STATUS_APPROVED)
self.check_400(self.public_url)
@mock.patch('olympia.addons.models.Addon.has_complete_metadata')
def test_renominate_for_full_review(self, mock_has_complete_metadata):
# When a version is rejected, the addon is disabled.
# The author must upload a new version and re-nominate.
# Renominating the same version resets the nomination date.
mock_has_complete_metadata.return_value = True
orig_date = datetime.now() - timedelta(days=30)
# Pretend it was nominated in the past:
self.version.update(nomination=orig_date)
self.addon.update(status=amo.STATUS_NULL)
response = self.client.post(self.public_url)
self.assert3xx(response, self.redirect_url)
assert self.get_addon().status == amo.STATUS_NOMINATED
assert self.get_version().nomination.timetuple()[0:5] != (
orig_date.timetuple()[0:5]
)
class TestRedirects(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestRedirects, self).setUp()
self.base = reverse('devhub.index')
assert self.client.login(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.user.update(last_login_ip='192.168.1.1')
def test_edit(self):
url = self.base + 'addon/edit/3615'
response = self.client.get(url, follow=True)
self.assert3xx(response, reverse('devhub.addons.edit', args=['a3615']), 301)
url = self.base + 'addon/edit/3615/'
response = self.client.get(url, follow=True)
self.assert3xx(response, reverse('devhub.addons.edit', args=['a3615']), 301)
def test_status(self):
url = self.base + 'addon/status/3615'
response = self.client.get(url, follow=True)
self.assert3xx(response, reverse('devhub.addons.versions', args=['a3615']), 301)
def test_versions(self):
url = self.base + 'versions/3615'
response = self.client.get(url, follow=True)
self.assert3xx(response, reverse('devhub.addons.versions', args=['a3615']), 301)
def test_lwt_submit_redirects_to_addon_submit(self):
url = reverse('devhub.themes.submit')
response = self.client.get(url, follow=True)
self.assert3xx(response, reverse('devhub.submit.distribution'), 302)
class TestHasCompleteMetadataRedirects(TestCase):
"""Make sure Addons that are not complete in some way are correctly
redirected to the right view (and don't end up in a redirect loop)."""
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestHasCompleteMetadataRedirects, self).setUp()
self.f = mock.Mock()
self.f.__name__ = 'function'
self.request = RequestFactory().get('developers/addon/a3615/edit')
self.request.user = UserProfile.objects.get(email='[email protected]')
self.addon = Addon.objects.get(id=3615)
self.addon.update(status=amo.STATUS_NULL)
self.addon = Addon.objects.get(id=3615)
assert self.addon.has_complete_metadata(), self.addon.get_required_metadata()
assert not self.addon.should_redirect_to_submit_flow()
# We need to be logged in for any redirection into real views.
assert self.client.login(email='[email protected]')
def _test_redirect(self):
func = dev_required(self.f)
response = func(self.request, addon_id='a3615')
assert not self.f.called
assert response.status_code == 302
assert response['Location'] == ('/en-US/developers/addon/a3615/submit/details')
# Check the redirection doesn't redirect also.
redirection = self.client.get(response['Location'])
assert redirection.status_code == 200
def test_default(self):
func = dev_required(self.f)
func(self.request, addon_id='a3615')
# Don't redirect if there is no metadata to collect.
assert self.f.called
def test_no_summary(self):
delete_translation(self.addon, 'summary')
self._test_redirect()
def test_no_license(self):
self.addon.current_version.update(license=None)
self._test_redirect()
def test_no_license_no_summary(self):
self.addon.current_version.update(license=None)
delete_translation(self.addon, 'summary')
self._test_redirect()
class TestDocs(TestCase):
def test_doc_urls(self):
assert '/en-US/developers/docs/' == reverse('devhub.docs', args=[])
assert '/en-US/developers/docs/te' == reverse('devhub.docs', args=['te'])
        assert '/en-US/developers/docs/te/st' == reverse(
            'devhub.docs', args=['te/st']
        )
urls = [
(reverse('devhub.docs', args=['getting-started']), 301),
(reverse('devhub.docs', args=['how-to']), 301),
(reverse('devhub.docs', args=['how-to/other-addons']), 301),
(reverse('devhub.docs', args=['fake-page']), 404),
(reverse('devhub.docs', args=['how-to/fake-page']), 404),
(reverse('devhub.docs'), 301),
]
index = reverse('devhub.index')
for url in urls:
response = self.client.get(url[0])
assert response.status_code == url[1]
if url[1] == 302: # Redirect to the index page
self.assert3xx(response, index)
class TestRemoveLocale(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestRemoveLocale, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.url = reverse('devhub.addons.remove-locale', args=['a3615'])
assert self.client.login(email='[email protected]')
def test_bad_request(self):
response = self.client.post(self.url)
assert response.status_code == 400
def test_success(self):
self.addon.name = {'en-US': 'woo', 'el': 'yeah'}
self.addon.save()
self.addon.remove_locale('el')
qs = Translation.objects.filter(localized_string__isnull=False).values_list(
'locale', flat=True
)
response = self.client.post(self.url, {'locale': 'el'})
assert response.status_code == 200
assert sorted(qs.filter(id=self.addon.name_id)) == ['en-US']
def test_delete_default_locale(self):
response = self.client.post(self.url, {'locale': self.addon.default_locale})
assert response.status_code == 400
def test_remove_version_locale(self):
version = self.addon.versions.all()[0]
version.release_notes = {'fr': 'oui'}
version.save()
self.client.post(self.url, {'locale': 'fr'})
res = self.client.get(
reverse('devhub.versions.edit', args=[self.addon.slug, version.pk])
)
doc = pq(res.content)
        # There are 2 fields, one for en-US, one for init.
assert len(doc('div.trans textarea')) == 2
class TestXssOnAddonName(amo.tests.TestXss):
def test_devhub_feed_page(self):
url = reverse('devhub.feed', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
def test_devhub_addon_edit_page(self):
url = reverse('devhub.addons.edit', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
def test_devhub_version_edit_page(self):
url = reverse(
'devhub.versions.edit',
args=[self.addon.slug, self.addon.current_version.id],
)
self.assertNameAndNoXSS(url)
def test_devhub_version_list_page(self):
url = reverse('devhub.addons.versions', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
@pytest.mark.django_db
def test_get_next_version_number():
addon = addon_factory(version_kw={'version': '1.0'})
# Easy case - 1.0 to 2.0
assert get_next_version_number(addon) == '2.0'
# version numbers without minor numbers should be okay too.
version_factory(addon=addon, version='2')
assert get_next_version_number(addon) == '3.0'
# We just iterate the major version number
addon.current_version.update(version='34.45.0a1pre')
addon.current_version.save()
assert get_next_version_number(addon) == '35.0'
# "Take" 35.0
version_factory(
addon=addon, version='35.0', file_kw={'status': amo.STATUS_DISABLED}
)
assert get_next_version_number(addon) == '36.0'
# And 36.0, even though it's deleted.
version_factory(addon=addon, version='36.0').delete()
assert addon.current_version.version == '34.45.0a1pre'
assert get_next_version_number(addon) == '37.0'
class TestThemeBackgroundImage(TestCase):
def setUp(self):
user = user_factory(email='[email protected]')
assert self.client.login(email='[email protected]')
self.addon = addon_factory(users=[user])
self.url = reverse(
'devhub.submit.version.previous_background',
args=[self.addon.slug, 'listed'],
)
def test_wrong_user(self):
user_factory(email='[email protected]')
assert self.client.login(email='[email protected]')
response = self.client.post(self.url, follow=True)
assert response.status_code == 403
def test_no_header_image(self):
response = self.client.post(self.url, follow=True)
assert response.status_code == 200
data = json.loads(force_str(response.content))
assert data == {}
def test_header_image(self):
destination = self.addon.current_version.all_files[0].current_file_path
zip_file = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip'
)
copy_stored_file(zip_file, destination)
response = self.client.post(self.url, follow=True)
assert response.status_code == 200
data = json.loads(force_str(response.content))
assert data
assert len(data.items()) == 1
assert 'weta.png' in data
assert len(data['weta.png']) == 168596 # base64-encoded size
class TestLogout(UserViewBase):
def test_success(self):
user = UserProfile.objects.get(email='[email protected]')
self.client.login(email=user.email)
assert user.auth_id
response = self.client.get(reverse('devhub.index'), follow=True)
assert pq(response.content)('li a.avatar').attr('href') == (user.get_url_path())
assert pq(response.content)('li a.avatar img').attr('src') == (user.picture_url)
response = self.client.get('/en-US/developers/logout', follow=False)
self.assert3xx(response, '/en-US/firefox/', status_code=302)
response = self.client.get(reverse('devhub.index'), follow=True)
assert not pq(response.content)('li a.avatar')
user.reload()
assert not user.auth_id
def test_redirect(self):
self.client.login(email='[email protected]')
self.client.get(reverse('devhub.index'), follow=True)
url = '/en-US/about'
response = self.client.get(
urlparams(reverse('devhub.logout'), to=url), follow=True
)
self.assert3xx(response, url, status_code=302)
# Test an invalid domain
url = urlparams(
reverse('devhub.logout'), to='/en-US/about', domain='http://evil.com'
)
response = self.client.get(url, follow=False)
self.assert3xx(response, '/en-US/about', status_code=302)
def test_session_cookie_deleted_on_logout(self):
self.client.login(email='[email protected]')
self.client.cookies[API_TOKEN_COOKIE] = 'some.token.value'
response = self.client.get(reverse('devhub.logout'))
cookie = response.cookies[settings.SESSION_COOKIE_NAME]
cookie_date_string = 'Thu, 01 Jan 1970 00:00:00 GMT'
assert cookie.value == ''
        # In Django 2.1+ the expiry format changed from cookie_date to
        # django.utils.http.http_date.
assert cookie['expires'].replace('-', ' ') == cookie_date_string
jwt_cookie = response.cookies[API_TOKEN_COOKIE]
assert jwt_cookie.value == ''
assert jwt_cookie['expires'].replace('-', ' ') == cookie_date_string
class TestStatsLinksInManageMySubmissionsPage(TestCase):
def setUp(self):
super().setUp()
self.user = user_factory()
self.addon = addon_factory(users=[self.user])
self.url = reverse('devhub.addons')
self.client.login(email=self.user.email)
def test_link_to_stats(self):
response = self.client.get(self.url)
assert reverse('stats.overview', args=[self.addon.slug]) in str(
response.content
)
def test_link_to_stats_for_addon_disabled_by_user(self):
self.addon.update(disabled_by_user=True)
response = self.client.get(self.url)
assert reverse('stats.overview', args=[self.addon.slug]) in str(
response.content
)
def test_link_to_stats_for_unlisted_addon(self):
self.make_addon_unlisted(self.addon)
response = self.client.get(self.url)
assert reverse('stats.overview', args=[self.addon.slug]) in str(
response.content
)
def test_no_link_for_addon_disabled_by_mozilla(self):
self.addon.update(status=amo.STATUS_DISABLED)
self.make_addon_unlisted(self.addon)
response = self.client.get(self.url)
assert reverse('stats.overview', args=[self.addon.slug]) not in str(
response.content
)
def test_link_to_stats_for_langpacks(self):
self.addon.update(type=amo.ADDON_LPAPP)
response = self.client.get(self.url)
assert reverse('stats.overview', args=[self.addon.slug]) in str(
response.content
)
def test_link_to_stats_for_dictionaries(self):
self.addon.update(type=amo.ADDON_DICT)
response = self.client.get(self.url)
assert reverse('stats.overview', args=[self.addon.slug]) in str(
response.content
)
| bqbn/addons-server | src/olympia/devhub/tests/test_views.py | Python | bsd-3-clause | 83,689 |
import itertools
import functools
import operator
import warnings
from distutils.version import LooseVersion
import numpy as np
from pandas import compat
from pandas._libs import tslib, algos, lib
from pandas.core.dtypes.common import (
_get_dtype,
is_float, is_scalar,
is_integer, is_complex, is_float_dtype,
is_complex_dtype, is_integer_dtype,
is_bool_dtype, is_object_dtype,
is_numeric_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
is_datetime_or_timedelta_dtype,
is_int_or_datetime_dtype, is_any_int_dtype)
from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask
from pandas.core.dtypes.missing import isna, notna, na_value_for_dtype
from pandas.core.config import get_option
from pandas.core.common import _values_from_object
_BOTTLENECK_INSTALLED = False
_MIN_BOTTLENECK_VERSION = '1.0.0'
try:
import bottleneck as bn
ver = bn.__version__
_BOTTLENECK_INSTALLED = (LooseVersion(ver) >=
LooseVersion(_MIN_BOTTLENECK_VERSION))
if not _BOTTLENECK_INSTALLED:
warnings.warn(
"The installed version of bottleneck {ver} is not supported "
"in pandas and will be not be used\nThe minimum supported "
"version is {min_ver}\n".format(
ver=ver, min_ver=_MIN_BOTTLENECK_VERSION), UserWarning)
except ImportError: # pragma: no cover
pass
_USE_BOTTLENECK = False
def set_use_bottleneck(v=True):
# set/unset to use bottleneck
global _USE_BOTTLENECK
if _BOTTLENECK_INSTALLED:
_USE_BOTTLENECK = v
set_use_bottleneck(get_option('compute.use_bottleneck'))
class disallow(object):
def __init__(self, *dtypes):
super(disallow, self).__init__()
self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
def check(self, obj):
return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
self.dtypes)
def __call__(self, f):
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, compat.itervalues(kwargs))
if any(self.check(obj) for obj in obj_iter):
msg = 'reduction operation {name!r} not allowed for this dtype'
raise TypeError(msg.format(name=f.__name__.replace('nan', '')))
try:
with np.errstate(invalid='ignore'):
return f(*args, **kwargs)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(args[0]):
raise TypeError(e)
raise
return _f
class bottleneck_switch(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, alt):
bn_name = alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
@functools.wraps(alt)
def f(values, axis=None, skipna=True, **kwds):
if len(self.kwargs) > 0:
for k, v in compat.iteritems(self.kwargs):
if k not in kwds:
kwds[k] = v
try:
if values.size == 0:
# we either return np.nan or pd.NaT
if is_numeric_dtype(values):
values = values.astype('float64')
fill_value = na_value_for_dtype(values.dtype)
if values.ndim == 1:
return fill_value
else:
result_shape = (values.shape[:axis] +
values.shape[axis + 1:])
result = np.empty(result_shape, dtype=values.dtype)
result.fill(fill_value)
return result
if (_USE_BOTTLENECK and skipna and
_bn_ok_dtype(values.dtype, bn_name)):
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except Exception:
try:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(values):
raise TypeError(e)
raise
return result
return f
def _bn_ok_dtype(dt, name):
# Bottleneck chokes on datetime64
if (not is_object_dtype(dt) and not is_datetime_or_timedelta_dtype(dt)):
# GH 15507
# bottleneck does not properly upcast during the sum
# so can overflow
# GH 9422
# further we also want to preserve NaN when all elements
        # are NaN, unlike bottleneck/numpy which consider this
# to be 0
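        # Illustrative sketch (not from the original source): bottleneck's
        # nansum over an all-NaN array returns 0.0, just like numpy, whereas
        # pandas wants NaN preserved in that case, so nansum/nanprod always
        # fall back to the pandas implementations.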
if name in ['nansum', 'nanprod']:
return False
return True
return False
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
return lib.has_infs_f8(result.ravel())
elif result.dtype == 'f4':
return lib.has_infs_f4(result.ravel())
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError):
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == '+inf':
return np.inf
else:
return -np.inf
else:
if fill_value_typ is None:
return tslib.iNaT
else:
if fill_value_typ == '+inf':
# need the max int here
return _int64_max
else:
return tslib.iNaT
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
isfinite=False, copy=True):
""" utility to get the values view, mask, dtype
if necessary copy and mask using the specified fill_value
copy = True will force the copy
"""
values = _values_from_object(values)
if isfinite:
mask = _isfinite(values)
else:
mask = isna(values)
dtype = values.dtype
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(dtype, fill_value=fill_value,
fill_value_typ=fill_value_typ)
if skipna:
if copy:
values = values.copy()
if dtype_ok:
np.putmask(values, mask, fill_value)
# promote if needed
else:
values, changed = maybe_upcast_putmask(values, mask, fill_value)
elif copy:
values = values.copy()
values = _view_if_needed(values)
# return a platform independent precision dtype
dtype_max = dtype
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
dtype_max = np.int64
elif is_float_dtype(dtype):
dtype_max = np.float64
return values, mask, dtype, dtype_max
def _isfinite(values):
if is_datetime_or_timedelta_dtype(values):
return isna(values)
if (is_complex_dtype(values) or is_float_dtype(values) or
is_integer_dtype(values) or is_bool_dtype(values)):
return ~np.isfinite(values)
return ~np.isfinite(values.astype('float64'))
def _na_ok_dtype(dtype):
return not is_int_or_datetime_dtype(dtype)
def _view_if_needed(values):
if is_datetime_or_timedelta_dtype(values):
return values.view(np.int64)
return values
def _wrap_results(result, dtype):
""" wrap our results if needed """
if is_datetime64_dtype(dtype):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
# raise if we have a timedelta64[ns] which is too large
if np.fabs(result) > _int64_max:
raise ValueError("overflow in timedelta operation")
result = lib.Timedelta(result, unit='ns')
else:
result = result.astype('i8').view(dtype)
return result
def nanany(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna)
return values.any(axis)
def nanall(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, True, copy=skipna)
return values.all(axis)
@disallow('M8')
@bottleneck_switch()
def nansum(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
dtype_sum = dtype_max
if is_float_dtype(dtype):
dtype_sum = dtype
elif is_timedelta64_dtype(dtype):
dtype_sum = np.float64
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask)
return _wrap_results(the_sum, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
dtype_sum = dtype_max
dtype_count = np.float64
if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype):
dtype_sum = np.float64
elif is_float_dtype(dtype):
dtype_sum = dtype
dtype_count = dtype
count = _get_counts(mask, axis, dtype=dtype_count)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
if axis is not None and getattr(the_sum, 'ndim', False):
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return _wrap_results(the_mean, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna)
def get_median(x):
mask = notna(x)
if not skipna and not mask.all():
return np.nan
return algos.median(_values_from_object(x[mask]))
if not is_float_dtype(values):
values = values.astype('f8')
values[mask] = np.nan
if axis is None:
values = values.ravel()
notempty = values.size
# an array from a frame
if values.ndim > 1:
        # there's a non-empty array to apply over, otherwise numpy raises
if notempty:
return _wrap_results(
np.apply_along_axis(get_median, axis, values), dtype)
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
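        # e.g. (illustrative, not from the tests) a (0, 3) block reduced along
        # axis=0 returns a length-3 array filled with NaN instead of raising.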
shp = np.array(values.shape)
dims = np.arange(values.ndim)
ret = np.empty(shp[dims != axis])
ret.fill(np.nan)
return _wrap_results(ret, dtype)
# otherwise return a scalar value
return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(mask, axis, ddof, dtype=float):
dtype = _get_dtype(dtype)
count = _get_counts(mask, axis, dtype=dtype)
d = count - dtype.type(ddof)
# always return NaN, never inf
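    # e.g. (illustrative) with a single observation and ddof=1, d would be 0
    # and the later division would produce inf, so both count and d are set
    # to NaN to keep the downstream variance/std as NaN.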
if is_scalar(count):
if count <= ddof:
count = np.nan
d = np.nan
else:
mask2 = count <= ddof
if mask2.any():
np.putmask(d, mask2, np.nan)
np.putmask(count, mask2, np.nan)
return count, d
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanstd(values, axis=None, skipna=True, ddof=1):
result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof))
return _wrap_results(result, values.dtype)
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
values = _values_from_object(values)
dtype = values.dtype
mask = isna(values)
if is_any_int_dtype(values):
values = values.astype('f8')
values[mask] = np.nan
if is_float_dtype(values):
count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype)
else:
count, d = _get_counts_nanvar(mask, axis, ddof)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
# xref GH10242
# Compute variance via two-pass algorithm, which is stable against
# cancellation errors and relatively accurate for small numbers of
# observations.
#
# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
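    # Illustrative sketch (hypothetical numbers, not part of the computation):
    # for values = [1.0, 2.0, 3.0] with ddof=1 and no NaNs,
    #   count = 3, d = 2, avg = 6.0 / 3 = 2.0, sqr = [1.0, 0.0, 1.0],
    #   result = sqr.sum() / d = 2.0 / 2 = 1.0,
    # matching np.var([1.0, 2.0, 3.0], ddof=1).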
avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
if axis is not None:
avg = np.expand_dims(avg, axis)
sqr = _ensure_numeric((avg - values)**2)
np.putmask(sqr, mask, 0)
result = sqr.sum(axis=axis, dtype=np.float64) / d
# Return variance as np.float64 (the datatype used in the accumulator),
# unless we were dealing with a float array, in which case use the same
# precision as the original values array.
if is_float_dtype(dtype):
result = result.astype(dtype)
return _wrap_results(result, values.dtype)
@disallow('M8', 'm8')
def nansem(values, axis=None, skipna=True, ddof=1):
var = nanvar(values, axis, skipna, ddof=ddof)
mask = isna(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype)
var = nanvar(values, axis, skipna, ddof=ddof)
return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
@bottleneck_switch()
def reduction(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(
values, skipna, fill_value_typ=fill_value_typ, )
if ((axis is not None and values.shape[axis] == 0) or
values.size == 0):
try:
result = getattr(values, meth)(axis, dtype=dtype_max)
result.fill(np.nan)
except:
result = np.nan
else:
result = getattr(values, meth)(axis)
result = _wrap_results(result, dtype)
return _maybe_null_out(result, axis, mask)
reduction.__name__ = 'nan' + meth
return reduction
nanmin = _nanminmax('min', fill_value_typ='+inf')
nanmax = _nanminmax('max', fill_value_typ='-inf')
@disallow('O')
def nanargmax(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf')
result = values.argmax(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow('O')
def nanargmin(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf')
result = values.argmin(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
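# For illustration: nanargmax(np.array([np.nan, 2.0, 1.0])) returns 1, while
# an all-NaN input -- or skipna=False with any NaN present -- returns -1,
# matching the "Returns -1 in the NA case" notes in the docstrings above.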
@disallow('M8', 'm8')
def nanskew(values, axis=None, skipna=True):
""" Compute the sample skewness.
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G1. The algorithm computes this coefficient directly
from the second and third central moment.
"""
values = _values_from_object(values)
mask = isna(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count = _get_counts(mask, axis)
else:
count = _get_counts(mask, axis, dtype=values.dtype)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted ** 2
adjusted3 = adjusted2 * adjusted
m2 = adjusted2.sum(axis, dtype=np.float64)
m3 = adjusted3.sum(axis, dtype=np.float64)
# floating point error
#
    # #18044 in _libs/windows.pyx calc_skew follows this behavior
    # to fix the fperr by treating m2 < 1e-14 as zero
m2 = _zero_out_fperr(m2)
m3 = _zero_out_fperr(m3)
with np.errstate(invalid='ignore', divide='ignore'):
result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype)
if isinstance(result, np.ndarray):
result = np.where(m2 == 0, 0, result)
result[count < 3] = np.nan
return result
else:
result = 0 if m2 == 0 else result
if count < 3:
return np.nan
return result
@disallow('M8', 'm8')
def nankurt(values, axis=None, skipna=True):
""" Compute the sample excess kurtosis.
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
central moment.
"""
values = _values_from_object(values)
mask = isna(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count = _get_counts(mask, axis)
else:
count = _get_counts(mask, axis, dtype=values.dtype)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted ** 2
adjusted4 = adjusted2 ** 2
m2 = adjusted2.sum(axis, dtype=np.float64)
m4 = adjusted4.sum(axis, dtype=np.float64)
with np.errstate(invalid='ignore', divide='ignore'):
adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
numer = count * (count + 1) * (count - 1) * m4
denom = (count - 2) * (count - 3) * m2**2
result = numer / denom - adj
# floating point error
#
    # #18044 in _libs/windows.pyx calc_kurt follows this behavior
    # to fix the fperr by treating denom < 1e-14 as zero
numer = _zero_out_fperr(numer)
denom = _zero_out_fperr(denom)
if not isinstance(denom, np.ndarray):
# if ``denom`` is a scalar, check these corner cases first before
# doing division
if count < 4:
return np.nan
if denom == 0:
return 0
with np.errstate(invalid='ignore', divide='ignore'):
result = numer / denom - adj
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype)
if isinstance(result, np.ndarray):
result = np.where(denom == 0, 0, result)
result[count < 4] = np.nan
return result
@disallow('M8', 'm8')
def nanprod(values, axis=None, skipna=True):
mask = isna(values)
if skipna and not is_any_int_dtype(values):
values = values.copy()
values[mask] = 1
result = values.prod(axis)
return _maybe_null_out(result, axis, mask)
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None or not getattr(result, 'ndim', False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(mask, axis, dtype=float):
dtype = _get_dtype(dtype)
if axis is None:
return dtype.type(mask.size - mask.sum())
count = mask.shape[axis] - mask.sum(axis)
if is_scalar(count):
return dtype.type(count)
try:
return count.astype(dtype)
except AttributeError:
return np.array(count, dtype=dtype)
def _maybe_null_out(result, axis, mask):
if axis is not None and getattr(result, 'ndim', False):
null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
if np.any(null_mask):
if is_numeric_dtype(result):
if np.iscomplexobj(result):
result = result.astype('c16')
else:
result = result.astype('f8')
result[null_mask] = np.nan
else:
# GH12941, use None to auto cast null
result[null_mask] = None
elif result is not tslib.NaT:
null_mask = mask.size - mask.sum()
if null_mask == 0:
result = np.nan
return result
def _zero_out_fperr(arg):
    # #18044 references this behavior to fix the rolling skew/kurt issue
if isinstance(arg, np.ndarray):
with np.errstate(invalid='ignore'):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
@disallow('M8', 'm8')
def nancorr(a, b, method='pearson', min_periods=None):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError('Operands to nancorr must have same size')
if min_periods is None:
min_periods = 1
valid = notna(a) & notna(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
f = get_corr_func(method)
return f(a, b)
def get_corr_func(method):
if method in ['kendall', 'spearman']:
from scipy.stats import kendalltau, spearmanr
def _pearson(a, b):
return np.corrcoef(a, b)[0, 1]
def _kendall(a, b):
rs = kendalltau(a, b)
if isinstance(rs, tuple):
return rs[0]
return rs
def _spearman(a, b):
return spearmanr(a, b)[0]
_cor_methods = {
'pearson': _pearson,
'kendall': _kendall,
'spearman': _spearman
}
return _cor_methods[method]
@disallow('M8', 'm8')
def nancov(a, b, min_periods=None):
if len(a) != len(b):
raise AssertionError('Operands to nancov must have same size')
if min_periods is None:
min_periods = 1
valid = notna(a) & notna(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
return np.cov(a, b)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except:
x = x.astype(np.float64)
else:
if not np.any(x.imag):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except Exception:
try:
x = complex(x)
except Exception:
raise TypeError('Could not convert {value!s} to numeric'
.format(value=x))
return x
# NA-friendly array comparisons
def make_nancomp(op):
def f(x, y):
xmask = isna(x)
ymask = isna(y)
mask = xmask | ymask
with np.errstate(all='ignore'):
result = op(x, y)
if mask.any():
if is_bool_dtype(result):
result = result.astype('O')
np.putmask(result, mask, np.nan)
return result
return f
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
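# For illustration: with x = np.array([1.0, np.nan]) and y = np.zeros(2),
# nangt(x, y) evaluates x > y, casts the boolean result to object dtype and
# masks the NA position back to nan, i.e. array([True, nan], dtype=object)
# rather than the plain [True, False] numpy would return.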
| winklerand/pandas | pandas/core/nanops.py | Python | bsd-3-clause | 24,684 |
# -*- coding: utf-8 -*-
#
# malepierre documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'malepierre'
copyright = u"2015, Eliot Berriot"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'malepierredoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'malepierre.tex',
u'malepierre Documentation',
u"Eliot Berriot", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'malepierre', u'malepierre Documentation',
[u"Eliot Berriot"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'malepierre', u'malepierre Documentation',
u"Eliot Berriot", 'malepierre',
     'Warhammer campaign manager', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| EliotBerriot/malepierre | docs/conf.py | Python | bsd-3-clause | 7,814 |
#!/usr/bin/env python
from distutils.core import setup
with open('README.rst') as f:
readme = f.read()
with open('CHANGES.rst') as f:
changes = f.read()
setup(
name='sdict',
version='0.1.0',
description='dict subclass with slicing and insertion.',
author='Jared Suttles',
url='https://github.com/jaredks/sdict',
py_modules=['sdict'],
package_data={'': ['LICENSE', 'README.rst', 'CHANGES.rst']},
long_description=readme + '\n\n' + changes,
license='BSD License'
)
| jaredks/sdict | setup.py | Python | bsd-3-clause | 511 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Goal: Implement simple tasks executed during deployment with deploy.sh
#
# @authors
# Andrei Sura <[email protected]>
# Taeber Rapczak <[email protected]>
"""
Fabric deployment file.
@see
http://fabric-docs.readthedocs.org/en/latest/
http://docs.fabfile.org/en/latest/usage/fab.html#cmdoption--show
http://docs.fabfile.org/en/latest/api/core/operations.html
"""
import imp
import sys
import os.path
from fabric import colors
from fabric.api import cd
from fabric.api import env, local, lcd
from fabric.context_managers import hide, prefix, settings
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.operations import require, run, sudo
from fabric.utils import abort
# from pprint import pprint
def help():
local('fab --list')
# =========================================================================
# Deployment repos
# =========================================================================
def load_environ(target, new_settings={}):
""" Load an environment properties file 'environ/fabric.py' """
# pprint(sys.path)
fab_conf_file = os.path.join(target, 'fabric.py')
if not os.path.isfile(fab_conf_file):
abort("Please create the '{}' file".format(fab_conf_file))
try:
fabric = imp.load_source('fabric', fab_conf_file)
except ImportError:
abort("Can't load '{}' environ; is PYTHONPATH exported?".format(target))
env.update(fabric.get_settings(new_settings))
env.environment = target
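# A minimal sketch of what an '<environment>/fabric.py' file is expected to
# provide, inferred from the get_settings() call above (the keys shown are
# hypothetical examples, not a definitive list):
#
#     def get_settings(overrides=None):
#         settings = {
#             'project_path': '/srv/dropper',  # hypothetical value
#             'db_host': 'localhost',          # hypothetical value
#         }
#         settings.update(overrides or {})
#         return settings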
def production(new_settings={}):
"""Work on the production environment"""
load_environ('production', new_settings)
def staging(new_settings={}):
"""Work on the staging environment"""
load_environ('staging', new_settings)
def _remove_directories():
"""Remove the top project directory"""
print('\n\nRemoving directories...')
if exists('%(project_path)s' % env):
sudo('rm -rf %(project_path)s' % env)
else:
print('Path %(project_path)s does not exist' % env)
def _init_directories():
"""Create initial directories"""
# @TODO: create a backup if directory exists
print('\n\nCreating initial directories...')
_remove_directories()
sudo('mkdir -p %(project_path)s/logs' % env)
# sudo('do something as user', user=notme)
sudo('chown -R %(user)s:%(server_group)s %(project_path)s' % env)
# Let group members to delete files
sudo('chmod -R 770 %(project_path)s' % env)
def _fix_perms(folder):
""" Fixe permissions for a specified folder:
$ chgrp authorized-group some-folder
$ chmod -R g+w,o-rwx some-folder
"""
sudo('chgrp -R {} {}'.format(env.server_group, folder))
sudo('chmod -R g+sw,o-rwx {}'.format(folder))
def _init_virtualenv():
"""Create initial virtualenv"""
print('\n\nCreating virtualenv...')
run('virtualenv -p %(python)s --no-site-packages %(env_path)s' % env)
with prefix('source %(env_path)s/bin/activate' % env):
run('easy_install pip')
_fix_perms(env.env_path)
def _install_requirements():
"""Install dependencies defined in the requirements file"""
print('\n\nInstalling requirements...')
with prefix('source %(env_path)s/bin/activate' % env):
run('pip install -r '
' %(project_repo_path)s/app/requirements/deploy.txt'
% env)
_fix_perms(env.env_path)
def _update_requirements():
"""Update dependencies defined in the requirements file"""
print('\n\nUpdating requirements...')
with prefix('source %(env_path)s/bin/activate' % env):
run('pip install -U -r '
' %(project_repo_path)s/app/requirements/deploy.txt' % env)
_fix_perms(env.env_path)
def _is_prod():
""" Check if env.environment == 'production'"""
require('environment', provided_by=[production, staging])
return env.environment == 'production'
def bootstrap(tag='master'):
"""Bootstrap the deployment using the specified branch"""
require('environment', provided_by=[production, staging])
print(MOTD_PROD if _is_prod() else MOTD_STAG)
msg = colors.red('\n%(project_path)s exists. '
'Do you want to continue anyway?' % env)
if (not exists('%(project_path)s' % env)
or confirm(msg, default=False)):
with settings(hide('stdout', 'stderr')):
_init_directories()
_init_virtualenv()
_git_clone_tag(tag=tag)
_install_requirements()
update_config(tag=tag) # upload new config files
enable_site()
else:
sys.exit('\nAborting.')
def deploy(tag='master'):
"""Update the code, config, requirements, and enable the site
"""
require('environment', provided_by=[production, staging])
with settings(hide('stdout', 'stderr')):
disable_site()
_git_clone_tag(tag=tag)
_install_requirements()
_update_requirements()
update_config(tag=tag) # upload new config files
enable_site()
def mysql_conf():
""" Store mysql login credentials to the encrypted file
~/.mylogin.cnf
Once created you can connect to the database without typing the password.
Example:
$ mysql_config_editor set --login-path=local --user=root --password \
--host=localhost
$ mysql --login-path=local
For more details see:
https://dev.mysql.com/doc/refman/5.6/en/mysql-config-editor.html
"""
require('environment', provided_by=[production, staging])
print("Storing the database credentials to ~/.mylogin.cnf")
    print(colors.yellow("⚠ Please note that if you have a '#' in your password"
" then you have to specify the password in quotes."))
cmd = ("mysql_config_editor set "
" --login-path=fabric_%(db_host)s "
" --user=%(db_user)s "
" --password "
" --host=%(db_host)s"
% env)
local(cmd, capture=True)
def _mysql_login_path():
""" Create a string to be used for storing credentials to ~/.mylogin.cnf
@see #mysql_conf()
"""
require('environment', provided_by=[production, staging])
return "fabric_%(db_host)s" % env
def mysql_conf_test():
""" Check if a configuration was created for the host"""
require('environment', provided_by=[production, staging])
from subprocess import Popen, PIPE
login_path = _mysql_login_path()
cmd = ("mysql_config_editor print --login-path={} 2> /dev/null"
.format(login_path) % env)
proc = Popen(cmd, shell=True, stdout=PIPE)
(out, err) = proc.communicate()
# print("Checking mysql login path: {}".format(login_path))
has_config = ("" != out)
if not has_config:
print("There are no mysql credentials stored in ~/.mylogin.cnf file."
" Please store the database credentials by running: \n\t"
" fab {} mysql_conf".format(env.environment))
sys.exit('\nAborting.')
def mysql_check_db_exists():
""" Check if the specified database was already created """
require('environment', provided_by=[production, staging])
mysql_conf_test()
cmd = ("echo 'SELECT COUNT(*) FROM information_schema.SCHEMATA "
" WHERE SCHEMA_NAME = \"%(db_name)s\" ' "
" | mysql --login-path=fabric_%(db_host)s "
" | sort | head -1"
% env)
result = local(cmd, capture=True)
# print("check_db_exists: {}".format(result))
return result
def mysql_count_tables():
""" Return the number of tables in the database """
require('environment', provided_by=[production, staging])
exists = mysql_check_db_exists()
if not exists:
abort(colors.red("Unable to list database '%(db_name)s' tables."
"The database does not exist." % env))
login_path = _mysql_login_path()
cmd = ("echo 'SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES "
" WHERE TABLE_SCHEMA = \"%(db_name)s\" ' "
" | mysql --login-path={}"
" | sort | head -1".format(login_path)
% env)
result = local(cmd, capture=True)
return int(result)
def mysql_list_tables():
""" Show the list of tables with row counts """
require('environment', provided_by=[production, staging])
exists = mysql_check_db_exists()
if not exists:
abort(colors.red("Unable to list database '%(db_name)s' tables."
"The database does not exist." % env))
login_path = _mysql_login_path()
cmd = ("echo 'SELECT table_name, table_rows FROM INFORMATION_SCHEMA.TABLES "
" WHERE TABLE_SCHEMA = \"%(db_name)s\" ' "
" | mysql --login-path={}".format(login_path)
% env)
result = local(cmd, capture=True)
print(result)
def mysql_create_tables():
""" Create the application tables.
Assumes that the database was already created and
    a user was granted `create` privileges.
"""
require('environment', provided_by=[production, staging])
exists = mysql_check_db_exists()
if not exists:
abort(colors.red("Unable to create tables in database '%(db_name)s'."
"The database does not exist" % env))
total_tables = mysql_count_tables()
if total_tables > 0:
print(colors.red("The database already contains {} tables."
.format(total_tables)))
sys.exit("If you need to re-create the tables please run: "
"\n\t fab {} mysql_reset_tables"
.format(env.environment))
login_path = _mysql_login_path()
files = ['001/upgrade.sql', '002/upgrade.sql', '002/data.sql']
with lcd('../db/'):
for sql in files:
cmd = ("mysql --login-path={} %(db_name)s < {}"
.format(login_path, sql)
% env)
local(cmd)
def mysql_drop_tables():
""" Drop the application tables"""
require('environment', provided_by=[production, staging])
total_tables = mysql_count_tables()
question = ("Do you want to drop the {} tables in '%(db_name)s'?"
.format(total_tables) % env)
if not confirm(question):
abort(colors.yellow("Aborting at user request."))
exists = mysql_check_db_exists()
if not exists:
abort(colors.red("Unable to drop tables in database '%(db_name)s'."
"The database does not exist" % env))
files = ['002/downgrade.sql', '001/downgrade.sql']
with lcd('../db/'):
for sql in files:
cmd = ("mysql --login-path=fabric_%(db_host)s %(db_name)s < {}"
.format(sql)
% env)
local(cmd)
def mysql_reset_tables():
""" Drop and re-create the application tables"""
total_tables = mysql_count_tables()
if total_tables > 0:
mysql_drop_tables()
mysql_create_tables()
def _toggle_apache_site(state):
"""Switch site's status to enabled or disabled
Note: the `project_name` is used for referencing the config files
"""
action = "Enabling" if state else "Disabling"
print('\n%s site...' % action)
env.apache_command = 'a2ensite' if state else 'a2dissite'
sudo('%(apache_command)s %(project_name)s' % env)
# We have to have the ssl config too because we use the NetScaler
sudo('%(apache_command)s %(project_name)s-ssl' % env)
sudo('service apache2 reload')
def check_syntax_apache():
"""Check the syntax of apache configurations"""
require('environment', provided_by=[production, staging])
out = sudo('apache2ctl -S')
print("\n ==> Apache syntax check: \n{}".format(out))
def show_errors_apache():
"""Show info about apache"""
require('environment', provided_by=[production, staging])
out = sudo('cat %(project_path)s/logs/error.log' % env)
print("\n ==> Apache errors: \n{}".format(out))
def show_config_apache():
"""Show info about apache"""
require('environment', provided_by=[production, staging])
out = sudo('apachectl -V')
print("\n ==> Apache config: \n{}".format(out))
out = sudo('apachectl -S 2>&1')
print("\n ==> Apache virtualhosts listening on port 443: \n{}".format(out))
# sudo('apachectl -D DUMP_MODULES')
def enable_site():
"""Enable the site"""
require('environment', provided_by=[production, staging])
with settings(hide('stdout', 'stderr')):
_toggle_apache_site(True)
def disable_site():
"""Disable the site"""
require('environment', provided_by=[production, staging])
with settings(hide('stdout', 'stderr')):
_toggle_apache_site(False)
def update_config(tag='master'):
"""Update server configuration files
Warnings:
- the CWD of the fabfile is used to specify paths
- if you use the "%(var)s/ % env" syntax make *sure*
that you provide the "var" in your fabric.py file
"""
require('environment', provided_by=[production, staging])
print('\n\nUpdating server configuration...')
local_settings_file = os.path.abspath('%(environment)s/settings.conf' % env)
local("""sed -i'.bak' -e "s|^APP_VERSION.*|APP_VERSION = '{}'|" {}"""
.format(tag, local_settings_file))
with settings(hide('stdout', 'stderr')):
# Create a map of files to upload
# https://github.com/fabric/fabric/blob/master/fabric/operations.py#put
files_map = {
0: {
'local': os.path.abspath('dropper.wsgi'),
'remote': env.wsgi_file,
'mode': '644',
},
1: {
'local': os.path.abspath('%(environment)s/virtualhost.conf'
% env),
'remote': env.vhost_file,
'mode': '644',
'group': 'root'
},
2: {
'local': os.path.abspath('%(environment)s/virtualhost-ssl.conf'
% env),
'remote': env.vhost_ssl_file,
'mode': '644',
'group': 'root'
},
3: {
'local': local_settings_file,
'remote': env.settings_file,
'mode': '640'
}
}
# print files_map
# upload files but create a bakup with *.bak extension if the
# remote file already exists
for key, file_data in files_map.iteritems():
local_file = file_data['local']
remote_file = file_data['remote']
mode = file_data['mode']
if not os.path.isfile(local_file):
abort("Please create the file: {}".format(local_file))
print('\nUploading {} \n to ==> {} with mode {}'
.format(local_file, remote_file, mode))
upload_template(filename=local_file,
destination=remote_file,
context=env,
use_sudo=True,
mirror_local_mode=False,
mode=mode,
pty=None)
if 'group' in file_data:
sudo('chgrp {} {}'.format(file_data['group'], remote_file))
print("Changed group to {} for {}"
.format(file_data['group'], remote_file))
else:
sudo('chgrp {} {}'.format(env.server_group, remote_file))
def restart_wsgi_app():
"""Reload the daemon processes by touching the WSGI file"""
require('environment', provided_by=[production, staging])
with settings(hide('stdout', 'stderr')):
sudo('touch %(wsgi_file)s' % env)
def check_app():
"""cURL the target server to check if the app is up"""
require('environment', provided_by=[production, staging])
local('curl -sk https://%(project_url)s | grep "Version " ' % env)
def print_project_repo():
""" Show the git repository path specified in the fabric.py file"""
print("\n Project repo: {}".format(env.project_repo))
def print_project_name():
""" Show the project name uses as name for deploying the code"""
print("Project name: {}".format(env.project_name))
def git_tags(url=None, last_only=False):
""" Show repo tags"""
require('environment', provided_by=[production, staging])
if url is None:
url = '%(project_repo)s' % env
cmd = ('git ls-remote --tags {} '
' | cut -d / -f3 '
' | sort -t. -k 1,1n -k 2,2n -k 3,3n '.format(url))
if last_only:
cmd += ' | tail -1'
result = local(cmd, capture=True)
return result
def _git_clone_tag(tag=None):
""" Clone a `slim` version of the code
Note: if the tag was already deployed once we create a backup
"""
require('environment', provided_by=[production, staging])
url = env.project_repo
if tag is None:
print(colors.yellow(
"No tag specified. Attempt to read the last tag from: {}"
.format(url)))
tag = git_tags(url=url, last_only=True)
if not tag:
abort(colors.red('\nPlease specify a valid tag.'))
# Clone the code to src/v0.0.1`
destination = ('%(project_path_src)s/v{}'.format(tag) % env)
cmd = ('git clone -b {} --single-branch %(project_repo)s {}'
.format(tag, destination) % env)
if exists(destination):
with cd(env.project_path_src):
cmd_mv = 'mv v{} backup_`date "+%Y-%m-%d"`_v{}'.format(tag, tag)
sudo(cmd_mv, user=env.server_user)
sudo(cmd, user=env.server_user)
_fix_perms(destination)
with cd(env.project_path_src):
# Create symlink
sudo('ln -nsf {} current'.format(destination), user=env.server_user)
def git_archive_tag():
""" Create a vTAG_NUMBER.tar archive file of the code
suitable for deployment (excludes .git folder)
Note: does not work with --remote=https://github.com/...)
"""
require('environment', provided_by=[production, staging])
last_tag = git_tags(last_only=True)
archive_name = "v{}.tar".format(last_tag)
local('git archive --format=tar --remote=. {} ../app > {}'
.format(last_tag, archive_name))
print("Created archive file: {}".format(archive_name))
# -----------------------------------------------------------------------------
MOTD_PROD = """
____ __ ____ _
| _ \ _ __ ___ _ __ _ __ ___ _ __ ____\ \ | _ \ _ __ ___ __| |
| | | | '__/ _ \| '_ \| '_ \ / _ \ '__| |_____\ \ | |_) | '__/ _ \ / _` |
| |_| | | | (_) | |_) | |_) | __/ | |_____/ / | __/| | | (_) | (_| |
|____/|_| \___/| .__/| .__/ \___|_| /_/ |_| |_| \___/ \__,_|
|_| |_|
"""
MOTD_STAG = """
____ __ ____
| _ \ _ __ ___ _ __ _ __ ___ _ __ \ \ | _ \ _____ __
| | | | '__/ _ \| '_ \| '_ \ / _ \ '__| _____\ \ | | | |/ _ \ \ / /
| |_| | | | (_) | |_) | |_) | __/ | |_____/ / | |_| | __/\ V /
|____/|_| \___/| .__/| .__/ \___|_| /_/ |____/ \___| \_/
|_| |_|
"""
| indera/redi-dropper-client | app/deploy/fabfile.py | Python | bsd-3-clause | 19,299 |
from .factory import factorize, ArgumentError, NonExistentTypeError, NonExistentModuleError | engine-cl/ng-factory | ng_factory/__init__.py | Python | bsd-3-clause | 91 |
from . import test_invoicing
| mycodeday/crm-platform | stock_dropshipping/tests/__init__.py | Python | gpl-3.0 | 29 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-12-21 01:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('simplesite', '0003_auto_20161006_2321'),
]
operations = [
migrations.AddField(
model_name='page',
name='alternative_url',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Alternative URL'),
),
]
| marsxn/simple-site | simplesite/migrations/0004_page_alternative_url.py | Python | mit | 514 |
#!/usr/bin/env python
""" A unittest script for the WgsDnaPrep module. """
import unittest
import json
from cutlass import WgsDnaPrep
from cutlass import MIMS, MimsException
from CutlassTestConfig import CutlassTestConfig
from CutlassTestUtil import CutlassTestUtil
# pylint: disable=W0703, C1801
class WgsDnaPrepTest(unittest.TestCase):
""" A unit test class for the WgsDnaPrep class. """
session = None
util = None
@classmethod
def setUpClass(cls):
""" Setup for the unittest. """
# Establish the session for each test method
cls.session = CutlassTestConfig.get_session()
cls.util = CutlassTestUtil()
def testImport(self):
""" Test the importation of the WgsDnaPrep module. """
success = False
try:
from cutlass import WgsDnaPrep
success = True
except Exception:
pass
self.failUnless(success)
self.failIf(WgsDnaPrep is None)
def testSessionCreate(self):
""" Test the creation of a WgsDnaPrep via the session. """
success = False
wgsDnaPrep = None
try:
wgsDnaPrep = self.session.create_object("wgs_dna_prep")
success = True
except Exception:
pass
self.failUnless(success)
self.failIf(wgsDnaPrep is None)
def testToJson(self):
""" Test the generation of JSON from a WgsDnaPrep instance. """
wgsDnaPrep = self.session.create_object("wgs_dna_prep")
success = False
comment = "Test comment"
wgsDnaPrep.comment = comment
wgsDnaPrep_json = None
try:
wgsDnaPrep_json = wgsDnaPrep.to_json()
success = True
except Exception:
pass
self.assertTrue(success, "Able to use 'to_json'.")
self.assertTrue(wgsDnaPrep_json is not None, "to_json() returned data.")
parse_success = False
try:
wgsDnaPrep_data = json.loads(wgsDnaPrep_json)
parse_success = True
except Exception:
pass
self.assertTrue(parse_success, "to_json() did not throw an exception.")
self.assertTrue(wgsDnaPrep_data is not None, "to_json() returned parsable JSON.")
self.assertTrue('meta' in wgsDnaPrep_data, "JSON has 'meta' key in it.")
self.assertEqual(wgsDnaPrep_data['meta']['comment'],
comment, "'comment' in JSON had expected value.")
def testId(self):
""" Test the id property. """
prep = self.session.create_object("wgs_dna_prep")
self.assertTrue(prep.id is None,
"New template WgsDnaPrep has no ID.")
with self.assertRaises(AttributeError):
prep.id = "test"
def testVersion(self):
""" Test the version property. """
prep = self.session.create_object("wgs_dna_prep")
self.assertTrue(prep.version is None,
"New template WgsDnaPrep has no version.")
with self.assertRaises(ValueError):
prep.version = "test"
def testComment(self):
""" Test the comment property. """
prep = self.session.create_object("wgs_dna_prep")
self.util.stringTypeTest(self, prep, "comment")
self.util.stringPropertyTest(self, prep, "comment")
def testFragSize(self):
""" Test the frag_size property. """
prep = self.session.create_object("wgs_dna_prep")
self.util.intTypeTest(self, prep, "frag_size")
self.util.intPropertyTest(self, prep, "frag_size")
def testFragSizeNegative(self):
""" Test the frag_size property with an illegal negative value. """
prep = self.session.create_object("wgs_dna_prep")
with self.assertRaises(Exception):
prep.frag_size = -1
def testLibLayout(self):
""" Test the lib_layout property. """
prep = self.session.create_object("wgs_dna_prep")
self.util.stringTypeTest(self, prep, "lib_layout")
self.util.stringPropertyTest(self, prep, "lib_layout")
def testLibSelection(self):
""" Test the lib_selection property. """
prep = self.session.create_object("wgs_dna_prep")
self.util.stringTypeTest(self, prep, "lib_selection")
self.util.stringPropertyTest(self, prep, "lib_selection")
def testNCBITaxonID(self):
""" Test the ncbi_taxon_id property. """
prep = self.session.create_object("wgs_dna_prep")
self.util.stringTypeTest(self, prep, "ncbi_taxon_id")
self.util.stringPropertyTest(self, prep, "ncbi_taxon_id")
def testPrepID(self):
""" Test the prep_id property. """
prep = self.session.create_object("wgs_dna_prep")
self.util.stringTypeTest(self, prep, "prep_id")
self.util.stringPropertyTest(self, prep, "prep_id")
def testSequencingCenter(self):
""" Test the sequencing_center property. """
prep = self.session.create_object("wgs_dna_prep")
self.util.stringTypeTest(self, prep, "sequencing_center")
self.util.stringPropertyTest(self, prep, "sequencing_center")
def testSequencingContact(self):
""" Test the sequencing_contact property. """
prep = self.session.create_object("wgs_dna_prep")
self.util.stringTypeTest(self, prep, "sequencing_contact")
self.util.stringPropertyTest(self, prep, "sequencing_contact")
def testSRSID(self):
""" Test the srs_id property. """
prep = self.session.create_object("wgs_dna_prep")
self.util.stringTypeTest(self, prep, "srs_id")
self.util.stringPropertyTest(self, prep, "srs_id")
def testStorageDuration(self):
""" Test the storage_duration property. """
prep = self.session.create_object("wgs_dna_prep")
self.util.intTypeTest(self, prep, "storage_duration")
self.util.intPropertyTest(self, prep, "storage_duration")
def testStorageDurationNegative(self):
""" Test the storage_duration property with an illegal negative value. """
prep = self.session.create_object("wgs_dna_prep")
with self.assertRaises(Exception):
prep.storage_duration = -1
def testTags(self):
""" Test the tags property. """
prep = self.session.create_object("wgs_dna_prep")
tags = prep.tags
self.assertTrue(type(tags) == list, "WgsDnaPrep tags() method returns a list.")
self.assertEqual(len(tags), 0, "Template wgsDnaPrep tags list is empty.")
new_tags = ["tagA", "tagB"]
prep.tags = new_tags
self.assertEqual(prep.tags, new_tags, "Can set tags on a WgsDnaPrep.")
json_str = prep.to_json()
doc = json.loads(json_str)
self.assertTrue('tags' in doc['meta'],
"JSON representation has 'tags' field in 'meta'.")
self.assertEqual(doc['meta']['tags'], new_tags,
"JSON representation had correct tags after setter.")
def testAddTag(self):
""" Test the add_tag() method. """
prep = self.session.create_object("wgs_dna_prep")
prep.add_tag("test")
self.assertEqual(prep.tags, ["test"], "Can add a tag to a wgsDnaPrep.")
json_str = prep.to_json()
doc = json.loads(json_str)
self.assertEqual(doc['meta']['tags'], ["test"],
"JSON representation had correct tags after add_tag().")
# Try adding the same tag yet again, shouldn't get a duplicate
with self.assertRaises(ValueError):
prep.add_tag("test")
json_str = prep.to_json()
doc2 = json.loads(json_str)
self.assertEqual(doc2['meta']['tags'], ["test"],
"JSON document did not end up with duplicate tags.")
def testMims(self):
""" Test the mims property. """
wgsDnaPrep = self.session.create_object("wgs_dna_prep")
self.assertTrue(wgsDnaPrep.mims is None,
"New template wgsDnaPrep has no MIMS data.")
invalid_test_mims = {
"a": 1,
"b": 2
}
with self.assertRaises(MimsException):
wgsDnaPrep.mims = invalid_test_mims
self.assertTrue(wgsDnaPrep.mims is None,
"Template wgsDnaPrep has no MIMS after invalid set attempt.")
valid_mims = {
"adapters": "test_adapters",
"annot_source": "test_annot_source",
"assembly": "test_assembly",
"assembly_name": "test_assembly_name",
"biome": "test_biome",
"collection_date": "test_collection_date",
"env_package": "test_env_package",
"extrachrom_elements": "test_extrachrom_elements",
"encoded_traits": "test_encoded_traits",
"experimental_factor": "test_experimental_factor",
"feature": "test_feature",
"findex": "test_findex",
"finishing_strategy": "test_finishing_strategy",
"geo_loc_name": "test_geo_loc_name",
"investigation_type": "test_investigation_type",
"lat_lon": "test_lat_long",
"lib_const_meth": "test_lib_const_meth",
"lib_reads_seqd": "test_lib_reads_seqd",
"lib_screen": "test_lib_screen",
"lib_size": 2000,
"lib_vector": "test_lib_vector",
"material": "test_material",
"nucl_acid_amp": "test_nucl_acid_amp",
"nucl_acid_ext": "test_nucl_acid_ext",
"project_name": "test_project_name",
"rel_to_oxygen": "test_rel_to_oxygen",
"rindex": "test_rindex",
"samp_collect_device": "test_samp_collect_device",
"samp_mat_process": "test_samp_map_process",
"samp_size": "test_samp_size",
"seq_meth": "test_seq_meth",
"sop": ["a", "b", "c"],
"source_mat_id": ["a", "b", "c"],
"submitted_to_insdc": True,
"url": ["a", "b", "c"]
}
# Assume failure
success = False
try:
wgsDnaPrep.mims = valid_mims
success = True
except Exception:
pass
self.assertTrue(success, "Valid MIMS data does not raise exception.")
self.assertTrue(wgsDnaPrep.mims is not None, "mims getter retrieves data.")
biome = wgsDnaPrep.mims['biome']
self.assertEqual(biome, valid_mims["biome"],
"Retrieved MIMS data appears to be okay.")
def testRequiredFields(self):
""" Test the required_fields() static method. """
required = WgsDnaPrep.required_fields()
self.assertEqual(type(required), tuple,
"required_fields() returns a tuple.")
self.assertTrue(len(required) > 0,
"required_field() did not return empty value.")
def testLoadSaveDeleteWgsDnaPrep(self):
""" Extensive test for the load, edit, save and delete functions. """
# attempt to save the prep at all points before and after adding
# the required fields
prep = self.session.create_object("wgs_dna_prep")
test_comment = "Test comment"
frag_size = 10
lib_layout = "asdfads"
lib_selection = "asdfhewofue"
mims = {
"adapters": "test_adapters",
"annot_source": "test_annot_source",
"assembly": "test_assembly",
"assembly_name": "test_assembly_name",
"biome": "test_biome",
"collection_date": "test_collection_date",
"env_package": "test_env_package",
"extrachrom_elements": "test_extrachrom_elements",
"encoded_traits": "test_encoded_traits",
"experimental_factor": "test_experimental_factor",
"feature": "test_feature",
"findex": "test_findex",
"finishing_strategy": "test_finishing_strategy",
"geo_loc_name": "test_geo_loc_name",
"investigation_type": "test_investigation_type",
"lat_lon": "test_lat_long",
"lib_const_meth": "test_lib_const_meth",
"lib_reads_seqd": "test_lib_reads_seqd",
"lib_screen": "test_lib_screen",
"lib_size": 2000,
"lib_vector": "test_lib_vector",
"material": "test_material",
"nucl_acid_amp": "test_nucl_acid_amp",
"nucl_acid_ext": "test_nucl_acid_ext",
"project_name": "test_project_name",
"rel_to_oxygen": "test_rel_to_oxygen",
"rindex": "test_rindex",
"samp_collect_device": "test_samp_collect_device",
"samp_mat_process": "test_samp_map_process",
"samp_size": "test_samp_size",
"seq_meth": "test_seq_meth",
"sop": ["a", "b", "c"],
"source_mat_id": ["a", "b", "c"],
"submitted_to_insdc": True,
"url": ["a", "b", "c"]
}
ncbi_taxon_id = "sadfadsfawefw"
prep_id = "asdsadewqrewq"
sequencing_center = "center for sequencing"
sequencing_contact = "me right now"
srs_id = "the id for the srs"
storage_duration = 10
test_links = {"prepared_from": []}
self.assertFalse(prep.save(), "WgsDnaPrep not saved successfully, no required fields")
prep.comment = test_comment
self.assertFalse(prep.save(), "WgsDnaPrep not saved successfully")
prep.frag_size = frag_size
self.assertFalse(prep.save(), "WgsDnaPrep not saved successfully")
prep.links = test_links
self.assertFalse(prep.save(), "WgsDnaPrep not saved successfully")
prep.lib_layout = lib_layout
prep.lib_selection = lib_selection
prep.mims = mims
prep.ncbi_taxon_id = ncbi_taxon_id
prep.prep_id = prep_id
prep.sequencing_center = sequencing_center
prep.sequencing_contact = sequencing_contact
prep.srs_id = srs_id
prep.storage_duration = storage_duration
# make sure prep does not delete if it does not exist
with self.assertRaises(Exception):
prep.delete()
self.assertTrue(prep.save() is True, "WgsDnaPrep was not saved successfully")
# load the prep that was just saved from the OSDF instance
prep_loaded = self.session.create_object("wgs_dna_prep")
prep_loaded = prep_loaded.load(prep.id)
# check all fields were saved and loaded successfully
self.assertEqual(prep.comment,
prep_loaded.comment,
"WgsDnaPrep comment not saved & loaded successfully")
self.assertEqual(prep.mims["biome"],
prep_loaded.mims["biome"],
"WgsDnaPrep mims not saved & loaded successfully")
# prep is deleted successfully
self.assertTrue(prep.delete(), "WgsDnaPrep was not deleted successfully")
# the prep of the initial ID should not load successfully
load_test = self.session.create_object("wgs_dna_prep")
with self.assertRaises(Exception):
load_test = load_test.load(prep.id)
if __name__ == '__main__':
unittest.main()
| ihmpdcc/cutlass | tests/test_wgs_dna_prep.py | Python | mit | 15,332 |
# -*- coding: utf-8 -*-
'''
# Copyright (c) 2015 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.item_delta_collection_page import ItemDeltaCollectionPage
from ..request.items_collection import ItemsCollectionResponse
class ItemDeltaCollectionResponse(ItemsCollectionResponse):
@property
def collection_page(self):
"""The collection page stored in the response JSON
Returns:
:class:`ItemDeltaCollectionPage<onedrivesdk.request.item_delta_collection.ItemDeltaCollectionPage>`:
The collection page
"""
token = self._prop_dict["@delta.token"] if "@delta.token" in self._prop_dict else None
delta_link = self._prop_dict["@odata.deltaLink"] if "@odata.deltaLink" in self._prop_dict else None
next_page_link = self._prop_dict["@odata.nextLink"] if "@odata.nextLink" in self._prop_dict else None
if self._collection_page:
self._collection_page._prop_list = self._prop_dict["value"]
self._collection_page._token = token
self._collection_page._delta_link = delta_link
self._collection_page._next_page_link = next_page_link
else:
self._collection_page = ItemDeltaCollectionPage(self._prop_dict["value"],
token,
delta_link,
next_page_link)
return self._collection_page
from ..request.item_delta import ItemDeltaRequest | OneDrive/onedrive-sdk-python | src/python3/request/item_delta_collection.py | Python | mit | 2,730 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/military/shared_military_outpost_guard_house_imperial.iff"
result.attribute_template_id = -1
result.stfName("building_name","military_guard_tower_1")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/building/military/shared_military_outpost_guard_house_imperial.py | Python | mit | 486 |
from __future__ import division, print_function
import myhdl
from myhdl import intbv, instance, delay
from rhea.system import Barebone
from . import SPIBus
@myhdl.block
def spi_controller_model(clock, ibus, spibus):
"""A model of an SPI controller
Arguments:
ibus (Barebone): internal bus
spibus (SPIBus): SPI interface (SPIBus)
"""
assert isinstance(ibus, Barebone)
assert isinstance(spibus, SPIBus)
@instance
def decode_ibus():
while True:
yield clock.posedge
if ibus.write:
yield spibus.writeread(ibus.get_write_data())
yield ibus.acktrans(spibus.get_read_data())
elif ibus.read:
yield spibus.writeread(0x55) # dummy write byte
yield ibus.acktrans(spibus.get_read_data())
return decode_ibus
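# Rough usage sketch (the constructor calls below are assumptions; check
# rhea.system.Barebone and SPIBus for the actual signatures):
#
#     clock = myhdl.Signal(bool(0))
#     ibus = Barebone()
#     spibus = SPIBus()
#     ctl_model = spi_controller_model(clock, ibus, spibus)
#
# The returned block instance is then wired into a testbench together with a
# clock generator and, for loopback tests, an SPISlave().process(spibus).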
class SPISlave(object):
def __init__(self):
self.reg = intbv(0)[8:]
@myhdl.block
def process(self, spibus):
sck, mosi, miso, csn = spibus()
@instance
def gproc():
while True:
yield csn.negedge
bcnt = 8
while not csn:
if bcnt > 0:
miso.next = self.reg[bcnt-1]
yield sck.posedge
                        bcnt -= 1
self.reg[bcnt] = mosi
else:
yield delay(10)
return gproc
| cfelton/rhea | rhea/cores/spi/spi_models.py | Python | mit | 1,479 |
items_found_can_i_use = None
can_i_use_file = None
can_i_use_popup_is_showing = False
can_i_use_list_from_main_menu = False
path_to_can_i_use_data = os.path.join(H_SETTINGS_FOLDER, "can_i_use", "can_i_use_data.json")
path_to_test_can_i_use_data = os.path.join(H_SETTINGS_FOLDER, "can_i_use", "can_i_use_data2.json")
url_can_i_use_json_data = "https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json"
can_i_use_css = ""
with open(os.path.join(H_SETTINGS_FOLDER, "can_i_use", "style.css")) as css_file:
can_i_use_css = "<style>"+css_file.read()+"</style>"
def donwload_can_i_use_json_data() :
global can_i_use_file
if os.path.isfile(path_to_can_i_use_data) :
with open(path_to_can_i_use_data) as json_file:
try :
can_i_use_file = json.load(json_file)
except Exception as e :
print("Error: "+traceback.format_exc())
sublime.active_window().status_message("Can't use \"Can I use\" json data from: https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json")
if Util.download_and_save(url_can_i_use_json_data, path_to_test_can_i_use_data) :
if os.path.isfile(path_to_can_i_use_data) :
if not Util.checksum_sha1_equalcompare(path_to_can_i_use_data, path_to_test_can_i_use_data) :
with open(path_to_test_can_i_use_data) as json_file:
try :
can_i_use_file = json.load(json_file)
if os.path.isfile(path_to_can_i_use_data) :
os.remove(path_to_can_i_use_data)
os.rename(path_to_test_can_i_use_data, path_to_can_i_use_data)
except Exception as e :
print("Error: "+traceback.format_exc())
sublime.active_window().status_message("Can't use new \"Can I use\" json data from: https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json")
if os.path.isfile(path_to_test_can_i_use_data) :
os.remove(path_to_test_can_i_use_data)
else :
os.rename(path_to_test_can_i_use_data, path_to_can_i_use_data)
with open(path_to_can_i_use_data) as json_file :
try :
can_i_use_file = json.load(json_file)
except Exception as e :
print("Error: "+traceback.format_exc())
sublime.active_window().status_message("Can't use \"Can I use\" json data from: https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json")
Util.create_and_start_thread(donwload_can_i_use_json_data, "DownloadCanIuseJsonData")
def find_in_can_i_use(word) :
global can_i_use_file
can_i_use_data = can_i_use_file.get("data")
word = word.lower()
return [value for key, value in can_i_use_data.items() if value["title"].lower().find(word) >= 0]
def back_to_can_i_use_list(action):
global can_i_use_popup_is_showing
if action.find("http") >= 0:
webbrowser.open(action)
return
view = sublime.active_window().active_view()
can_i_use_popup_is_showing = False
view.hide_popup()
if len(action.split(",")) > 1 and action.split(",")[1] == "main-menu" :
view.run_command("can_i_use", args={"from": "main-menu"})
else :
view.run_command("can_i_use")
def show_pop_can_i_use(index):
global can_i_use_file
global items_found_can_i_use
global can_i_use_popup_is_showing
if index < 0:
return
item = items_found_can_i_use[index]
browser_accepted = ["ie", "edge", "firefox", "chrome", "safari", "opera", "ios_saf", "op_mini", "android", "and_chr"]
browser_name = [
" IE",
" EDGE",
" Firefox",
" Chrome",
" Safari",
" Opera",
" iOS Safari",
" Opera Mini",
" Android Browser",
"Chrome for Android"
]
html_browser = ""
html_browser += "<div>"
html_browser += "<h1 class=\"title\">"+cgi.escape(item["title"])+" <a href=\""+item["spec"].replace(" ", "%20")+"\"><span class=\"status "+item["status"]+"\"> - "+item["status"].upper()+"</span></a></h1>"
html_browser += "<p class=\"description\">"+cgi.escape(item["description"])+"</p>"
html_browser += "<p class=\"\"><span class=\"support\">Global Support: <span class=\"support-y\">"+str(item["usage_perc_y"])+"%</span>"+( " + <span class=\"support-a\">"+str(item["usage_perc_a"])+"%</span> = " if float(item["usage_perc_a"]) > 0 else "" )+( "<span class=\"support-total\">"+str( "{:10.2f}".format(float(item["usage_perc_y"]) + float(item["usage_perc_a"])) )+"%</span>" if float(item["usage_perc_a"]) > 0 else "" )+"</span> "+( " ".join(["<span class=\"category\">"+category+"</span>" for category in item["categories"]]) )+"</p>"
html_browser += "</div>"
html_browser += "<div class=\"container-browser-list\">"
i = 0
for browser in browser_accepted :
browser_versions = can_i_use_file["agents"]
stat = item["stats"].get(browser)
stat_items_ordered = list()
for k in stat.keys() :
if k != "TP" :
stat_items_ordered.append(k)
if len(stat_items_ordered) >= 1 and stat_items_ordered[0] != "all" :
stat_items_ordered.sort(key=LooseVersion)
stat_items_ordered = stat_items_ordered[::-1]
html_p = "<p class=\"version-stat-item\"><span class=\"browser-name\">"+browser_name[i]+"</span> : "
j = 0
while j < len(stat_items_ordered) :
if j == 7:
break
class_name = stat.get(stat_items_ordered[j])
html_annotation_numbers = ""
requires_prefix = ""
can_be_enabled = ""
if re.search(r"\bx\b", class_name) :
requires_prefix = "x"
if re.search(r"\bd\b", class_name) :
can_be_enabled = "d"
if class_name.find("#") >= 0 :
numbers = class_name[class_name.find("#"):].strip().split(" ")
for number in numbers :
number = int(number.replace("#", ""))
html_annotation_numbers += "<span class=\"annotation-number\">"+str(number)+"</span>"
html_p += "<span class=\"version-stat "+stat.get(stat_items_ordered[j])+" \">"+( html_annotation_numbers if html_annotation_numbers else "" )+stat_items_ordered[j]+( "<span class=\"can-be-enabled\"> </span>" if can_be_enabled else "" )+( "<span class=\"requires-prefix\"> </span>" if requires_prefix else "" )+"</span> "
j = j + 1
html_p += "</p>"
html_browser += html_p
i = i + 1
html_browser += "</div>"
if item["notes_by_num"] :
html_browser += "<div>"
html_browser += "<h3>Notes</h3>"
notes_by_num = item["notes_by_num"]
notes_by_num_ordered = list()
for k in notes_by_num.keys() :
notes_by_num_ordered.append(k)
notes_by_num_ordered.sort()
i = 0
while i < len(notes_by_num_ordered) :
note = notes_by_num.get(notes_by_num_ordered[i])
html_p = "<p class=\"note\"><span class=\"annotation-number\">"+str(notes_by_num_ordered[i])+"</span>"+cgi.escape(note)+"</p>"
html_browser += html_p
i = i + 1
html_browser += "</div>"
if item["links"] :
html_browser += "<div>"
html_browser += "<h3>Links</h3>"
links = item["links"]
for link in links :
html_p = "<p class=\"link\"><a href=\""+link.get("url")+"\">"+cgi.escape(link.get("title"))+"</a></p>"
html_browser += html_p
html_browser += "</div>"
view = sublime.active_window().active_view()
can_i_use_popup_is_showing = True
view.show_popup("""
<html>
<head></head>
<body>
"""+can_i_use_css+"""
<div class=\"container-back-button\">
<a class=\"back-button\" href=\"back"""+( ",main-menu" if can_i_use_list_from_main_menu else "")+"""\">< Back</a>
<a class=\"view-on-site\" href=\"http://caniuse.com/#search="""+item["title"].replace(" ", "%20")+"""\"># View on \"Can I use\" site #</a>
</div>
<div class=\"content\">
"""+html_browser+"""
<div class=\"legend\">
<h3>Legend</h3>
<div class=\"container-legend-items\">
<span class=\"legend-item y\"> </span> = Supported
<span class=\"legend-item n\"> </span> = Not Supported
<span class=\"legend-item p a\"> </span> = Partial support
<span class=\"legend-item u\"> </span> = Support unknown
<span class=\"legend-item requires-prefix\"> </span> = Requires Prefix
<span class=\"legend-item can-be-enabled\"> </span> = Can Be Enabled
</div>
</div>
</div>
</body>
</html>""", sublime.COOPERATE_WITH_AUTO_COMPLETE, -1, 1250, 650, back_to_can_i_use_list)
class can_i_useCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
global items_found_can_i_use
global can_i_use_file
global can_i_use_list_from_main_menu
can_i_use_data = can_i_use_file.get("data")
if not can_i_use_data :
return
view = self.view
selection = view.sel()[0]
if args.get("from") != "main-menu" :
can_i_use_list_from_main_menu = False
word = view.substr(view.word(selection)).strip()
items_found_can_i_use = find_in_can_i_use(word)
sublime.active_window().show_quick_panel([item["title"] for item in items_found_can_i_use], show_pop_can_i_use)
else :
can_i_use_list_from_main_menu = True
items_found_can_i_use = find_in_can_i_use("")
sublime.active_window().show_quick_panel([item["title"] for item in items_found_can_i_use], show_pop_can_i_use)
def is_enabled(self, **args):
view = self.view
if args.get("from") == "main-menu" or javascriptCompletions.get("enable_can_i_use_menu_option") :
return True
return False
def is_visible(self, **args):
view = self.view
if args.get("from") == "main-menu" :
return True
if javascriptCompletions.get("enable_can_i_use_menu_option") :
if Util.split_string_and_find_on_multiple(view.scope_name(0), ["source.js", "text.html.basic", "source.css"]) < 0 :
return False
return True
return False
class can_i_use_hide_popupEventListener(sublime_plugin.EventListener):
def on_modified_async(self, view) :
global can_i_use_popup_is_showing
if can_i_use_popup_is_showing :
view.hide_popup()
can_i_use_popup_is_showing = False | pichillilorenzo/JavaScript-Completions | helper/can_i_use/can_i_use_command.py | Python | mit | 10,624 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Minimal Flask application example for development.
SPHINX-START
Run example development server:
.. code-block:: console
$ pip install -e .[all]
$ cd examples
$ ./app-setup.sh
$ python app.py
Open the schema from web:
.. code-block:: console
$ curl http://localhost:5000/schemas/record_schema.json
$ curl http://localhost:5000/schemas/biology/animal_record_schema.json
Teardown the application:
.. code-block:: console
$ ./app-teardown.sh
SPHINX-END
"""
from __future__ import absolute_import, print_function
import json
from flask import Flask
from invenio_jsonschemas import InvenioJSONSchemas
# Create Flask application
app = Flask(__name__)
# set the endpoint serving the JSON schemas
app.config['JSONSCHEMAS_ENDPOINT'] = '/schemas'
# Initialize the application with the InvenioJSONSchema extension.
# This registers the jsonschemas from examples/samplepkg/jsonschemas as
# samplepkg's setup.py has the "invenio_jsonschemas.schemas" entrypoint.
ext = InvenioJSONSchemas(app)
# list all registered schemas
print('SCHEMAS >> {}'.format(ext.list_schemas()))
for schema in ext.list_schemas():
print('=' * 50)
print('SCHEMA {}'.format(schema))
# retrieve the schema content
print(json.dumps(ext.get_schema(schema), indent=4))
# InvenioJSONSchemas registers a blueprint serving the JSON schemas
print('>> You can retrieve the schemas using the url in their "id".')
if __name__ == "__main__":
app.run()
| inveniosoftware/invenio-jsonschemas | examples/app.py | Python | mit | 1,706 |
import cnn
import numpy as np
if __name__ == '__main__':
#import input_data
import random
from PIL import Image
#mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
ocr = cnn.CNN()
ocr.build()
#ocr.predict()
show_image2 = Image.open('G:/Users/kakoi/Desktop/无标题.bmp')
show_array2 = np.asarray(show_image2).flatten()
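    # Scale the 0-255 pixel values down to the 0-1 range before feeding them to the network.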
input_image2 = show_array2 / 255
print(ocr.read(input_image2))
#input_image = mnist.test.images[random.randrange(0, 100)]
#show_image = input_image*255
#im = Image.fromarray(show_image.reshape([28,28]))
#im.show()
#print(ocr.read(input_image))
| kelifrisk/justforfun | python/cnn/mnist_resnet/main.py | Python | mit | 672 |
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as SsnProvider
import random
class Provider(SsnProvider):
    #in order to create a valid SIN we need to provide a number that passes a simple modified Luhn Algorithm checksum
#this function essentially reverses the checksum steps to create a random valid SIN (Social Insurance Number)
@classmethod
def ssn(cls):
#create an array of 8 elements initialized randomly
digits = random.sample(range(10), 8)
# All of the digits must sum to a multiple of 10.
# sum the first 8 and set 9th to the value to get to a multiple of 10
digits.append(10 - (sum(digits) % 10))
#digits is now the digital root of the number we want multiplied by the magic number 121 212 121
#reverse the multiplication which occurred on every other element
for i in range(1, len(digits), 2):
if digits[i] % 2 == 0:
                digits[i] = (digits[i] // 2)  # floor division keeps the digit an int on Python 3
else:
                digits[i] = (digits[i] + 9) // 2
#build the resulting SIN string
sin = ""
for i in range(0, len(digits), 1):
sin += str(digits[i])
#add a space to make it conform to normal standards in Canada
if i % 3 == 2:
sin += " "
#finally return our random but valid SIN
return sin
| venmo/faker | faker/providers/ssn/en_CA/__init__.py | Python | mit | 1,457 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserRole.role'
db.alter_column(u'ecg_balancing_userrole', 'role', self.gf('django.db.models.fields.CharField')(max_length=10))
def backwards(self, orm):
# Changing field 'UserRole.role'
db.alter_column(u'ecg_balancing_userrole', 'role', self.gf('django.db.models.fields.CharField')(max_length=5))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ecg_balancing.company': {
'Meta': {'object_name': 'Company'},
'activities': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'employees_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'foundation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'managing_directors': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'model_creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owners': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'revenue': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zipcode': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'ecg_balancing.companybalance': {
'Meta': {'object_name': 'CompanyBalance'},
'auditor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_good': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'balance'", 'to': u"orm['ecg_balancing.Company']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_communication': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balances'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'number_participated_employees': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'peer_companies': ('django.db.models.fields.related.ManyToManyField', [], {'max_length': '255', 'to': u"orm['ecg_balancing.Company']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
'points': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'max_length': '4'}),
'process_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'prospect': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'worked_hours': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.SmallIntegerField', [], {'max_length': '4'})
},
u'ecg_balancing.companybalanceindicator': {
'Meta': {'object_name': 'CompanyBalanceIndicator'},
'company_balance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.CompanyBalance']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'evaluation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.Indicator']"})
},
u'ecg_balancing.ecgmatrix': {
'Meta': {'object_name': 'ECGMatrix'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "u'4.1'", 'max_length': '6'})
},
u'ecg_balancing.feedbackindicator': {
'Meta': {'object_name': 'FeedbackIndicator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'feedback'", 'to': u"orm['ecg_balancing.Indicator']"}),
'message': ('django.db.models.fields.TextField', [], {}),
'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'receiver_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sender_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'sender_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'ecg_balancing.indicator': {
'Meta': {'object_name': 'Indicator'},
'contact': ('ecg_balancing.fields.CommaSeparatedEmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ecg_value': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'editor': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'indicators'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'max_evaluation': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'parent_indicator'", 'null': 'True', 'to': u"orm['ecg_balancing.Indicator']"}),
'relevance': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'sole_proprietorship': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'stakeholder': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'subindicator_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'ecg_balancing.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'ecg_balancing.userrole': {
'Meta': {'object_name': 'UserRole'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ecg_balancing.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'role'", 'to': u"orm['auth.User']"})
}
}
complete_apps = ['ecg_balancing'] | sinnwerkstatt/ecg-balancing | ecg_balancing/migrations/0030_auto__chg_field_userrole_role.py | Python | mit | 12,407 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_tatooine_tour_aryon.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/mobile/shared_dressed_tatooine_tour_aryon.py | Python | mit | 454 |
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_model
class TimePeriodMixin(object):
'''Will add the currently active time period to the template context
or False if no active time period.
Configuration:
`time_period_model`: The model class that implements TimePeriodBase.
`time_period_queryset`: If not set TimePeriod.current is used
In Django 1.5 and above:
If the app that implements `TimePeriod` is the same as the one the
current request is on and that app's urls has `app_name` configured
in `urls.py` model can be automatically found.
I.e.: url(r'^test/', include('test_app.urls', namespace='test',
app_name='test_app')),
Raises:
ImproperlyConfigured: If no model has been defined
'''
_model = None
time_period_model = None
time_period_queryset = None
def get_time_period_model(self):
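        # Return the cached model class if it has already been resolved.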
if self._model: return self._model
model = self.time_period_model
if model is None:
if hasattr(self.request, 'resolver_match'):
model = get_model(self.request.resolver_match.app_name,
'TimePeriod')
if not model:
raise ImproperlyConfigured(
'`time_period_model` is not set for TimePeriod.'
)
self._model = model
return model
def get_time_period_queryset(self):
if self.time_period_queryset is None:
model = self.get_time_period_model()
return model.current
else:
return self.time_period_queryset
def get_time_period(self):
model = self.get_time_period_model()
queryset = self.get_time_period_queryset()
try:
return queryset.get()
except model.DoesNotExist:
return False
def get_context_data(self, **kwargs):
context = super(TimePeriodMixin, self).get_context_data(**kwargs)
context['time_period'] = self.get_time_period()
return context
| Mercy-Nekesa/sokoapp | sokoapp/contests/views.py | Python | mit | 2,091 |
# -*- coding: utf-8 -*-
import sys
from influxdb import chunked_json
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestChunkJson(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestChunkJson, cls).setUpClass()
def test_load(self):
"""
Tests reading a sequence of JSON values from a string
"""
example_response = \
'{"results": [{"series": [{"measurement": "sdfsdfsdf", ' \
'"columns": ["time", "value"], "values": ' \
'[["2009-11-10T23:00:00Z", 0.64]]}]}, {"series": ' \
'[{"measurement": "cpu_load_short", "columns": ["time", "value"],'\
'"values": [["2009-11-10T23:00:00Z", 0.64]]}]}]}'
res = list(chunked_json.loads(example_response))
# import ipdb; ipdb.set_trace()
# self.assertTrue(res)
self.assertListEqual(
[
{
'results': [
{'series': [{
'values': [['2009-11-10T23:00:00Z', 0.64]],
'measurement': 'sdfsdfsdf',
'columns':
['time', 'value']}]},
{'series': [{
'values': [['2009-11-10T23:00:00Z', 0.64]],
'measurement': 'cpu_load_short',
'columns': ['time', 'value']}]}
]
}
],
res
)
| georgijd/influxdb-python | influxdb/tests/chunked_json_test.py | Python | mit | 1,565 |
from nose.tools import raises
from tests.test_stack import TestConfig, app_from_config
from tg.util import no_warn
from tg.configuration import config
from tg.configuration import milestones
from tg.decorators import Decoration, decode_params
import tg
import json
def make_app():
base_config = TestConfig(folder = 'rendering',
values = {'use_sqlalchemy': False,
'use_legacy_renderer': False,
# this is specific to mako
# to make sure inheritance works
'use_dotted_templatenames': False,
'use_toscawidgets': False,
'use_toscawidgets2': False
}
)
return app_from_config(base_config)
class TestTGController(object):
def setup(self):
self.app = make_app()
def test_simple_jsonification(self):
resp = self.app.get('/j/json')
expected = {"a": "hello world", "b": True}
assert expected == resp.json_body
def test_multi_dispatch_json(self):
resp = self.app.get('/j/xml_or_json', headers={'accept':'application/json'})
assert '''"status": "missing"''' in resp
assert '''"name": "John Carter"''' in resp
assert '''"title": "officer"''' in resp
def test_json_with_object(self):
resp = self.app.get('/j/json_with_object')
assert '''"Json": "Rocks"''' in str(resp.body)
@no_warn
def test_json_with_bad_object(self):
try:
resp = self.app.get('/j/json_with_bad_object')
assert False
except Exception as e:
assert "is not JSON serializable" in str(e), str(e)
def test_multiple_engines(self):
default_renderer = config['default_renderer']
resp = self.app.get('/multiple_engines')
assert default_renderer in resp, resp
def test_decode_params_json(self):
params = {'name': 'Name', 'surname': 'Surname'}
resp = self.app.post_json('/echo_json', params)
assert resp.json_body == params
@raises(ValueError)
def test_decode_params_notjson(self):
@decode_params('xml')
def _fakefunc():
pass
class TestExposeInheritance(object):
def setup(self):
self.app = make_app()
def test_inherited_expose_template(self):
resp1 = self.app.get('/sub1/index')
resp2 = self.app.get('/sub2/index')
assert resp1.body == resp2.body
def test_inherited_expose_override(self):
resp1 = self.app.get('/sub1/index_override')
resp2 = self.app.get('/sub2/index_override')
assert resp1.body != resp2.body
def test_inherited_expose_hooks(self):
resp1 = self.app.get('/sub1/data')
assert ('"v"' in resp1 and '"parent_value"' in resp1)
resp2 = self.app.get('/sub2/data')
assert ('"v"' in resp2 and '"parent_value"' in resp2 and '"child_value"' in resp2)
class TestExposeLazyInheritance(object):
def test_lazy_inheritance(self):
milestones.renderers_ready._reset()
class BaseController(tg.TGController):
@tg.expose('template.html')
def func(self):
pass
class SubController(BaseController):
@tg.expose(inherit=True)
def func(self):
pass
milestones.renderers_ready.reach()
deco = Decoration.get_decoration(SubController.func)
assert len(deco.engines) == 1, deco.engines
assert deco.engines['text/html'][1] == 'template.html', deco.engines
def test_lazy_inheritance_with_template(self):
milestones.renderers_ready._reset()
class BaseController(tg.TGController):
@tg.expose('template.html')
def func(self):
pass
class SubController(BaseController):
@tg.expose('new_template.html', inherit=True)
def func(self):
pass
milestones.renderers_ready.reach()
deco = Decoration.get_decoration(SubController.func)
assert len(deco.engines) == 1, deco.engines
assert deco.engines['text/html'][1] == 'new_template.html', deco.engines
def test_lazy_inheritance_with_nested_template(self):
milestones.renderers_ready._reset()
class BaseController(tg.TGController):
@tg.expose('template.html')
@tg.expose('template.html', content_type='text/plain')
def func(self):
pass
class SubController(BaseController):
@tg.expose('new_template.html', inherit=True)
@tg.expose('new_template.html', content_type='text/plain')
def func(self):
pass
class SubSubController(SubController):
@tg.expose('new2_template.html', inherit=True)
def func(self):
pass
milestones.renderers_ready.reach()
deco = Decoration.get_decoration(SubSubController.func)
assert len(deco.engines) == 2, deco.engines
assert deco.engines['text/html'][1] == 'new2_template.html', deco.engines
assert deco.engines['text/plain'][1] == 'new_template.html', deco.engines
def test_lazy_inheritance_with_3nested_template(self):
milestones.renderers_ready._reset()
class BaseController(tg.TGController):
@tg.expose('template.html')
@tg.expose('template.html', content_type='text/plain')
@tg.expose('template.html', content_type='text/javascript')
def func(self):
pass
class SubController(BaseController):
@tg.expose('new_template.html', inherit=True)
@tg.expose('new_template.html', content_type='text/plain')
@tg.expose('new_template.html', content_type='text/javascript')
def func(self):
pass
class SubSubController(SubController):
@tg.expose('new2_template.html', inherit=True)
@tg.expose('new2_template.html', content_type='text/javascript')
def func(self):
pass
class SubSubSubController(SubSubController):
@tg.expose('new3_template.html', inherit=True)
def func(self):
pass
milestones.renderers_ready.reach()
deco = Decoration.get_decoration(SubSubSubController.func)
assert len(deco.engines) == 3, deco.engines
assert deco.engines['text/html'][1] == 'new3_template.html', deco.engines
assert deco.engines['text/plain'][1] == 'new_template.html', deco.engines
assert deco.engines['text/javascript'][1] == 'new2_template.html', deco.engines
| lucius-feng/tg2 | tests/test_stack/rendering/test_decorators.py | Python | mit | 6,861 |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from ipaddress import ip_network
from operator import itemgetter
from indico.util.i18n import _
from indico.web.forms.fields import MultiStringField
class MultiIPNetworkField(MultiStringField):
"""A field to enter multiple IPv4 or IPv6 networks.
The field data is a set of ``IPNetwork``s not bound to a DB session.
The ``unique`` and ``sortable`` parameters of the parent class cannot be used with this class.
"""
def __init__(self, *args, **kwargs):
super(MultiIPNetworkField, self).__init__(*args, field=('subnet', _("subnet")), **kwargs)
self._data_converted = False
self.data = None
def _value(self):
if self.data is None:
return []
elif self._data_converted:
data = [{self.field_name: unicode(network)} for network in self.data or []]
return sorted(data, key=itemgetter(self.field_name))
else:
return self.data
def process_data(self, value):
if value is not None:
self._data_converted = True
self.data = value
def _fix_network(self, network):
network = network.encode('ascii', 'ignore')
if network.startswith('::ffff:'):
# convert ipv6-style ipv4 to regular ipv4
# the ipaddress library doesn't deal with such IPs properly!
network = network[7:]
return unicode(network)
def process_formdata(self, valuelist):
self._data_converted = False
super(MultiIPNetworkField, self).process_formdata(valuelist)
self.data = {ip_network(self._fix_network(entry[self.field_name])) for entry in self.data}
self._data_converted = True
def pre_validate(self, form):
pass # nothing to do
| mvidalgarcia/indico | indico/modules/networks/fields.py | Python | mit | 2,008 |
from ..output import outputfile, outputstr
from ..utils import multiple_index, limit_text
from ..utils import _index_function_gen, asciireplace, generate_func, generate_func_any, ROW_OBJ
from ..exceptions.messages import ApiObjectMsg as msg
from types import FunctionType
from tabulate import tabulate
from threading import RLock
SelectionLock = RLock()
NonHashMergeLock = RLock()
class Selection(object):
__slots__ = ["__rows__", "__apimother__"]
def __init__(self, selection, api_mother):
self.__rows__ = (selection)
self.__apimother__ = api_mother
def __add__(self, sel):
return Selection((
tuple(self.rows) + tuple(sel.rows)), self.__apimother__)
def __len__(self):
return len(self.rows)
def __getitem__(self, v):
if isinstance(v, slice):
return Selection(self.rows[v], self.__apimother__)
if isinstance(v, int):
return (self.rows[v])
elif isinstance(v, str):
return (x.getcolumn(v) for x in self.rows)
elif isinstance(v, tuple):
return (multiple_index(x, v) for x in self.rows)
elif isinstance(v, FunctionType):
return Selection(_index_function_gen(self, v), self.__apimother__)
else:
raise TypeError(msg.getitemmsg.format(type(v)))
@property
def rows(self):
if not isinstance(self.__rows__, tuple):
self.process()
return self.__rows__
else:
return self.__rows__
def process(self):
"""Processes the Selection, then returns it
Use this if chaining selections but you still need the parent
			for later usage, or if there are multiple chains from the
same parent selection
"""
if not isinstance(self.__rows__, tuple):
with SelectionLock:
self.__rows__ = tuple(self.__rows__)
return self
@property
def columns(self):
return self.__apimother__.columns
@property
def __columnsmap__(self):
return self.__apimother__.__columnsmap__
@property
def columns_mapping(self):
return {v:k for k,v in self.__columnsmap__.items()}
@property
def columns_attributes(self):
return list(self.__columnsmap__.keys())
@property
def addcolumn(self):
"""Adds a column
:param columnname: Name of the column to add.
:param columndata: The default value of the new column.
:param add_to_columns: Determines whether this column should
be added to the internal tracker.
:type columnname: :class:`str`
:type add_to_columns: :class:`bool`
Note: Rows that are being accessed by another thread will error out
if accessed during the brief time addcolumn is updating.
Note: This will affect the entire MainSelection, not just this
selection
"""
return self.__apimother__.addcolumn
@property
def delcolumn(self):
return self.__apimother__.delcolumn
@property
def rename_column(self):
return self.__apimother__.rename_column
def transform(self, column, func):
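		# Apply func to the named column of every row in this selection, in place.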
for row in self.rows:
row.setcolumn(column, func(row.getcolumn(column)))
return self
def _merge(self, args):
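		# Deduplicate by row hash; when the same hash appears in several selections, the later one wins.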
maps = []
for con in (self,) + args:
maps.append({(x.__hashvalue__()):x for x in con.rows})
master = {}
for d in maps:
master.update(d)
keys = set(master.keys())
for key in keys:
yield master[key]
def merge(self, *args, force_safety=True):
"""Merges selections
			Note: This merge's algorithm relies on the uniqueness of the rows;
				duplicate rows will only be represented by 1 row.
			Note: This merge relies on all data in a row being hashable; use non_hash_merge if you
				can't guarantee this.
"""
try:
if force_safety:
if (not all(self.__apimother__ is x.__apimother__ for x in args)):
raise ValueError("Merge by default only accepts rows from same origin")
return Selection(tuple(self._merge(args)), self.__apimother__)
except TypeError as exc:
raise TypeError(
"{} - Use the non_hash_merge to merge rows with non-hashable datatypes.".format(exc))
def safe_merge(self, *args):
"""This is much slower but is hashes rows as processed instead of preprocessing them"""
out = self
for x in args:
out = out + x
return out
def non_hash_merge(self, *args):
"""This merge uses the exploits the __output__ flag of a row instead of it's hashed contents
			This allows merging of rows that contain unhashable mutable data such as sets or dicts.
			This doesn't remove duplicate rows, but it is slightly faster and can handle all datatypes.
			Note: This merge is effectively single-threaded, and editing the output flag during
				running will affect the results of the merge and may have unintended consequences on the
				state of this selection.
"""
with NonHashMergeLock:
if not all(self.__apimother__ is x.__apimother__ for x in args):
raise ValueError("non_hash_merge only accepts rows from same origin")
outputstore = tuple(x.__output__ for x in self.__apimother__)
self.__apimother__.no_output()
for x in ((self,) + args):
for row in x:
+row
result = self.__apimother__.outputtedrows
for x, row in zip(outputstore, self.__apimother__.rows):
if x:
+row
else:
-row
return result
def _find_all(self, func):
for x in self.rows:
if func(x):
yield x
def single_find(self, selectionfirstarg_data=None, **kwargs):
"""Find a single row based off search criteria given.
			Will raise an error if more than one result is returned."""
try:
result = None
func = generate_func(selectionfirstarg_data, kwargs)
g = self._find_all(func)
result = next(g)
next(g)
raise ValueError(msg.singlefindmsg)
except StopIteration:
return result
def single_find_any(self, selectionfirstarg_data=None, **kwargs):
"""Find a single row based off search criteria given.
			Only one condition needs to be True.
			Will raise an error if more than one result is returned."""
try:
result = None
func = generate_func_any(selectionfirstarg_data, kwargs)
g = self._find_all(func)
result = next(g)
next(g)
raise ValueError(msg.singlefindmsg)
except StopIteration:
return result
def find(self, selectionfirstarg_data=None, **kwargs):
try:
func = generate_func(selectionfirstarg_data, kwargs)
g = self._find_all(func)
return next(g)
except StopIteration:
return None
def find_any(self, selectionfirstarg_data=None, **kwargs):
try:
func = generate_func_any(selectionfirstarg_data, kwargs)
g = self._find_all(func)
return next(g)
except StopIteration:
return None
def fast_find(self, **kwargs):
"""Much faster find. Returns the last row the fulfilled any kwargs. Only accept one kwargs.
			Note: The values under the given key must be unique for this to work effectively, else the latest matching row will be returned"""
if len(kwargs) != 1:
raise ValueError(msg.badfastfind)
k, v = tuple(kwargs.items())[0]
index_value = self.index(k)
return index_value.get(v)
def find_all(self, selectionfirstarg_data=None, **kwargs):
func = generate_func(selectionfirstarg_data, kwargs)
return tuple(self._find_all(func))
def find_all_any(self, selectionfirstarg_data=None, **kwargs):
func = generate_func_any(selectionfirstarg_data, kwargs)
return tuple(self._find_all(func))
def flip_output(self):
"""flips all output boolean for all rows in this selection"""
for x in self.rows:
~x
return self
def no_output(self):
"""Sets all rows to not output"""
for x in self.rows:
-x
return self
def all_output(self):
"""Sets all rows to output"""
for x in self.rows:
+x
return self
def lenoutput(self):
return len(tuple(filter(lambda x: x.outputrow, self.rows)))
def len_no_output(self):
return len(tuple(filter(lambda x: not x.outputrow, self.rows)))
def enable(self, selectionfirstarg_data=None, **kwargs):
v = generate_func(selectionfirstarg_data, kwargs)
for x in self.rows:
if bool(v(x)):
+x
return self
def disable(self, selectionfirstarg_data=None, **kwargs):
v = generate_func(selectionfirstarg_data, kwargs)
for x in self.rows:
if bool(v(x)):
-x
return self
def flip(self, selectionfirstarg_data=None, **kwargs):
v = generate_func(selectionfirstarg_data, kwargs)
for x in self.rows:
if bool(v(x)):
~x
return self
def select(self, selectionfirstarg_data=None, **kwargs):
"""Method for selecting part of the csv document.
			Generates a function based off the parameters given.
			All conditions must be true for a row to be selected.
Uses Lazy Loading, doesn't process till needed.
"""
if not selectionfirstarg_data and not kwargs:
return Selection(self.__rows__, self.__apimother__)
func = generate_func(selectionfirstarg_data, kwargs)
return self[func]
def any(self, selectionfirstarg_data=None, **kwargs):
"""Method for selecting part of the csv document.
			Generates a function based off the parameters given.
			Only one condition must be True for the row to be
selected.
Uses Lazy Loading, doesn't process till needed.
"""
if not selectionfirstarg_data and not kwargs:
return Selection(self.__rows__, self.__apimother__)
func = generate_func_any(selectionfirstarg_data, kwargs)
return self[func]
def safe_select(self, selectionfirstarg_data=None, **kwargs):
"""Method for selecting part of the csv document.
generates a function based off the parameters given.
This instantly processes the select instead of
lazily loading it at a later time.
			This prevents race conditions under most use cases,
			e.g. if the same select is being worked on in multiple
			threads, or in other cases such as rows being edited
			before the select is processed.
"""
if not selectionfirstarg_data and not kwargs:
return Selection(self.__rows__, self.__apimother__)
func = generate_func(selectionfirstarg_data, kwargs)
return self._safe_select(func)
def safe_any(self, selectionfirstarg_data=None, **kwargs):
"""Method for selecting part of the csv document.
generates a function based off the parameters given.
			Only one condition must be True for the row to be selected.
This instantly processes the select instead of
lazily loading it at a later time.
			This prevents race conditions under most use cases,
			e.g. if the same select is being worked on in multiple
			threads, or in other cases such as rows being edited
			before the select is processed.
"""
if not selectionfirstarg_data and not kwargs:
return Selection(self.__rows__, self.__apimother__)
func = generate_func_any(selectionfirstarg_data, kwargs)
return self._safe_select(func)
def grab(self, *args):
"""Grabs specified columns from every row
:returns: :class:`tuple` of the result.
"""
arg = tuple(args)
if len(arg) > 1:
return tuple(self[arg])
elif len(arg) == 1:
return tuple(self[arg[0]])
else:
raise ValueError(msg.badgrab)
def remove_duplicates(self, soft=True):
"""Removes duplicates rows
			if soft is true, returns a new Selection,
			else edits this object in place
Note: All rows must contain hashable data
"""
if soft:
return self.merge(self)
else:
self.__rows__ = self.merge(self).rows
def unique(self, *args):
"""Grabs specified columns from every row
:returns: :class:`set` of the result.
"""
arg = tuple(args)
if len(arg) > 1:
return set(self[arg])
elif len(arg) == 1:
return set(self[arg[0]])
else:
raise ValueError(msg.badgrab)
def _safe_select(self, func):
return Selection(tuple(_index_function_gen(self, func)), self.__apimother__)
def index(self, keyname, keyvalue=None):
""" Indexs a Column to a dict """
if keyvalue is None:
return dict(self[keyname, ROW_OBJ])
else:
return dict(self[keyname, keyvalue])
@property
def outputtedrows(self):
return self.safe_select(lambda x: x.outputrow)
@property
def nonoutputtedrows(self):
return self.safe_select(lambda x: not x.outputrow)
def tabulate(self, limit=100, format="grid", only_ascii=True,
columns=None, text_limit=None, remove_newline=True):
data = [x.longcolumn() for x in self.rows[:limit]]
sortedcolumns = self.columns if not columns else columns
if remove_newline:
for i, longcolumn in enumerate(data):
for key in longcolumn:
if isinstance(longcolumn[key], str):
longcolumn[key] = longcolumn[key].replace("\n", "")
result = tabulate(
[sortedcolumns] + [[limit_text(x[c], text_limit)
for c in sortedcolumns] for x in data],
headers="firstrow",
tablefmt=format)
if only_ascii:
return asciireplace(result)
return result
def output(self, f=None, columns=None, quote_all=None, encoding="utf-8"):
if not columns:
columns = self.columns
outputfile(f, self.rows, columns,
quote_all=quote_all, encoding=encoding)
def outputs(self, columns=None, quote_all=None, encoding="utf-8"):
"""Outputs to str"""
if not columns:
columns = self.columns
return outputstr(self.rows, columns, quote_all=quote_all, encoding=encoding)
| Dolphman/PSV | psv/core/objects/selections.py | Python | mit | 15,237 |
import heapq
# Wrapper class around heapq so that we can directly pass the key and don't have to make tuples
class Heap( object ):
def __init__( self, initial = None, key = lambda x : x ):
self.key = key
if initial:
self._data = [ ( key( item ), item ) for item in initial ]
heapq.heapify( self._data )
else:
self._data = [ ]
def push( self, item ):
heapq.heappush( self._data, ( self.key( item ), item ) )
def pop( self ):
if len( self._data ) > 0:
return heapq.heappop( self._data )[ 1 ]
else:
return None
def top( self ):
return self._data[ 0 ][ 1 ]
def size( self ):
return len( self._data )
| cvquant/trade-analysis | cdefs/heap.py | Python | mit | 754 |
SOCIAL_AUTH_VK_LOGIN_URL = 'https://oauth.vk.com/authorize'
SOCIAL_AUTH_VK_OAUTH2_KEY = '3911688'
SOCIAL_AUTH_VK_OAUTH2_SECRET = '2Y5FYZB3cqDsPteHXBBO'
SOCIAL_AUTH_VK_OAUTH2_SCOPE = ['friends']
AUTHENTICATION_BACKENDS = (
'social.backends.vk.VKOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/registration/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login-error/'
SOCIAL_AUTH_LOGIN_URL = '/login/'
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/registration/'
SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage'
SOCIAL_AUTH_NEW_ASSOCIATION_REDIRECT_URL = '/registration/'
SOCIAL_AUTH_DISCONNECT_REDIRECT_URL = '/registration/'
SOCIAL_AUTH_INACTIVE_USER_URL = '/inactive-user/'
"""These URLs are used on different steps of the auth process, some for successful results
and others for error situations.
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/logged-in/'
Used to redirect the user once the auth process ended successfully.
The value of ?next=/foo is used if it was present
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login-error/'
URL where the user will be redirected in case of an error
Is used as a fallback for LOGIN_ERROR_URL
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/new-users-redirect-url/'
Used to redirect new registered users, will be used in place of SOCIAL_AUTH_LOGIN_REDIRECT_URL if defined.
SOCIAL_AUTH_NEW_ASSOCIATION_REDIRECT_URL = '/new-association-redirect-url/'
Like SOCIAL_AUTH_NEW_USER_REDIRECT_URL but for new associated accounts (user is already logged in).
Used in place of SOCIAL_AUTH_LOGIN_REDIRECT_URL
SOCIAL_AUTH_DISCONNECT_REDIRECT_URL = '/account-disconnected-redirect-url/'
The user will be redirected to this URL when a social account is disconnected
SOCIAL_AUTH_INACTIVE_USER_URL = '/inactive-user/'
Inactive users can be redirected to this URL when trying to authenticate.
Successful URLs will default to SOCIAL_AUTH_LOGIN_URL while
error URLs will fallback to SOCIAL_AUTH_LOGIN_ERROR_URL."""
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.mail.mail_validation',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
) | ox1omon/movement_fefu | movement/settings/social.py | Python | mit | 2,521 |
#!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2019 The Starwels developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import struct
import re
import os
import os.path
import sys
import hashlib
import datetime
import time
from collections import namedtuple
from binascii import hexlify, unhexlify
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
def uint32(x):
return x & 0xffffffff
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return b''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return b''.join(out_words)
def calc_hdr_hash(blk_hdr):
hash1 = hashlib.sha256()
hash1.update(blk_hdr)
hash1_o = hash1.digest()
hash2 = hashlib.sha256()
hash2.update(hash1_o)
hash2_o = hash2.digest()
return hash2_o
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hexlify(hash).decode('utf-8')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
if settings['rev_hash_bytes'] == 'true':
line = hex_switchEndian(line)
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
self.lastDate = blkDate
if self.outF:
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
self.outFname = self.settings['output_file']
else:
self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + self.outFname)
self.outF = open(self.outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
self.hash_str = calc_hash_str(blk_hdr)
if not self.hash_str in blkmap:
# Because blocks can be written to files out-of-order as of 0.10, the script
# may encounter blocks it doesn't know about. Treat as debug output.
if settings['debug_output'] == 'true':
print("Skipping unknown block " + self.hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[self.hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
# Force hash byte format setting to be lowercase to make comparisons easier.
# Also place upfront in case any settings need to know about it.
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'genesis' not in settings:
settings['genesis'] = '000000003d69a915e9da53348c5c272978bb743442e3a6341c11061c125811a2'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000 * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
if 'debug_output' not in settings:
settings['debug_output'] = 'false'
settings['max_out_sz'] = int(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
settings['debug_output'] = settings['debug_output'].lower()
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
# Block hash map won't be byte-reversed. Neither should the genesis hash.
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
| starwels/starwels | contrib/linearize/linearize-data.py | Python | mit | 10,077 |
import yaml
class UiMap():
def __init__(self, path=None):
self._map = {}
self.observer = None
if path: self.load(path)
def load(self, path):
with open(path, 'r') as f:
tree = yaml.load(f)
for key in tree:
if key.find('__') != 0:
self.add_tree(key, tree[key])
self.driver_name = tree['__driver']
module_name = 'uimap.drivers.' + self.driver_name
self.driver = getattr(getattr(__import__(module_name), 'drivers'), self.driver_name).Driver(self)
def add_tree(self, name, tree, parent=None):
if type(tree)==dict:
self.add_control(name, tree['__char'], parent)
for key in tree:
if key.find('__') != 0:
self.add_tree(key, tree[key], name)
else:
self.add_control(name, tree, parent)
def add_control(self, name, character, parent):
parent = parent and self._map[parent]
ctl = Control(self, name, character, parent)
self._map[name] = ctl
return ctl
def all_controls(self):
return self._map.keys()
def sendkeys(self, keys):
return self.driver.sendkeys(None, keys)
def fire(self, stage, event):
if self.observer:
event.stage = stage
self.observer(event)
def __getattr__(self, control):
if control in self._map:
return self._map[control]
raise AttributeError(control)
class Event(object):
def __init__(self, control, action):
self.control = control
self.action = action
self.args = None
self.stage = 'find'
self.ret = None
class Control():
def __init__(self, uimap, name, character, parent):
self.uimap = uimap
self.parent = parent
self.name = name
self.character = character
def __getattr__(self, action):
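        # Unknown attributes are treated as driver actions: wait for the control, then delegate to the driver method of the same name.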
uimap = self.uimap
driver = uimap.driver
        event = Event(self, action)  # Event is the module-level event class defined above
if not hasattr(driver, action):
raise AttributeError()
uimap.fire('find', event)
if not driver.wait_exist(self):
raise Exception(self.name + ' not exist')
def do_action(*args):
event.args = args
uimap.fire('before', event)
event.ret = getattr(driver, action)(self, *args)
uimap.fire('done', event)
return event.ret
return do_action
def exist(self):
return self.uimap.driver.exist(self)
def wait_exist(self, **kwargs):
        event = Event(self, 'wait_exist')
self.uimap.fire('before', event)
event.ret = self.uimap.driver.wait_exist(self, **kwargs)
self.uimap.fire('done', event)
return event.ret
def wait_vanish(self, **kwargs):
        event = Event(self, 'wait_vanish')
self.uimap.fire('before', event)
event.ret = self.uimap.driver.wait_vanish(self, **kwargs)
self.uimap.fire('done', event)
return event.ret
def __str__(self):
return self.name
def __repr__(self):
return '<Control "' + self.name + '">'
| renorzr/uimap | src/uimap/__init__.py | Python | mit | 3,216 |
'''Operating System Task
'''
import types
from systemCall import *
class Task(object):
taskId = 0
def __init__(self, target):
self.tid = Task.taskId
Task.taskId += 1
self.target = target
self.sendVal = None
self.name = target.__name__
self.stack = []
def run(self):
while True:
try:
result = self.target.send(self.sendVal)
if isinstance(result, SystemCall):
return result
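                # The target yielded a sub-generator: park the caller on the stack and run the sub-generator next (trampolining).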
if isinstance(result, types.GeneratorType):
self.stack.append(self.target)
self.sendVal = None
self.target = result
else:
print(self.stack)
if not self.stack:
return
self.sendVal = result
self.target = self.stack.pop()
except StopIteration:
if not self.stack:
raise
self.sendVal = None
self.target = self.stack.pop()
def terminate(self):
return self.target.close()
def __str__(self):
return '<Task name = %s %#x>' % (self.name, id(self))
if __name__ == '__main__':
def simpleTask():
print('[SIMPLE TASK]step 1')
yield
print('[SIMPLE TASK]step 2')
yield
task = Task(simpleTask())
task.run()
task.run() | JShadowMan/package | python/coroutine/operatingSystem/task.py | Python | mit | 1,526 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_daclif_gallamby.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/mobile/shared_dressed_daclif_gallamby.py | Python | mit | 448 |
from django.conf.urls.defaults import *
urlpatterns = patterns('budget.transactions.views',
url(r'^$', 'transaction_list', name='budget_transaction_list'),
url(r'^add/$', 'transaction_add', name='budget_transaction_add'),
url(r'^edit/(?P<transaction_id>\d+)/$', 'transaction_edit', name='budget_transaction_edit'),
url(r'^delete/(?P<transaction_id>\d+)/$', 'transaction_delete', name='budget_transaction_delete'),
)
| MVReddy/django-mybudget | budget/transactions/urls.py | Python | mit | 433 |
"""
Carry a Cheese
"""
if __name__ == '__main__':
while True:
x = sorted(map(int, raw_input().split()))
if sum(x) == 0:
break
n = int(raw_input())
r = [int(raw_input()) for _ in xrange(n)]
        for i in xrange(n):
            # The cheese fits through hole i if the diagonal of its smallest
            # face (the two shortest edges) is shorter than the diameter 2*r.
            if (2 * r[i]) ** 2 > x[0] ** 2 + x[1] ** 2:
                print "OK"
            else:
                print "NA"
| miyazaki-tm/aoj | Volume1/0107.py | Python | mit | 397 |
#encoding=utf-8
from __future__ import unicode_literals
import sys
sys.path.append("../")
import jieba
seg_list = jieba.cut("我来到北京清华大学", cut_all=True)
print("Full Mode: " + "/ ".join(seg_list)) # 全模式
seg_list = jieba.cut("我来到北京清华大学", cut_all=False)
print("Default Mode: " + "/ ".join(seg_list)) # 默认模式
seg_list = jieba.cut("他来到了网易杭研大厦")
print(", ".join(seg_list))
seg_list = jieba.cut_for_search("小明硕士毕业于中国科学院计算所,后在日本京都大学深造")  # search engine mode
print(", ".join(seg_list))
| Yinzo/jieba | test/demo.py | Python | mit | 604 |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from os import path, chdir, getcwd
from cement.utils.misc import minimal_logger
from ..core import io, hooks, fileoperations
from ..core.abstractcontroller import AbstractBaseController
from ..lib import utils
from ..objects.exceptions import NoEnvironmentForBranchError, \
InvalidOptionsError
from ..operations import commonops, deployops, composeops
from ..resources.strings import strings, flag_text
LOG = minimal_logger(__name__)
class DeployController(AbstractBaseController):
class Meta(AbstractBaseController.Meta):
label = 'deploy'
description = strings['deploy.info']
arguments = [
(['environment_name'], dict(
action='store', nargs='?', default=[],
help=flag_text['deploy.env'])),
(['--modules'], dict(help=flag_text['deploy.modules'], nargs='*')),
(['-g', '--env-group-suffix'], dict(help=flag_text['deploy.group_suffix'])),
(['--version'], dict(help=flag_text['deploy.version'])),
(['-l', '--label'], dict(help=flag_text['deploy.label'])),
(['-m', '--message'], dict(help=flag_text['deploy.message'])),
(['-nh', '--nohang'], dict(
action='store_true', help=flag_text['deploy.nohang'])),
(['--staged'], dict(
action='store_true', help=flag_text['deploy.staged'])),
(['--timeout'], dict(type=int, help=flag_text['general.timeout'])),
(['--source'], dict(type=utils.check_source, help=flag_text['deploy.source'])),
]
usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
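    # Illustrative CLI invocations handled by this controller (argument values
    # are made up for the example):
    #   eb deploy my-env --label v42 --message "nightly build"
    #   eb deploy --modules api worker -g dev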
def do_command(self):
self.message = self.app.pargs.message
self.staged = self.app.pargs.staged
self.timeout = self.app.pargs.timeout
self.nohang = self.app.pargs.nohang
self.modules = self.app.pargs.modules
self.source = self.app.pargs.source
self.app_name = self.get_app_name()
self.env_name = self.app.pargs.environment_name
self.version = self.app.pargs.version
self.label = self.app.pargs.label
group_name = self.app.pargs.env_group_suffix
if self.modules and len(self.modules) > 0:
self.multiple_app_deploy()
return
if self.nohang:
self.timeout = 0
if self.version and (self.message or self.label):
raise InvalidOptionsError(strings['deploy.invalidoptions'])
if not self.env_name:
self.env_name = commonops.get_current_branch_environment()
if not self.env_name:
self.message = strings['branch.noenv'].replace('eb {cmd}',
self.Meta.label)
io.log_error(self.message)
raise NoEnvironmentForBranchError()
# ToDo add support for deploying to multiples?
# for arg in self.app.pargs.environment_name:
# # deploy to every environment listed
# ## Right now you can only list one
process_app_versions = fileoperations.env_yaml_exists()
deployops.deploy(self.app_name, self.env_name, self.version, self.label,
self.message, group_name=group_name, process_app_versions=process_app_versions,
staged=self.staged, timeout=self.timeout, source=self.source)
def complete_command(self, commands):
# TODO: edit this if we ever support multiple env deploys
super(DeployController, self).complete_command(commands)
## versionlabels on --version
cmd = commands[-1]
if cmd in ['--version']:
app_name = fileoperations.get_application_name()
io.echo(*commonops.get_app_version_labels(app_name))
def multiple_app_deploy(self):
missing_env_yaml = []
top_dir = getcwd()
for module in self.modules:
if not path.isdir(path.join(top_dir, module)):
continue
chdir(path.join(top_dir, module))
if not fileoperations.env_yaml_exists():
missing_env_yaml.append(module)
chdir(top_dir)
# We currently do not want to support multiple deploys when some of the
# modules do not contain env.yaml files
if len(missing_env_yaml) > 0:
module_list = ''
for module_name in missing_env_yaml:
module_list = module_list + module_name + ', '
io.echo(strings['deploy.modulemissingenvyaml'].replace('{modules}', module_list[:-2]))
return
self.compose_deploy()
return
def compose_deploy(self):
app_name = None
modules = self.app.pargs.modules
group_name = self.app.pargs.env_group_suffix
env_names = []
stages_version_labels = {}
stages_env_names = {}
top_dir = getcwd()
for module in modules:
if not path.isdir(path.join(top_dir, module)):
io.log_error(strings['deploy.notadirectory'].replace('{module}', module))
continue
chdir(path.join(top_dir, module))
if not group_name:
group_name = commonops.get_current_branch_group_suffix()
if group_name not in stages_version_labels.keys():
stages_version_labels[group_name] = []
stages_env_names[group_name] = []
if not app_name:
app_name = self.get_app_name()
io.echo('--- Creating application version for module: {0} ---'.format(module))
# Re-run hooks to get values from .elasticbeanstalk folders of apps
hooks.set_region(None)
hooks.set_ssl(None)
hooks.set_profile(None)
if not app_name:
app_name = self.get_app_name()
process_app_version = fileoperations.env_yaml_exists()
version_label = commonops.create_app_version(app_name, process=process_app_version)
stages_version_labels[group_name].append(version_label)
environment_name = fileoperations.get_env_name_from_env_yaml()
if environment_name is not None:
                commonops.set_environment_for_current_branch(
                    environment_name.replace('+', '-{0}'.format(group_name)))
env_name = commonops.get_current_branch_environment()
stages_env_names[group_name].append(env_name)
env_names.append(env_name)
else:
io.echo(strings['deploy.noenvname'].replace('{module}', module))
                # Drop the version label created above: there is no
                # environment name to deploy it to.
                stages_version_labels[group_name].remove(version_label)
chdir(top_dir)
if len(stages_version_labels) > 0:
for stage in stages_version_labels.keys():
request_id = composeops.compose_no_events(app_name, stages_version_labels[stage],
group_name=stage)
if request_id is None:
io.error("Unable to compose modules.")
return
commonops.wait_for_compose_events(request_id, app_name, env_names, self.timeout)
else:
io.log_warning(strings['compose.novalidmodules']) | AccelAI/accel.ai | flask-aws/lib/python2.7/site-packages/ebcli/controllers/deploy.py | Python | mit | 7,976 |
#!/usr/bin/env python
import time
import sys
import logging
from socketIO_client import SocketIO
APPKEY = '5697113d4407a3cd028abead'
TOPIC = 'test'
ALIAS = 'test'
logger = logging.getLogger('messenger')
class Messenger:
def __init__(self, appkey, alias, customid):
self.__logger = logging.getLogger('messenger.Messenger')
self.__logger.info('init')
self.appkey = appkey
self.customid = customid
self.alias = alias
self.socketIO = SocketIO('sock.yunba.io', 3000)
self.socketIO.on('socketconnectack', self.on_socket_connect_ack)
self.socketIO.on('connack', self.on_connack)
self.socketIO.on('puback', self.on_puback)
self.socketIO.on('suback', self.on_suback)
self.socketIO.on('message', self.on_message)
self.socketIO.on('set_alias_ack', self.on_set_alias)
self.socketIO.on('get_topic_list_ack', self.on_get_topic_list_ack)
self.socketIO.on('get_alias_list_ack', self.on_get_alias_list_ack)
# self.socketIO.on('puback', self.on_publish2_ack)
self.socketIO.on('recvack', self.on_publish2_recvack)
self.socketIO.on('get_state_ack', self.on_get_state_ack)
self.socketIO.on('alias', self.on_alias)
def __del__(self):
self.__logger.info('del')
def loop(self):
self.socketIO.wait(seconds=0.002)
def on_socket_connect_ack(self, args):
self.__logger.debug('on_socket_connect_ack: %s', args)
self.socketIO.emit('connect', {'appkey': self.appkey, 'customid': self.customid})
def on_connack(self, args):
self.__logger.debug('on_connack: %s', args)
self.socketIO.emit('set_alias', {'alias': self.alias})
def on_puback(self, args):
self.__logger.debug('on_puback: %s', args)
def on_suback(self, args):
self.__logger.debug('on_suback: %s', args)
def on_message(self, args):
self.__logger.debug('on_message: %s', args)
def on_set_alias(self, args):
self.__logger.debug('on_set_alias: %s', args)
def on_get_alias(self, args):
self.__logger.debug('on_get_alias: %s', args)
def on_alias(self, args):
self.__logger.debug('on_alias: %s', args)
def on_get_topic_list_ack(self, args):
self.__logger.debug('on_get_topic_list_ack: %s', args)
def on_get_alias_list_ack(self, args):
self.__logger.debug('on_get_alias_list_ack: %s', args)
def on_publish2_ack(self, args):
self.__logger.debug('on_publish2_ack: %s', args)
def on_publish2_recvack(self, args):
self.__logger.debug('on_publish2_recvack: %s', args)
def on_get_state_ack(self, args):
self.__logger.debug('on_get_state_ack: %s', args)
def publish(self, msg, topic, qos):
self.__logger.debug('publish: %s', msg)
self.socketIO.emit('publish', {'topic': topic, 'msg': msg, 'qos': qos})
def publish_to_alias(self, alias, msg):
self.__logger.debug('publish_to_alias: %s %s', alias, msg)
self.socketIO.emit('publish_to_alias', {'alias': alias, 'msg': msg})
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
    m = Messenger(APPKEY, ALIAS, ALIAS)
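    # Illustrative (not part of the original demo): once connected, messages
    # could be published from the loop or from a callback, e.g.
    #     m.publish('hello from python', TOPIC, 1)
    #     m.publish_to_alias(ALIAS, 'hello alias')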
while True:
m.loop()
time.sleep(0.02)
| yunbademo/yunba-smartoffice | python/messenger.py | Python | mit | 3,266 |
"""A dummy module for testing purposes."""
import logging
import os
import uuid
import lambdautils.state as state
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def partition_key(event):
return event.get("client_id", str(uuid.uuid4()))
def input_filter(event, *args, **kwargs):
if os.environ.get("mydummyvar") != "mydummyval":
raise ValueError("Unable to retrieve 'mydummyvar' from environment")
event["input_filter"] = True
val = state.get_state(event["id"])
if val:
logger.info("Retrieved state key '{}': '{}'".format(event["id"], val))
return False
else:
logger.info("State key '{}' not found".format(event["id"]))
state.set_state(event["id"], "hello there")
return True
def output_filter_1(event, *args, **kwargs):
event["output_filter_1"] = True
return True
def output_mapper_1(event, *args, **kwargs):
event["output_mapper_1"] = True
return event
def output_mapper_2(event, *args, **kwargs):
event["output_mapper_2"] = True
return event
def output_mapper_2b(event, *args, **kwargs):
event["output_mapper_2b"] = True
return event
def output_filter_2b(event, *args, **kwargs):
return True
def batch_mapper(events, *args, **kwargs):
for ev in events:
ev["batch_mapped"] = True
return events
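# Illustrative sketch (hypothetical event payload) of how the callables above
# might be exercised directly, e.g. in a unit test:
#
#     event = {"id": "abc-123", "client_id": "client-1"}
#     if input_filter(event):
#         event = output_mapper_1(event)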
| humilis/humilis-kinesis-mapper | tests/integration/mycode/mypkg/__init__.py | Python | mit | 1,346 |
"""
Test constrained energy of a model's texture (the cube example below).
Written by Zachary Ferguson
"""
from PIL import Image
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
import numpy
import includes
import obj_reader
from seam_minimizer import compute_energies
###########################
# Values for the problem. #
###########################
mesh = obj_reader.load_obj("../models/cube_edit.obj")
print("")
texture = Image.open("../textures/cube_10.png").transpose(
Image.FLIP_TOP_BOTTOM)
width = texture.size[0]
height = texture.size[1]
textureData = numpy.array(texture)
energies = compute_energies(mesh, textureData)
A = energies.BLE.Q
Ac = energies.LSQ.Q
bc = -energies.LSQ.L
E = energies.L.Q
# Solve for the texture values
print("Solving:")
Ac = scipy.sparse.csc_matrix(Ac)
bc = scipy.sparse.csc_matrix(bc)
detAc = scipy.linalg.det(Ac.toarray())
print("det(Ac) = %.3f" % detAc)
# assert abs(detAc) > 1e-8
x = bc.toarray()
# x = scipy.sparse.linalg.spsolve(A, b).toarray()
#
# print("100%")
#
# # Convert back to image format
# x = x.round()
# for i in range(x.shape[0]):
# x[i] = x[i].clip(0, 255)
# x = x.astype("uint8")
textureData = x.reshape((height, width, -1))
# Save the solved texture
texture = Image.fromarray(textureData)
texture = texture.transpose(Image.FLIP_TOP_BOTTOM)
texture.save("test_constraints.png")
# texture.show()
| zfergus2/Wrapping-Textures | tests/test_constraints.py | Python | mit | 1,354 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/roba_hill/shared_lair_roba_hill.iff"
result.attribute_template_id = -1
result.stfName("lair_n","roba_hill")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/tangible/lair/roba_hill/shared_lair_roba_hill.py | Python | mit | 449 |
"""ldif3 - generate and parse LDIF data (see RFC 2849)."""
from __future__ import unicode_literals
import base64
import re
import logging
from collections import OrderedDict
try: # pragma: nocover
from urlparse import urlparse
from urllib import urlopen
except ImportError: # pragma: nocover
from urllib.parse import urlparse
from urllib.request import urlopen
__version__ = '3.2.2'
__all__ = [
# constants
'LDIF_PATTERN',
# classes
'LDIFWriter',
'LDIFParser',
]
log = logging.getLogger('ldif3')
ATTRTYPE_PATTERN = r'[\w;.-]+(;[\w_-]+)*'
ATTRVALUE_PATTERN = r'(([^,]|\\,)+|".*?")'
ATTR_PATTERN = ATTRTYPE_PATTERN + r'[ ]*=[ ]*' + ATTRVALUE_PATTERN
RDN_PATTERN = ATTR_PATTERN + r'([ ]*\+[ ]*' + ATTR_PATTERN + r')*[ ]*'
DN_PATTERN = RDN_PATTERN + r'([ ]*,[ ]*' + RDN_PATTERN + r')*[ ]*'
DN_REGEX = re.compile('^%s$' % DN_PATTERN)
LDIF_PATTERN = ('^((dn(:|::) %(DN_PATTERN)s)|(%(ATTRTYPE_PATTERN)'
's(:|::) .*)$)+' % vars())
MOD_OPS = ['add', 'delete', 'replace']
CHANGE_TYPES = ['add', 'delete', 'modify', 'modrdn']
def is_dn(s):
"""Return True if s is a LDAP DN."""
if s == '':
return True
rm = DN_REGEX.match(s)
return rm is not None and rm.group(0) == s
UNSAFE_STRING_PATTERN = (
'(^[^\x01-\x09\x0b-\x0c\x0e-\x1f\x21-\x39\x3b\x3d-\x7f]'
'|[^\x01-\x09\x0b-\x0c\x0e-\x7f])')
UNSAFE_STRING_RE = re.compile(UNSAFE_STRING_PATTERN)
def lower(l):
"""Return a list with the lowercased items of l."""
return [i.lower() for i in l or []]
class LDIFWriter(object):
"""Write LDIF entry or change records to file object.
:type output_file: file-like object in binary mode
:param output_file: File for output
:type base64_attrs: List[string]
:param base64_attrs: List of attribute types to be base64-encoded in any
case
:type cols: int
:param cols: Specifies how many columns a line may have before it is
folded into many lines
:type line_sep: bytearray
:param line_sep: line separator
:type encoding: string
:param encoding: Encoding to use for converting values to bytes. Note that
the spec requires the dn field to be UTF-8 encoded, so it does not
really make sense to use anything else. Default: ``'utf8'``.
"""
def __init__(
self,
output_file,
base64_attrs=[],
cols=76,
line_sep=b'\n',
encoding='utf8'):
self._output_file = output_file
self._base64_attrs = lower(base64_attrs)
self._cols = cols
self._line_sep = line_sep
self._encoding = encoding
self.records_written = 0 #: number of records that have been written
def _fold_line(self, line):
"""Write string line as one or more folded lines."""
if len(line) <= self._cols:
self._output_file.write(line)
self._output_file.write(self._line_sep)
else:
pos = self._cols
self._output_file.write(line[0:self._cols])
self._output_file.write(self._line_sep)
while pos < len(line):
self._output_file.write(b' ')
end = min(len(line), pos + self._cols - 1)
self._output_file.write(line[pos:end])
self._output_file.write(self._line_sep)
pos = end
def _needs_base64_encoding(self, attr_type, attr_value):
"""Return True if attr_value has to be base-64 encoded.
This is the case because of special chars or because attr_type is in
self._base64_attrs
"""
return attr_type.lower() in self._base64_attrs or \
isinstance(attr_value, bytes) or \
UNSAFE_STRING_RE.search(attr_value) is not None
def _unparse_attr(self, attr_type, attr_value):
"""Write a single attribute type/value pair."""
if self._needs_base64_encoding(attr_type, attr_value):
if not isinstance(attr_value, bytes):
attr_value = attr_value.encode(self._encoding)
encoded = base64.encodestring(attr_value)\
.replace(b'\n', b'')\
.decode('ascii')
line = ':: '.join([attr_type, encoded])
else:
line = ': '.join([attr_type, attr_value])
self._fold_line(line.encode('ascii'))
def _unparse_entry_record(self, entry):
"""
:type entry: Dict[string, List[string]]
:param entry: Dictionary holding an entry
"""
for attr_type in sorted(entry.keys()):
for attr_value in entry[attr_type]:
self._unparse_attr(attr_type, attr_value)
def _unparse_changetype(self, mod_len):
"""Detect and write the changetype."""
if mod_len == 2:
changetype = 'add'
elif mod_len == 3:
changetype = 'modify'
else:
raise ValueError("modlist item of wrong length")
self._unparse_attr('changetype', changetype)
def _unparse_change_record(self, modlist):
"""
:type modlist: List[Tuple]
:param modlist: List of additions (2-tuple) or modifications (3-tuple)
"""
mod_len = len(modlist[0])
self._unparse_changetype(mod_len)
for mod in modlist:
if len(mod) != mod_len:
raise ValueError("Subsequent modlist item of wrong length")
if mod_len == 2:
mod_type, mod_vals = mod
elif mod_len == 3:
mod_op, mod_type, mod_vals = mod
self._unparse_attr(MOD_OPS[mod_op], mod_type)
for mod_val in mod_vals:
self._unparse_attr(mod_type, mod_val)
if mod_len == 3:
self._output_file.write(b'-' + self._line_sep)
def unparse(self, dn, record):
"""Write an entry or change record to the output file.
:type dn: string
:param dn: distinguished name
:type record: Union[Dict[string, List[string]], List[Tuple]]
:param record: Either a dictionary holding an entry or a list of
additions (2-tuple) or modifications (3-tuple).
"""
self._unparse_attr('dn', dn)
if isinstance(record, dict):
self._unparse_entry_record(record)
elif isinstance(record, list):
self._unparse_change_record(record)
else:
raise ValueError("Argument record must be dictionary or list")
self._output_file.write(self._line_sep)
self.records_written += 1
class LDIFParser(object):
"""Read LDIF entry or change records from file object.
:type input_file: file-like object in binary mode
:param input_file: file to read the LDIF input from
:type ignored_attr_types: List[string]
:param ignored_attr_types: List of attribute types that will be ignored
:type process_url_schemes: List[bytearray]
:param process_url_schemes: List of URL schemes to process with urllib.
An empty list turns off all URL processing and the attribute is
ignored completely.
:type line_sep: bytearray
:param line_sep: line separator
:type encoding: string
:param encoding: Encoding to use for converting values to unicode strings.
        If decoding fails, the raw bytestring will be used instead. You can
also pass ``None`` which will skip decoding and always produce
bytestrings. Note that this only applies to entry values. ``dn`` and
entry keys will always be unicode strings.
:type strict: boolean
:param strict: If set to ``False``, recoverable parse errors will produce
log warnings rather than exceptions.
"""
def _strip_line_sep(self, s):
"""Strip trailing line separators from s, but no other whitespaces."""
if s[-2:] == b'\r\n':
return s[:-2]
elif s[-1:] == b'\n':
return s[:-1]
else:
return s
def __init__(
self,
input_file,
ignored_attr_types=[],
process_url_schemes=[],
line_sep=b'\n',
encoding='utf8',
strict=True):
self._input_file = input_file
self._process_url_schemes = lower(process_url_schemes)
self._ignored_attr_types = lower(ignored_attr_types)
self._line_sep = line_sep
self._encoding = encoding
self._strict = strict
self.line_counter = 0 #: number of lines that have been read
self.byte_counter = 0 #: number of bytes that have been read
self.records_read = 0 #: number of records that have been read
def _iter_unfolded_lines(self):
"""Iter input unfoled lines. Skip comments."""
line = self._input_file.readline()
while line:
self.line_counter += 1
self.byte_counter += len(line)
line = self._strip_line_sep(line)
nextline = self._input_file.readline()
while nextline and nextline[:1] == b' ':
line += self._strip_line_sep(nextline)[1:]
nextline = self._input_file.readline()
if not line.startswith(b'#'):
yield line
line = nextline
def _iter_blocks(self):
"""Iter input lines in blocks separated by blank lines."""
lines = []
for line in self._iter_unfolded_lines():
if line:
lines.append(line)
elif lines:
self.records_read += 1
yield lines
lines = []
if lines:
self.records_read += 1
yield lines
def _decode_value(self, attr_type, attr_value):
if attr_type == u'dn':
try:
return attr_type, attr_value.decode('utf8')
except UnicodeError as err:
self._error(err)
return attr_type, attr_value.decode('utf8', 'ignore')
elif self._encoding is not None:
try:
return attr_type, attr_value.decode(self._encoding)
except UnicodeError:
pass
return attr_type, attr_value
def _parse_attr(self, line):
"""Parse a single attribute type/value pair."""
colon_pos = line.index(b':')
attr_type = line[0:colon_pos].decode('ascii')
if line[colon_pos:].startswith(b'::'):
attr_value = base64.decodestring(line[colon_pos + 2:])
elif line[colon_pos:].startswith(b':<'):
url = line[colon_pos + 2:].strip()
attr_value = b''
if self._process_url_schemes:
u = urlparse(url)
if u[0] in self._process_url_schemes:
attr_value = urlopen(url.decode('ascii')).read()
else:
attr_value = line[colon_pos + 1:].strip()
return self._decode_value(attr_type, attr_value)
def _error(self, msg):
if self._strict:
raise ValueError(msg)
else:
log.warning(msg)
def _check_dn(self, dn, attr_value):
"""Check dn attribute for issues."""
if dn is not None:
self._error('Two lines starting with dn: in one record.')
if not is_dn(attr_value):
self._error('No valid string-representation of '
'distinguished name %s.' % attr_value)
def _check_changetype(self, dn, changetype, attr_value):
"""Check changetype attribute for issues."""
if dn is None:
self._error('Read changetype: before getting valid dn: line.')
if changetype is not None:
self._error('Two lines starting with changetype: in one record.')
if attr_value not in CHANGE_TYPES:
self._error('changetype value %s is invalid.' % attr_value)
def _parse_entry_record(self, lines):
"""Parse a single entry record from a list of lines."""
dn = None
entry = OrderedDict()
for line in lines:
attr_type, attr_value = self._parse_attr(line)
if attr_type == 'dn':
self._check_dn(dn, attr_value)
dn = attr_value
elif attr_type == 'version' and dn is None:
pass # version = 1
else:
if dn is None:
self._error('First line of record does not start '
'with "dn:": %s' % attr_type)
if attr_value is not None and \
attr_type.lower() not in self._ignored_attr_types:
if attr_type in entry:
entry[attr_type].append(attr_value)
else:
entry[attr_type] = [attr_value]
return dn, entry
def parse(self):
"""Iterate LDIF entry records.
:rtype: Iterator[Tuple[string, Dict]]
:return: (dn, entry)
"""
for block in self._iter_blocks():
yield self._parse_entry_record(block)
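# Example usage (a sketch based on the classes above; file names are assumed):
#
#     with open('people.ldif', 'rb') as fd:
#         parser = LDIFParser(fd)
#         for dn, entry in parser.parse():
#             print(dn, entry)
#
#     with open('out.ldif', 'wb') as fd:
#         writer = LDIFWriter(fd)
#         writer.unparse('mail=alice@example.com,ou=people,dc=example,dc=com',
#                        {'mail': ['alice@example.com'], 'objectclass': ['top']})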
| GluuFederation/community-edition-setup | pylib/ldif3/ldif3.py | Python | mit | 13,061 |
# -*- coding: utf-8 -*-
"""Heroku specific settings. These are used to deploy opencomparison to
Heroku's platform.
"""
from os import environ
from memcacheify import memcacheify
from postgresify import postgresify
from S3 import CallingFormat
from .base import *
########## CACHE
CACHE_TIMEOUT = 60 * 60 * 24 * 30
CACHES = memcacheify()
########## WSGI SERVER
INSTALLED_APPS += ['gunicorn']
########## EMAIL
DEFAULT_FROM_EMAIL = environ.get('DEFAULT_FROM_EMAIL',
'Django Packages <[email protected]>')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = os.environ.get('SENDGRID_PASSWORD', '')
EMAIL_HOST_USER = os.environ.get('SENDGRID_USERNAME', '')
EMAIL_PORT = environ.get('EMAIL_PORT', 587)
EMAIL_SUBJECT_PREFIX = environ.get('EMAIL_SUBJECT_PREFIX', '[Django Packages] ')
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
########## SECRET
SECRET_KEY = environ.get('SECRET_KEY', '')
########## SITE
SITE_TITLE = environ.get('SITE_TITLE')
FRAMEWORK_TITLE = environ.get('FRAMEWORK_TITLE')
########## STORAGE
INSTALLED_APPS += ['storages']
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
AWS_HEADERS = {
'Expires': 'Thu, 15 Apr 2020 20:00:00 GMT',
'Cache-Control': 'max-age=86400',
}
AWS_QUERYSTRING_AUTH = False
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
MEDIA_URL = STATIC_URL
########### Permissions
RESTRICT_PACKAGE_EDITORS = False
RESTRICT_GRID_EDITORS = False
########### Errors
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## DATABASE CONFIGURATION
# Setting PGSQL_POOLING to True means:
#   We use django_postgrespool to handle the database connection. SQLAlchemy
#   manages the connection pool to PostgreSQL on Heroku, so connections are
#   reestablished less often and the app is faster. The downside is some risk,
#   as django_postgrespool is still a new project.
#
# Setting PGSQL_POOLING to False means:
#   We use the standard Django PostgreSQL connection. The pooling isn't as
#   good, but we get more stability.
PGSQL_POOLING = False
if PGSQL_POOLING:
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
DATABASES['default']['ENGINE'] = 'django_postgrespool'
SOUTH_DATABASE_ADAPTERS = {
'default': 'south.db.postgresql_psycopg2'
}
DATABASE_POOL_ARGS = {
'max_overflow': 10,
'pool_size': 5,
'recycle': 300
}
else:
from postgresify import postgresify
DATABASES = postgresify()
########## END DATABASE CONFIGURATION
########## sslify
MIDDLEWARE_CLASSES = ('sslify.middleware.SSLifyMiddleware',) + MIDDLEWARE_CLASSES
########## end sslify
########## django-secure
INSTALLED_APPS += ["djangosecure", ]
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_FRAME_DENY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = True
########## end django-secure
########## templates
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
########## end templates
########## appenlight-client
import appenlight_client.client as e_client
APPENLIGHT = e_client.get_config({'appenlight.api_key': environ.get('APPENLIGHT_KEY', '')})
MIDDLEWARE_CLASSES = (
'appenlight_client.django_middleware.AppenlightMiddleware',
) + MIDDLEWARE_CLASSES
########## end appenlight-client | QLGu/djangopackages | settings/heroku.py | Python | mit | 4,843 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import tests.fixtures.mock_data as mock_data
class MockStdout():
def __init__(self):
self.stdout = {}
class PipeMock(object):
def __init__(self):
self.stdout = MockStdout()
class CommandManagerMock(object):
@staticmethod
def run_command(command_argumentlist, working_directory=os.getcwd()):
return True
@staticmethod
def run_command_check_output(command_argumentlist, stdin=None, working_directory=os.getcwd()):
if command_argumentlist == ['rpm', '-qa', '--queryformat', '\t%{name} %{version}-%{release}']:
return mock_data.rpm_query_package_list_output
if command_argumentlist == ['rpm', '-ql', 'docker']:
return mock_data.rpm_query_file_list
if command_argumentlist == ['rpm', '-qa', '--queryformat', '%{name}\n', '-c', 'docker']:
return mock_data.rpm_query_conffile_list
if command_argumentlist == ['rpm', "--query", "--package", "--queryformat", "%{name}", "/tmp/docker.pkg"]:
return "docker"
if command_argumentlist == ['rpm', "--query", "--package", "--queryformat", "%{version}", "/tmp/docker.pkg"]:
return "1.0-5"
if command_argumentlist == ['rpm', "--query", "--package", "/tmp/docker.pkg", "-l"]:
return mock_data.rpm_query_file_list
if command_argumentlist == ['rpm', "--query", "--package", "/tmp/docker.pkg", "-c"]:
return mock_data.rpm_query_conffile_list
if command_argumentlist == ['dpkg-query', '-W', '-f=${Package}\\n${Version}\\n${Status}\\n${conffiles}\\t']:
return mock_data.dpkg_query_package_list_output
if command_argumentlist == ['dpkg-query', '-L', "docker"]:
return mock_data.dpkg_query_file_list
if command_argumentlist == ['dpkg-query', '-W', '-f=${conffiles}\\n', "docker"]:
return mock_data.dpkg_query_conffile_list
if command_argumentlist == ['dpkg', '-f', '/tmp/docker.pkg', 'Package']:
return "docker"
if command_argumentlist == ['dpkg', '-f', '/tmp/docker.pkg', 'Version']:
return "1.0-5"
if command_argumentlist == ['dpkg', '-c', '/tmp/docker.pkg']:
return mock_data.dpkg_query_file_list_package
if command_argumentlist == ['pacman', '-Q', '--color', 'never']:
return mock_data.pacman_query_package_list_output
if command_argumentlist == ['pacman', '-Ql', 'docker']:
return mock_data.pacman_query_file_list
if command_argumentlist == ['pacman', '--query', '--file', '/tmp/docker.pkg']:
return "docker 1:17.04.0-1"
if command_argumentlist == ['pacman', '-Qlp', '/tmp/docker.pkg']:
return mock_data.pacman_query_file_list_package
@staticmethod
def run_command_popen(command_argumentlist, stdout=None):
if command_argumentlist == ['rpm2cpio', "/tmp/docker.pkg"]:
return PipeMock()
if command_argumentlist == ['cpio', "-id", "--quiet"]:
return {}
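# Illustrative (assumed test wiring): this mock can stand in for the real
# command manager in a unit test, e.g. with pytest's monkeypatch
# (``package_info_module`` is a hypothetical target module):
#
#     monkeypatch.setattr(package_info_module, 'CommandManager', CommandManagerMock)
#     assert CommandManagerMock.run_command_check_output(
#         ['rpm', '-ql', 'docker']) == mock_data.rpm_query_file_list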
| pombredanne/swidGenerator | tests/fixtures/command_manager_mock.py | Python | mit | 3,142 |