text
stringlengths
4
1.02M
meta
dict
""" We have four main abstractions: Users, Collections, Memberships, and Roles. Users represent people, like students in a school, teachers for a classroom, or volunteers setting up informal installations. There are two main user types, ``FacilityUser`` and ``DeviceOwner``. A ``FacilityUser`` belongs to a particular facility, and has permissions only with respect to other data that is associated with that facility. A ``DeviceOwner`` is not associated with a particular facility, and has global permissions for data on the local device. ``FacilityUser`` accounts (like other facility data) may be synced across multiple devices, whereas a DeviceOwner account is specific to a single installation of Kolibri. Collections form a hierarchy, with Collections able to belong to other Collections. Collections are subdivided into several pre-defined levels (``Facility`` > ``Classroom`` > ``LearnerGroup``). A ``FacilityUser`` (but not a ``DeviceOwner``) can be marked as a member of a ``Collection`` through a ``Membership`` object. Being a member of a Collection also means being a member of all the Collections above that Collection in the hierarchy. Another way in which a ``FacilityUser`` can be associated with a particular ``Collection`` is through a ``Role`` object, which grants the user a role with respect to the ``Collection`` and all the collections below it. A ``Role`` object also stores the "kind" of the role (currently, one of "admin" or "coach"), which affects what permissions the user gains through the ``Role``. """ from __future__ import absolute_import, print_function, unicode_literals import logging as logger import uuid import six from django.contrib.auth.models import AbstractBaseUser, AnonymousUser from django.core import validators from django.core.exceptions import ValidationError from django.db import models from django.db.models.query import F from django.db.utils import IntegrityError from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ from kolibri.core.errors import KolibriValidationError from kolibri.core.fields import DateTimeTzField from kolibri.utils.time import local_now from morango.certificates import Certificate from morango.models import SyncableModel from morango.query import SyncableModelQuerySet from morango.utils.morango_mptt import MorangoMPTTModel from morango.utils.uuids import UUIDField from mptt.models import TreeForeignKey from .constants import collection_kinds, role_kinds from .errors import ( InvalidRoleKind, UserDoesNotHaveRoleError, UserHasRoleOnlyIndirectlyThroughHierarchyError, UserIsMemberOnlyIndirectlyThroughHierarchyError, UserIsNotFacilityUser, UserIsNotMemberError ) from .filters import HierarchyRelationsFilter from .permissions.auth import ( AllCanReadFacilityDataset, AnonUserCanReadFacilitiesThatAllowSignUps, AnybodyCanCreateIfNoDeviceOwner, AnybodyCanCreateIfNoFacility, CoachesCanManageGroupsForTheirClasses, CoachesCanManageMembershipsForTheirGroups, CollectionSpecificRoleBasedPermissions, FacilityAdminCanEditForOwnFacilityDataset ) from .permissions.base import BasePermissions, RoleBasedPermissions from .permissions.general import IsAdminForOwnFacility, IsFromSameFacility, IsOwn, IsSelf logging = logger.getLogger(__name__) def _has_permissions_class(obj): return hasattr(obj, "permissions") and isinstance(obj.permissions, BasePermissions) class FacilityDataSyncableModel(SyncableModel): morango_profile = "facilitydata" class Meta: abstract = True @python_2_unicode_compatible class FacilityDataset(FacilityDataSyncableModel): """ ``FacilityDataset`` stores high-level metadata and settings for a particular ``Facility``. It is also the model that all models storing facility data (data that is associated with a particular facility, and that inherits from ``AbstractFacilityDataModel``) foreign key onto, to indicate that they belong to this particular ``Facility``. """ permissions = ( AllCanReadFacilityDataset() | FacilityAdminCanEditForOwnFacilityDataset() ) # Morango syncing settings morango_model_name = "facilitydataset" description = models.TextField(blank=True) location = models.CharField(max_length=200, blank=True) # Facility specific configuration settings learner_can_edit_username = models.BooleanField(default=True) learner_can_edit_name = models.BooleanField(default=True) learner_can_edit_password = models.BooleanField(default=True) learner_can_sign_up = models.BooleanField(default=True) learner_can_delete_account = models.BooleanField(default=True) learner_can_login_with_no_password = models.BooleanField(default=False) def __str__(self): facilities = self.collection_set.filter(kind=collection_kinds.FACILITY) if facilities: return "FacilityDataset for {}".format(Facility.objects.get(id=facilities[0].id)) else: return "FacilityDataset (no associated Facility)" def calculate_source_id(self): # if we don't already have a source ID, get one by generating a new root certificate, and using its ID if not self._morango_source_id: self._morango_source_id = Certificate.generate_root_certificate("full-facility").id return self._morango_source_id @staticmethod def compute_namespaced_id(partition_value, source_id_value, model_name): assert partition_value.startswith(FacilityDataset.ID_PLACEHOLDER) assert model_name == FacilityDataset.morango_model_name # we use the source_id as the ID for the FacilityDataset return source_id_value def calculate_partition(self): return "{id}:allusers-ro".format(id=self.ID_PLACEHOLDER) def get_root_certificate(self): return Certificate.objects.get(id=self.id) def get_owned_certificates(self): # return all certificates associated with this facility dataset for which we have the private key return Certificate.objects.filter(tree_id=self.get_root_certificate().tree_id).exclude(_private_key=None) class AbstractFacilityDataModel(FacilityDataSyncableModel): """ Base model for Kolibri "Facility Data", which is data that is specific to a particular ``Facility``, such as ``FacilityUsers``, ``Collections``, and other data associated with those users and collections. """ dataset = models.ForeignKey(FacilityDataset) class Meta: abstract = True def calculate_source_id(self): # by default, we'll use randomly generated source IDs; this can be overridden as desired return None def clean_fields(self, *args, **kwargs): # ensure that we have, or can infer, a dataset for the model instance self.ensure_dataset(validating=True) super(AbstractFacilityDataModel, self).clean_fields(*args, **kwargs) def full_clean(self, *args, **kwargs): kwargs["exclude"] = kwargs.get("exclude", []) + getattr(self, "FIELDS_TO_EXCLUDE_FROM_VALIDATION", []) super(AbstractFacilityDataModel, self).full_clean(*args, **kwargs) def save(self, *args, **kwargs): # before saving, ensure we have a dataset, and convert any validation errors into integrity errors, # since by this point the `clean_fields` method should already have prevented this situation from arising try: self.ensure_dataset() except KolibriValidationError as e: raise IntegrityError(str(e)) super(AbstractFacilityDataModel, self).save(*args, **kwargs) def ensure_dataset(self, *args, **kwargs): """ If no dataset has yet been specified, try to infer it. If a dataset has already been specified, to prevent inconsistencies, make sure it matches the inferred dataset, otherwise raise a ``KolibriValidationError``. If we have no dataset and it can't be inferred, we raise a ``KolibriValidationError`` exception as well. """ inferred_dataset = self.infer_dataset(*args, **kwargs) if self.dataset_id: # make sure currently stored dataset matches inferred dataset, if any if inferred_dataset and inferred_dataset != self.dataset: raise KolibriValidationError("This model is not associated with the correct FacilityDataset.") else: # use the inferred dataset, if there is one, otherwise throw an error if inferred_dataset: self.dataset = inferred_dataset else: raise KolibriValidationError("FacilityDataset ('dataset') not provided, and could not be inferred.") def infer_dataset(self, *args, **kwargs): """ This method is used by `ensure_dataset` to "infer" which dataset should be associated with this instance. It should be overridden in any subclass of ``AbstractFacilityDataModel``, to define a model-specific inference. """ raise NotImplementedError("Subclasses of AbstractFacilityDataModel must override the `infer_dataset` method.") class KolibriAbstractBaseUser(AbstractBaseUser): """ Our custom user type, derived from ``AbstractBaseUser`` as described in the Django docs. Draws liberally from ``django.contrib.auth.AbstractUser``, except we exclude some fields we don't care about, like email. This model is an abstract model, and is inherited by both ``FacilityUser`` and ``DeviceOwner``. """ class Meta: abstract = True USERNAME_FIELD = "username" username = models.CharField( _('username'), max_length=30, help_text=_('Required. 30 characters or fewer. Letters and digits only'), validators=[ validators.RegexValidator( r'^\w+$', _('Enter a valid username. This value can contain only letters, numbers, and underscores.') ), ], ) full_name = models.CharField(_('full name'), max_length=120, blank=True) date_joined = DateTimeTzField(_('date joined'), default=local_now, editable=False) is_staff = False is_superuser = False is_facility_user = False def get_short_name(self): return self.full_name.split(' ', 1)[0] def is_member_of(self, coll): """ Determine whether this user is a member of the specified ``Collection``. :param coll: The ``Collection`` for which we are checking this user's membership. :return: ``True`` if this user is a member of the specified ``Collection``, otherwise False. :rtype: bool """ raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `is_member_of` method.") def get_roles_for_user(self, user): """ Determine all the roles this user has in relation to the target user, and return a set containing the kinds of roles. :param user: The target user for which this user has the roles. :return: The kinds of roles this user has with respect to the target user. :rtype: set of ``kolibri.auth.constants.role_kinds.*`` strings """ raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `get_roles_for_user` method.") def get_roles_for_collection(self, coll): """ Determine all the roles this user has in relation to the specified ``Collection``, and return a set containing the kinds of roles. :param coll: The target ``Collection`` for which this user has the roles. :return: The kinds of roles this user has with respect to the specified ``Collection``. :rtype: set of ``kolibri.auth.constants.role_kinds.*`` strings """ raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `get_roles_for_collection` method.") def has_role_for_user(self, kinds, user): """ Determine whether this user has (at least one of) the specified role kind(s) in relation to the specified user. :param user: The user that is the target of the role (for which this user has the roles). :param kinds: The kind (or kinds) of role to check for, as a string or iterable. :type kinds: string from ``kolibri.auth.constants.role_kinds.*`` :return: ``True`` if this user has the specified role kind with respect to the target user, otherwise ``False``. :rtype: bool """ raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `has_role_for_user` method.") def has_role_for_collection(self, kinds, coll): """ Determine whether this user has (at least one of) the specified role kind(s) in relation to the specified ``Collection``. :param kinds: The kind (or kinds) of role to check for, as a string or iterable. :type kinds: string from kolibri.auth.constants.role_kinds.* :param coll: The target ``Collection`` for which this user has the roles. :return: ``True`` if this user has the specified role kind with respect to the target ``Collection``, otherwise ``False``. :rtype: bool """ raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `has_role_for_collection` method.") def can_create_instance(self, obj): """ Checks whether this user (self) has permission to create a particular model instance (obj). This method should be overridden by classes that inherit from ``KolibriAbstractBaseUser``. In general, unless an instance has already been initialized, this method should not be called directly; instead, it should be preferred to call ``can_create``. :param obj: An (unsaved) instance of a Django model, to check permissions for. :return: ``True`` if this user should have permission to create the object, otherwise ``False``. :rtype: bool """ raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `can_create_instance` method.") def can_create(self, Model, data): """ Checks whether this user (self) has permission to create an instance of Model with the specified attributes (data). This method defers to the ``can_create_instance`` method, and in most cases should not itself be overridden. :param Model: A subclass of ``django.db.models.Model`` :param data: A ``dict`` of data to be used in creating an instance of the Model :return: ``True`` if this user should have permission to create an instance of Model with the specified data, else ``False``. :rtype: bool """ try: instance = Model(**data) instance.clean_fields(exclude=getattr(Model, "FIELDS_TO_EXCLUDE_FROM_VALIDATION", None)) instance.clean() except TypeError as e: logging.error("TypeError while validating model before checking permissions: {}".format(e.args)) return False # if the data provided does not fit the Model, don't continue checking except ValidationError as e: return False # if the data does not validate, don't continue checking # now that we have an instance, defer to the permission-checking method that works with instances return self.can_create_instance(instance) def can_read(self, obj): """ Checks whether this user (self) has permission to read a particular model instance (obj). This method should be overridden by classes that inherit from ``KolibriAbstractBaseUser``. :param obj: An instance of a Django model, to check permissions for. :return: ``True`` if this user should have permission to read the object, otherwise ``False``. :rtype: bool """ raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `can_read` method.") def can_update(self, obj): """ Checks whether this user (self) has permission to update a particular model instance (obj). This method should be overridden by classes that inherit from KolibriAbstractBaseUser. :param obj: An instance of a Django model, to check permissions for. :return: ``True`` if this user should have permission to update the object, otherwise ``False``. :rtype: bool """ raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `can_update` method.") def can_delete(self, obj): """ Checks whether this user (self) has permission to delete a particular model instance (obj). This method should be overridden by classes that inherit from KolibriAbstractBaseUser. :param obj: An instance of a Django model, to check permissions for. :return: ``True`` if this user should have permission to delete the object, otherwise ``False``. :rtype: bool """ raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `can_delete` method.") def get_roles_for(self, obj): """ Helper function that defers to ``get_roles_for_user`` or ``get_roles_for_collection`` based on the type of object passed in. """ if isinstance(obj, KolibriAbstractBaseUser): return self.get_roles_for_user(obj) elif isinstance(obj, Collection): return self.get_roles_for_collection(obj) else: raise ValueError("The `obj` argument to `get_roles_for` must be either an instance of KolibriAbstractBaseUser or Collection.") def has_role_for(self, kinds, obj): """ Helper function that defers to ``has_role_for_user`` or ``has_role_for_collection`` based on the type of object passed in. """ if isinstance(obj, KolibriAbstractBaseUser): return self.has_role_for_user(kinds, obj) elif isinstance(obj, Collection): return self.has_role_for_collection(kinds, obj) else: raise ValueError("The `obj` argument to `has_role_for` must be either an instance of KolibriAbstractBaseUser or Collection.") def filter_readable(self, queryset): """ Filters a queryset down to only the elements that this user should have permission to read. :param queryset: A ``QuerySet`` instance that the filtering should be applied to. :return: Filtered ``QuerySet`` including only elements that are readable by this user. """ raise NotImplementedError("Subclasses of KolibriAbstractBaseUser must override the `can_delete` method.") class KolibriAnonymousUser(AnonymousUser, KolibriAbstractBaseUser): """ Custom anonymous user that also exposes the same interface as KolibriAbstractBaseUser, for consistency. """ class Meta: abstract = True def is_member_of(self, coll): return False def get_roles_for_user(self, user): return set([]) def get_roles_for_collection(self, coll): return set([]) def has_role_for_user(self, kinds, user): return False def has_role_for_collection(self, kinds, coll): return False def can_create_instance(self, obj): # check the object permissions, if available, just in case permissions are granted to anon users if _has_permissions_class(obj): return obj.permissions.user_can_create_object(self, obj) else: return False def can_read(self, obj): # check the object permissions, if available, just in case permissions are granted to anon users if _has_permissions_class(obj): return obj.permissions.user_can_read_object(self, obj) else: return False def can_update(self, obj): # check the object permissions, if available, just in case permissions are granted to anon users if _has_permissions_class(obj): return obj.permissions.user_can_update_object(self, obj) else: return False def can_delete(self, obj): # check the object permissions, if available, just in case permissions are granted to anon users if _has_permissions_class(obj): return obj.permissions.user_can_delete_object(self, obj) else: return False def filter_readable(self, queryset): # check the object permissions, if available, just in case permissions are granted to anon users if _has_permissions_class(queryset.model): return queryset.model.permissions.readable_by_user_filter(self, queryset).distinct() else: return queryset.none() @python_2_unicode_compatible class FacilityUser(KolibriAbstractBaseUser, AbstractFacilityDataModel): """ ``FacilityUser`` is the fundamental object of the auth app. These users represent the main users, and can be associated with a hierarchy of ``Collections`` through ``Memberships`` and ``Roles``, which then serve to help determine permissions. """ # Morango syncing settings morango_model_name = "facilityuser" permissions = ( IsSelf() | # FacilityUser can be read and written by itself IsAdminForOwnFacility() | # FacilityUser can be read and written by a facility admin RoleBasedPermissions( # FacilityUser can be read by admin or coach, and updated by admin, but not created/deleted by non-facility admin target_field=".", can_be_created_by=(), # we can't check creation permissions by role, as user doesn't exist yet can_be_read_by=(role_kinds.ADMIN, role_kinds.COACH), can_be_updated_by=(role_kinds.ADMIN,), can_be_deleted_by=(), # don't want a classroom admin deleting a user completely, just removing them from the class ) ) facility = models.ForeignKey("Facility") is_facility_user = True class Meta: unique_together = (("username", "facility"),) def calculate_partition(self): return "{dataset_id}:user-ro:{user_id}".format(dataset_id=self.dataset_id, user_id=self.ID_PLACEHOLDER) def infer_dataset(self, *args, **kwargs): return self.facility.dataset def is_member_of(self, coll): if self.dataset_id != coll.dataset_id: return False if coll.kind == collection_kinds.FACILITY: return True # FacilityUser is always a member of her own facility return HierarchyRelationsFilter(FacilityUser.objects.all()).filter_by_hierarchy( target_user=F("id"), ancestor_collection=coll.id, ).filter(id=self.id).exists() def get_roles_for_user(self, user): if not hasattr(user, "dataset_id") or self.dataset_id != user.dataset_id: return set([]) role_instances = HierarchyRelationsFilter(Role).filter_by_hierarchy( ancestor_collection=F("collection"), source_user=F("user"), target_user=user, ).filter(user=self) return set([instance["kind"] for instance in role_instances.values("kind").distinct()]) def get_roles_for_collection(self, coll): if self.dataset_id != coll.dataset_id: return set([]) role_instances = HierarchyRelationsFilter(Role).filter_by_hierarchy( ancestor_collection=F("collection"), source_user=F("user"), descendant_collection=coll, ).filter(user=self) return set([instance["kind"] for instance in role_instances.values("kind").distinct()]) def has_role_for_user(self, kinds, user): if not kinds: return False if not hasattr(user, "dataset_id") or self.dataset_id != user.dataset_id: return False return HierarchyRelationsFilter(Role).filter_by_hierarchy( ancestor_collection=F("collection"), source_user=F("user"), role_kind=kinds, target_user=user, ).filter(user=self).exists() def has_role_for_collection(self, kinds, coll): if not kinds: return False if self.dataset_id != coll.dataset_id: return False return HierarchyRelationsFilter(Role).filter_by_hierarchy( ancestor_collection=F("collection"), source_user=F("user"), role_kind=kinds, descendant_collection=coll, ).filter(user=self).exists() def can_create_instance(self, obj): # a FacilityUser's permissions are determined through the object's permission class if _has_permissions_class(obj): return obj.permissions.user_can_create_object(self, obj) else: return False def can_read(self, obj): # a FacilityUser's permissions are determined through the object's permission class if _has_permissions_class(obj): return obj.permissions.user_can_read_object(self, obj) else: return False def can_update(self, obj): # a FacilityUser's permissions are determined through the object's permission class if _has_permissions_class(obj): return obj.permissions.user_can_update_object(self, obj) else: return False def can_delete(self, obj): # a FacilityUser's permissions are determined through the object's permission class if _has_permissions_class(obj): return obj.permissions.user_can_delete_object(self, obj) else: return False def filter_readable(self, queryset): if _has_permissions_class(queryset.model): return queryset.model.permissions.readable_by_user_filter(self, queryset).distinct() else: return queryset.none() def __str__(self): return '"{user}"@"{facility}"'.format(user=self.full_name or self.username, facility=self.facility) class DeviceOwnerManager(models.Manager): def create_superuser(self, username, password, **extra_fields): if not username: raise ValueError('The given username must be set') user = DeviceOwner(username=username) user.set_password(password) user.save() return user @python_2_unicode_compatible class DeviceOwner(KolibriAbstractBaseUser): """ When a user first installs Kolibri on a device, they will be prompted to create a ``DeviceOwner``, a special kind of user which is associated with that device only, and who must give permission to make broad changes to the Kolibri installation on that device (such as creating a ``Facility``, or changing configuration settings). Actions not relating to user data but specifically to a device -- like upgrading Kolibri, changing whether the device is a Classroom Server or Classroom Client, or determining manually which data should be synced -- must be performed by a ``DeviceOwner``. A ``DeviceOwner`` is a superuser, and has full access to do anything she wants with data on the device. """ permissions = AnybodyCanCreateIfNoDeviceOwner() objects = DeviceOwnerManager() id = UUIDField(primary_key=True, editable=False, default=uuid.uuid4) # DeviceOwners can access the Django admin interface is_staff = True is_superuser = True def is_member_of(self, coll): return False # a DeviceOwner is not a member of any Collection def get_roles_for_user(self, user): return set([role_kinds.ADMIN]) # a DeviceOwner has admin role for all users on the device def get_roles_for_collection(self, coll): return set([role_kinds.ADMIN]) # a DeviceOwner has admin role for all collections on the device def has_role_for_user(self, kinds, user): if isinstance(kinds, six.string_types): kinds = [kinds] return role_kinds.ADMIN in kinds # a DeviceOwner has admin role for all users on the device def has_role_for_collection(self, kinds, coll): if isinstance(kinds, six.string_types): kinds = [kinds] return role_kinds.ADMIN in kinds # a DeviceOwner has admin role for all collections on the device def can_create_instance(self, obj): # DeviceOwners are superusers, and can do anything return True def can_read(self, obj): # DeviceOwners are superusers, and can do anything return True def can_update(self, obj): # DeviceOwners are superusers, and can do anything return True def can_delete(self, obj): # DeviceOwners are superusers, and can do anything return True def filter_readable(self, queryset): return queryset def __str__(self): return self.full_name or self.username def has_perm(self, perm, obj=None): # ensure the DeviceOwner has full access to the Django admin return True def has_perms(self, perm_list, obj=None): # ensure the DeviceOwner has full access to the Django admin return True def has_module_perms(self, app_label): # ensure the DeviceOwner has full access to the Django admin return True @python_2_unicode_compatible class Collection(MorangoMPTTModel, AbstractFacilityDataModel): """ ``Collections`` are hierarchical groups of ``FacilityUsers``, used for grouping users and making decisions about permissions. ``FacilityUsers`` can have roles for one or more ``Collections``, by way of obtaining ``Roles`` associated with those ``Collections``. ``Collections`` can belong to other ``Collections``, and user membership in a ``Collection`` is conferred through ``Memberships``. ``Collections`` are subdivided into several pre-defined levels. """ # Morango syncing settings morango_model_name = "collection" # Collection can be read by anybody from the facility; writing is only allowed by an admin for the collection. # Furthermore, no FacilityUser can create or delete a Facility. Permission to create a collection is governed # by roles in relation to the new collection's parent collection (see CollectionSpecificRoleBasedPermissions). permissions = ( IsFromSameFacility(read_only=True) | CollectionSpecificRoleBasedPermissions() | AnybodyCanCreateIfNoFacility() | AnonUserCanReadFacilitiesThatAllowSignUps() | CoachesCanManageGroupsForTheirClasses() ) _KIND = None # Should be overridden in subclasses to specify what "kind" they are name = models.CharField(max_length=100) parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True) kind = models.CharField(max_length=20, choices=collection_kinds.choices) def __init__(self, *args, **kwargs): if self._KIND: kwargs["kind"] = self._KIND super(Collection, self).__init__(*args, **kwargs) def calculate_partition(self): return "{dataset_id}:allusers-ro".format(dataset_id=self.dataset_id) def clean_fields(self, *args, **kwargs): self._ensure_kind() super(Collection, self).clean_fields(*args, **kwargs) def save(self, *args, **kwargs): self._ensure_kind() super(Collection, self).save(*args, **kwargs) def _ensure_kind(self): """ Make sure the "kind" is set correctly on the model, corresponding to the appropriate subclass of ``Collection``. """ if self._KIND: self.kind = self._KIND def get_members(self): if self.kind == collection_kinds.FACILITY: return FacilityUser.objects.filter(dataset=self.dataset) # FacilityUser is always a member of her own facility return HierarchyRelationsFilter(FacilityUser).filter_by_hierarchy( target_user=F("id"), ancestor_collection=self, ) def add_role(self, user, role_kind): """ Create a ``Role`` associating the provided user with this collection, with the specified kind of role. If the Role object already exists, just return that, without changing anything. :param user: The ``FacilityUser`` to associate with this ``Collection``. :param role_kind: The kind of role to give the user with respect to this ``Collection``. :return: The ``Role`` object (possibly new) that associates the user with the ``Collection``. """ # ensure the specified role kind is valid if role_kind not in (kind[0] for kind in role_kinds.choices): raise InvalidRoleKind("'{role_kind}' is not a valid role kind.".format(role_kind=role_kind)) # ensure the provided user is a FacilityUser if not isinstance(user, FacilityUser): raise UserIsNotFacilityUser("You can only add roles for FacilityUsers.") # create the necessary role, if it doesn't already exist role, created = Role.objects.get_or_create(user=user, collection=self, kind=role_kind) return role def remove_role(self, user, role_kind): """ Remove any ``Role`` objects associating the provided user with this ``Collection``, with the specified kind of role. :param user: The ``FacilityUser`` to dissociate from this ``Collection`` (for the specific role kind). :param role_kind: The kind of role to remove from the user with respect to this ``Collection``. """ # ensure the specified role kind is valid if role_kind not in (kind[0] for kind in role_kinds.choices): raise InvalidRoleKind("'{role_kind}' is not a valid role kind.".format(role_kind=role_kind)) # ensure the provided user is a FacilityUser if not isinstance(user, FacilityUser): raise UserIsNotFacilityUser("You can only remove roles for FacilityUsers.") # make sure the user has the role to begin with if not user.has_role_for_collection(role_kind, self): raise UserDoesNotHaveRoleError("User does not have this role for this collection.") # delete the appropriate role, if it exists results = Role.objects.filter(user=user, collection=self, kind=role_kind).delete() # if no Roles were deleted, the user's role must have been indirect (via the collection hierarchy) if results[0] == 0: raise UserHasRoleOnlyIndirectlyThroughHierarchyError( "Role cannot be removed, as user has it only indirectly, through the collection hierarchy.") def add_member(self, user): """ Create a ``Membership`` associating the provided user with this ``Collection``. If the ``Membership`` object already exists, just return that, without changing anything. :param user: The ``FacilityUser`` to add to this ``Collection``. :return: The ``Membership`` object (possibly new) that associates the user with the ``Collection``. """ # ensure the provided user is a FacilityUser if not isinstance(user, FacilityUser): raise UserIsNotFacilityUser("You can only add memberships for FacilityUsers.") # create the necessary membership, if it doesn't already exist membership, created = Membership.objects.get_or_create(user=user, collection=self) return membership def remove_member(self, user): """ Remove any ``Membership`` objects associating the provided user with this ``Collection``. :param user: The ``FacilityUser`` to remove from this ``Collection``. :return: ``True`` if a ``Membership`` was removed, ``False`` if there was no matching ``Membership`` to remove. """ # ensure the provided user is a FacilityUser if not isinstance(user, FacilityUser): raise UserIsNotFacilityUser("You can only remove memberships for FacilityUsers.") if not user.is_member_of(self): raise UserIsNotMemberError("The user is not a member of the collection, and cannot be removed.") # delete the appropriate membership, if it exists results = Membership.objects.filter(user=user, collection=self).delete() # if no Memberships were deleted, the user's membership must have been indirect (via the collection hierarchy) if results[0] == 0: raise UserIsMemberOnlyIndirectlyThroughHierarchyError( "Membership cannot be removed, as user is a member only indirectly, through the collection hierarchy.") def infer_dataset(self, *args, **kwargs): if self.parent: # subcollections inherit dataset from root of their tree # (we can't call `get_root` directly on self, as it won't work if self hasn't yet been saved) return self.parent.get_root().dataset else: return None # the root node (i.e. Facility) must be explicitly tied to a dataset def __str__(self): return '"{name}" ({kind})'.format(name=self.name, kind=self.kind) @python_2_unicode_compatible class Membership(AbstractFacilityDataModel): """ A ``FacilityUser`` can be marked as a member of a ``Collection`` through a ``Membership`` object. Being a member of a ``Collection`` also means being a member of all the ``Collections`` above that ``Collection`` in the tree (i.e. if you are a member of a ``LearnerGroup``, you are also a member of the ``Classroom`` that contains that ``LearnerGroup``, and of the ``Facility`` that contains that ``Classroom``). """ # Morango syncing settings morango_model_name = "membership" permissions = ( IsOwn(read_only=True) | # users can read their own Memberships RoleBasedPermissions( # Memberships can be read and written by admins, and read by coaches, for the member user target_field="user", can_be_created_by=(role_kinds.ADMIN,), can_be_read_by=(role_kinds.ADMIN, role_kinds.COACH), can_be_updated_by=(), # Membership objects shouldn't be updated; they should be deleted and recreated as needed can_be_deleted_by=(role_kinds.ADMIN,), ) | CoachesCanManageMembershipsForTheirGroups() # Membership can be written by coaches under the coaches' group ) user = models.ForeignKey('FacilityUser', blank=False, null=False) # Note: "It's recommended you use mptt.fields.TreeForeignKey wherever you have a foreign key to an MPTT model. # https://django-mptt.github.io/django-mptt/models.html#treeforeignkey-treeonetoonefield-treemanytomanyfield collection = TreeForeignKey("Collection") class Meta: unique_together = (("user", "collection"),) def calculate_partition(self): return '{dataset_id}:user-ro:{user_id}'.format(dataset_id=self.dataset_id, user_id=self.user_id) def calculate_source_id(self): return '{collection_id}'.format(collection_id=self.collection_id) def infer_dataset(self, *args, **kwargs): user_dataset = self.user.dataset collection_dataset = self.collection.dataset if user_dataset != collection_dataset: raise KolibriValidationError("Collection and user for a Membership object must be in same dataset.") return user_dataset def __str__(self): return "{user}'s membership in {collection}".format(user=self.user, collection=self.collection) @python_2_unicode_compatible class Role(AbstractFacilityDataModel): """ A ``FacilityUser`` can have a role for a particular ``Collection`` through a ``Role`` object, which also stores the "kind" of the ``Role`` (currently, one of "admin" or "coach"). Having a role for a ``Collection`` also implies having that role for all sub-collections of that ``Collection`` (i.e. all the ``Collections`` below it in the tree). """ # Morango syncing settings morango_model_name = "role" permissions = ( IsOwn(read_only=True) | # users can read their own Roles RoleBasedPermissions( # Memberships can be read and written by admins, and read by coaches, for the role collection target_field="collection", can_be_created_by=(role_kinds.ADMIN,), can_be_read_by=(role_kinds.ADMIN, role_kinds.COACH), can_be_updated_by=(), # Role objects shouldn't be updated; they should be deleted and recreated as needed can_be_deleted_by=(role_kinds.ADMIN,), ) ) user = models.ForeignKey('FacilityUser', related_name="roles", blank=False, null=False) # Note: "It's recommended you use mptt.fields.TreeForeignKey wherever you have a foreign key to an MPTT model. # https://django-mptt.github.io/django-mptt/models.html#treeforeignkey-treeonetoonefield-treemanytomanyfield collection = TreeForeignKey("Collection") kind = models.CharField(max_length=20, choices=role_kinds.choices) class Meta: unique_together = (("user", "collection", "kind"),) def calculate_partition(self): return '{dataset_id}:user-ro:{user_id}'.format(dataset_id=self.dataset_id, user_id=self.user_id) def calculate_source_id(self): return '{collection_id}:{kind}'.format(collection_id=self.collection_id, kind=self.kind) def infer_dataset(self, *args, **kwargs): user_dataset = self.user.dataset collection_dataset = self.collection.dataset if user_dataset != collection_dataset: raise KolibriValidationError("The collection and user for a Role object must be in the same dataset.") return user_dataset def __str__(self): return "{user}'s {kind} role for {collection}".format(user=self.user, kind=self.kind, collection=self.collection) # class CollectionProxyManager(models.Manager.from_queryset(SyncableModelQuerySet)): class CollectionProxyManager(models.Manager.from_queryset(SyncableModelQuerySet)): # should this be from_queryset or just MorangoManager def get_queryset(self): return super(CollectionProxyManager, self).get_queryset().filter(kind=self.model._KIND) @python_2_unicode_compatible class Facility(Collection): # don't require that we have a dataset set during validation, so we're not forced to generate one unnecessarily FIELDS_TO_EXCLUDE_FROM_VALIDATION = ["dataset"] morango_model_name = "facility" _KIND = collection_kinds.FACILITY objects = CollectionProxyManager() class Meta: proxy = True @classmethod def get_default_facility(cls): # temporary approach to a default facility; later, we can make this more refined return cls.objects.all().first() def save(self, *args, **kwargs): if self.parent: raise IntegrityError("Facility must be the root of a collection tree, and cannot have a parent.") super(Facility, self).save(*args, **kwargs) def ensure_dataset(self, *args, **kwargs): # if we're just validating, we don't want to trigger creation of a FacilityDataset if kwargs.get("validating"): return super(Facility, self).ensure_dataset(*args, **kwargs) def infer_dataset(self, *args, **kwargs): # if we don't yet have a dataset, create a new one for this facility if not self.dataset_id: self.dataset = FacilityDataset.objects.create() return self.dataset def get_classrooms(self): """ Returns a QuerySet of Classrooms under this Facility. :return: A Classroom QuerySet. """ return Classroom.objects.filter(parent=self) def add_admin(self, user): return self.add_role(user, role_kinds.ADMIN) def add_admins(self, users): return [self.add_admin(user) for user in users] def remove_admin(self, user): self.remove_role(user, role_kinds.ADMIN) def add_coach(self, user): return self.add_role(user, role_kinds.COACH) def add_coaches(self, users): return [self.add_coach(user) for user in users] def remove_coach(self, user): self.remove_role(user, role_kinds.COACH) def __str__(self): return self.name @python_2_unicode_compatible class Classroom(Collection): morango_model_name = "classroom" _KIND = collection_kinds.CLASSROOM objects = CollectionProxyManager() class Meta: proxy = True def save(self, *args, **kwargs): if not self.parent: raise IntegrityError("Classroom cannot be the root of a collection tree, and must have a parent.") super(Classroom, self).save(*args, **kwargs) def get_facility(self): """ Gets the ``Classroom``'s parent ``Facility``. :return: A ``Facility`` instance. """ return Facility.objects.get(id=self.parent_id) def get_learner_groups(self): """ Returns a ``QuerySet`` of ``LearnerGroups`` associated with this ``Classroom``. :return: A ``LearnerGroup`` ``QuerySet``. """ return LearnerGroup.objects.filter(parent=self) def add_admin(self, user): return self.add_role(user, role_kinds.ADMIN) def add_admins(self, users): return [self.add_admin(user) for user in users] def remove_admin(self, user): self.remove_role(user, role_kinds.ADMIN) def add_coach(self, user): return self.add_role(user, role_kinds.COACH) def add_coaches(self, users): return [self.add_coach(user) for user in users] def remove_coach(self, user): self.remove_role(user, role_kinds.COACH) def __str__(self): return self.name @python_2_unicode_compatible class LearnerGroup(Collection): morango_model_name = "learnergroup" _KIND = collection_kinds.LEARNERGROUP objects = CollectionProxyManager() class Meta: proxy = True def save(self, *args, **kwargs): if not self.parent: raise IntegrityError("LearnerGroup cannot be the root of a collection tree, and must have a parent.") super(LearnerGroup, self).save(*args, **kwargs) def get_classroom(self): """ Gets the ``LearnerGroup``'s parent ``Classroom``. :return: A ``Classroom`` instance. """ return Classroom.objects.get(id=self.parent_id) def add_learner(self, user): return self.add_member(user) def add_learners(self, users): return [self.add_learner(user) for user in users] def remove_learner(self, user): return self.remove_member(user) def __str__(self): return self.name
{ "content_hash": "7b1e0df970e21a1aa5470c448592a24e", "timestamp": "", "source": "github", "line_count": 1087, "max_line_length": 144, "avg_line_length": 42.889604415823364, "alnum_prop": 0.6760901739559426, "repo_name": "rtibbles/kolibri", "id": "a45e96a3989e5172e2d50394d15f08111186a721", "size": "46621", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "kolibri/auth/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "27623" }, { "name": "HTML", "bytes": "4406" }, { "name": "JavaScript", "bytes": "510659" }, { "name": "Makefile", "bytes": "3914" }, { "name": "Python", "bytes": "664765" }, { "name": "Shell", "bytes": "10337" }, { "name": "Vue", "bytes": "481473" } ], "symlink_target": "" }
class PageViewCount: __slots__ = ('project', 'page_name', 'views', 'bytes_returned') def __init__(self, project, page_name, views, bytes_returned): self.project = str(project) self.page_name = str(page_name) self.views = int(views) self.bytes_returned = int(bytes_returned) @classmethod def from_line(cls, line): project, page_name, views, bytes_returned = line.strip().split(" ") page_name = page_name.split("#")[0] # No anchors return cls(project, page_name, int(views), int(bytes_returned)) def to_line(self): return " ".join([self.project, self.page_name, str(self.views), str(self.bytes_returned)]) def __lt__(self, other): return (self.project, self.page_name) < \ (other.project, other.page_name)
{ "content_hash": "170f04c7368fe9c9862ab7d71f4b70fc", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 75, "avg_line_length": 35.291666666666664, "alnum_prop": 0.5844155844155844, "repo_name": "mediawiki-utilities/python-mwviews", "id": "adee3a9bae8385cead92c919c4c0b012ed6ba1eb", "size": "849", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "src/mwviews/page_view_count.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "24157" } ], "symlink_target": "" }
""" kombu.common ============ Common Utilities. :copyright: (c) 2009 - 2012 by Ask Solem. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from __future__ import with_statement import socket import sys from collections import defaultdict, deque from functools import partial from itertools import count from . import serialization from .entity import Exchange, Queue from .log import Log from .messaging import Consumer as _Consumer from .utils import uuid __all__ = ["Broadcast", "entry_to_queue", "maybe_declare", "uuid", "itermessages", "send_reply", "isend_reply", "collect_replies", "insured", "ipublish"] declared_entities = defaultdict(lambda: set()) insured_logger = Log("kombu.insurance") class Broadcast(Queue): """Convenience class used to define broadcast queues. Every queue instance will have a unique name, and both the queue and exchange is configured with auto deletion. :keyword name: This is used as the name of the exchange. :keyword queue: By default a unique id is used for the queue name for every consumer. You can specify a custom queue name here. :keyword \*\*kwargs: See :class:`~kombu.entity.Queue` for a list of additional keyword arguments supported. """ def __init__(self, name=None, queue=None, **kwargs): return super(Broadcast, self).__init__( name=queue or "bcast.%s" % (uuid(), ), **dict({"alias": name, "auto_delete": True, "exchange": Exchange(name, type="fanout"), }, **kwargs)) def maybe_declare(entity, channel, retry=False, **retry_policy): if retry: return _imaybe_declare(entity, channel, **retry_policy) return _maybe_declare(entity, channel) def _maybe_declare(entity, channel): declared = declared_entities[channel.connection.client] if not entity.is_bound: entity = entity.bind(channel) if not entity.can_cache_declaration or entity not in declared: entity.declare() declared.add(entity) return True return False def _imaybe_declare(entity, channel, **retry_policy): entity = entity(channel) return channel.connection.client.ensure(entity, _maybe_declare, **retry_policy)(entity, channel) def itermessages(conn, channel, queue, limit=1, timeout=None, Consumer=_Consumer, **kwargs): acc = deque() def on_message(body, message): acc.append((body, message)) with Consumer(channel, [queue], callbacks=[on_message], **kwargs): for _ in eventloop(conn, limit=limit, timeout=timeout, ignore_timeouts=True): try: yield acc.popleft() except IndexError: pass def eventloop(conn, limit=None, timeout=None, ignore_timeouts=False): """Best practice generator wrapper around ``Connection.drain_events``. Able to drain events forever, with a limit, and optionally ignoring timeout errors (a timeout of 1 is often used in environments where the socket can get "stuck", and is a best practice for Kombu consumers). **Examples** ``eventloop`` is a generator:: >>> from kombu.common import eventloop >>> it = eventloop(connection, timeout=1, ignore_timeouts=True) >>> it.next() # one event consumed, or timed out. >>> for _ in eventloop(connection, timeout=1, ignore_timeouts=True): ... pass # loop forever. It also takes an optional limit parameter, and timeout errors are propagated by default:: for _ in eventloop(connection, limit=1, timeout=1): pass .. seealso:: :func:`itermessages`, which is an event loop bound to one or more consumers, that yields any messages received. """ for i in limit and xrange(limit) or count(): try: yield conn.drain_events(timeout=timeout) except socket.timeout: if timeout and not ignore_timeouts: raise except socket.error: pass def send_reply(exchange, req, msg, producer=None, **props): content_type = req.content_type serializer = serialization.registry.type_to_name[content_type] maybe_declare(exchange, producer.channel) producer.publish(msg, exchange=exchange, **dict({"routing_key": req.properties["reply_to"], "correlation_id": req.properties.get("correlation_id"), "serializer": serializer}, **props)) def isend_reply(pool, exchange, req, msg, props, **retry_policy): return ipublish(pool, send_reply, (exchange, req, msg), props, **retry_policy) def collect_replies(conn, channel, queue, *args, **kwargs): no_ack = kwargs.setdefault("no_ack", True) received = False for body, message in itermessages(conn, channel, queue, *args, **kwargs): if not no_ack: message.ack() received = True yield body if received: channel.after_reply_message_received(queue.name) def _ensure_errback(exc, interval): insured_logger.error( "Connection error: %r. Retry in %ss\n" % (exc, interval), exc_info=sys.exc_info()) def revive_connection(connection, channel, on_revive=None): if on_revive: on_revive(channel) def revive_producer(producer, channel, on_revive=None): revive_connection(producer.connection, channel) if on_revive: on_revive(channel) def insured(pool, fun, args, kwargs, errback=None, on_revive=None, **opts): """Ensures function performing broker commands completes despite intermittent connection failures.""" errback = errback or _ensure_errback with pool.acquire(block=True) as conn: conn.ensure_connection(errback=errback) # we cache the channel for subsequent calls, this has to be # reset on revival. channel = conn.default_channel revive = partial(revive_connection, conn, on_revive=on_revive) insured = conn.autoretry(fun, channel, errback=errback, on_revive=revive, **opts) retval, _ = insured(*args, **dict(kwargs, connection=conn)) return retval def ipublish(pool, fun, args=(), kwargs={}, errback=None, on_revive=None, **retry_policy): with pool.acquire(block=True) as producer: errback = errback or _ensure_errback revive = partial(revive_producer, producer, on_revive=on_revive) f = producer.connection.ensure(producer, fun, on_revive=revive, errback=errback, **retry_policy) return f(*args, **dict(kwargs, producer=producer)) def entry_to_queue(queue, **options): binding_key = options.get("binding_key") or options.get("routing_key") e_durable = options.get("exchange_durable") if e_durable is None: e_durable = options.get("durable") e_auto_delete = options.get("exchange_auto_delete") if e_auto_delete is None: e_auto_delete = options.get("auto_delete") q_durable = options.get("queue_durable") if q_durable is None: q_durable = options.get("durable") q_auto_delete = options.get("queue_auto_delete") if q_auto_delete is None: q_auto_delete = options.get("auto_delete") e_arguments = options.get("exchange_arguments") q_arguments = options.get("queue_arguments") b_arguments = options.get("binding_arguments") exchange = Exchange(options.get("exchange"), type=options.get("exchange_type"), delivery_mode=options.get("delivery_mode"), routing_key=options.get("routing_key"), durable=e_durable, auto_delete=e_auto_delete, arguments=e_arguments) return Queue(queue, exchange=exchange, routing_key=binding_key, durable=q_durable, exclusive=options.get("exclusive"), auto_delete=q_auto_delete, no_ack=options.get("no_ack"), queue_arguments=q_arguments, binding_arguments=b_arguments)
{ "content_hash": "7b57fb6680bff260621e0bf89a6753fb", "timestamp": "", "source": "github", "line_count": 250, "max_line_length": 77, "avg_line_length": 33.56, "alnum_prop": 0.6195470798569725, "repo_name": "kumar303/rockit", "id": "f16c38832b0050f116782077291827e5d279384b", "size": "8390", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "vendor-local/kombu/common.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "4587" }, { "name": "Puppet", "bytes": "6677" }, { "name": "Python", "bytes": "4139254" }, { "name": "Ruby", "bytes": "1462" }, { "name": "Shell", "bytes": "3065" } ], "symlink_target": "" }
import os import copy import glob import json import logging from pupa.exceptions import DuplicateItemError from pupa.utils import get_pseudo_id, combine_dicts, utcnow from opencivicdata.models import LegislativeSession from pupa.exceptions import UnresolvedIdError, DataImportError def omnihash(obj): """ recursively hash unhashable objects """ if isinstance(obj, set): return hash(frozenset(omnihash(e) for e in obj)) elif isinstance(obj, (tuple, list)): return hash(tuple(omnihash(e) for e in obj)) elif isinstance(obj, dict): return hash(frozenset((k, omnihash(v)) for k, v in obj.items())) else: return hash(obj) def items_differ(jsonitems, dbitems, subfield_dict): """ check whether or not jsonitems and dbitems differ """ # short circuit common cases if len(jsonitems) == len(dbitems) == 0: # both are empty return False elif len(jsonitems) != len(dbitems): # if lengths differ, they're definitely different return True jsonitems = copy.deepcopy(jsonitems) keys = jsonitems[0].keys() # go over dbitems looking for matches for dbitem in dbitems: match = None for i, jsonitem in enumerate(jsonitems): # check if all keys (excluding subfields) match for k in keys: if k not in subfield_dict and getattr(dbitem, k) != jsonitem.get(k, None): break else: # all fields match so far, possibly equal, just check subfields now for k in subfield_dict: jsonsubitems = jsonitem[k] dbsubitems = list(getattr(dbitem, k).all()) if items_differ(jsonsubitems, dbsubitems, subfield_dict[k][2]): break else: # these items are equal, so let's mark it for removal match = i break if match is not None: # item exists in both, remove from jsonitems jsonitems.pop(match) else: # exists in db but not json return True # if we get here, jsonitems has to be empty because we asserted that the length was # the same and we found a match for each thing in dbitems, here's a safety check just in case if jsonitems: # pragma: no cover return True return False class BaseImporter(object): """ BaseImporter Override: get_object(data) limit_spec(spec) [optional, required if pseudo_ids are used] prepare_for_db(data) [optional] postimport() [optional] """ _type = None model_class = None related_models = {} preserve_order = set() def __init__(self, jurisdiction_id): self.jurisdiction_id = jurisdiction_id self.json_to_db_id = {} self.json_to_sources = {} self.duplicates = {} self.pseudo_id_cache = {} self.session_cache = {} self.logger = logging.getLogger("pupa") self.info = self.logger.info self.debug = self.logger.debug self.warning = self.logger.warning self.error = self.logger.error self.critical = self.logger.critical def get_session_id(self, identifier): if identifier not in self.session_cache: self.session_cache[identifier] = LegislativeSession.objects.get( identifier=identifier, jurisdiction_id=self.jurisdiction_id).id return self.session_cache[identifier] # no-ops to be overriden def prepare_for_db(self, data): return data def postimport(self): pass def resolve_json_id(self, json_id): """ Given an id found in scraped JSON, return a DB id for the object. params: json_id: id from json returns: database id raises: ValueError if id couldn't be resolved """ if not json_id: return None if json_id.startswith('~'): # keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import if json_id not in self.pseudo_id_cache: spec = get_pseudo_id(json_id) spec = self.limit_spec(spec) try: self.pseudo_id_cache[json_id] = self.model_class.objects.get(**spec).id except self.model_class.DoesNotExist: raise UnresolvedIdError('cannot resolve pseudo id to {}: {}'.format( self.model_class.__name__, json_id)) except self.model_class.MultipleObjectsReturned: raise UnresolvedIdError( 'multiple objects returned for pseudo id to {}: {}'.format( self.model_class.__name__, json_id) ) # return the cached object return self.pseudo_id_cache[json_id] # get the id that the duplicate points to, or use self json_id = self.duplicates.get(json_id, json_id) try: return self.json_to_db_id[json_id] except KeyError: raise UnresolvedIdError('cannot resolve id: {}'.format(json_id)) def import_directory(self, datadir): """ import a JSON directory into the database """ def json_stream(): # load all json, mapped by json_id for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')): with open(fname) as f: yield json.load(f) return self.import_data(json_stream()) def _prepare_imports(self, dicts): """ filters the import stream to remove duplicates also serves as a good place to override if anything special has to be done to the order of the import stream (see OrganizationImporter) """ # hash(json): id seen_hashes = {} for data in dicts: json_id = data.pop('_id') # map duplicates (using omnihash to tell if json dicts are identical-ish) objhash = omnihash(data) if objhash not in seen_hashes: seen_hashes[objhash] = json_id yield json_id, data else: self.duplicates[json_id] = seen_hashes[objhash] def import_data(self, data_items): """ import a bunch of dicts together """ # keep counts of all actions record = { 'insert': 0, 'update': 0, 'noop': 0, 'start': utcnow(), 'records': { 'insert': [], 'update': [], 'noop': [], } } for json_id, data in self._prepare_imports(data_items): if data.get('source_identified', False): data_sources = set([(s['url'], s.get('note', '')) for s in data['sources']]) obj_id, what = self.import_item(data) self.json_to_db_id[json_id] = obj_id if data.get('source_identified', False): self.json_to_sources[json_id] = data_sources record['records'][what].append(obj_id) record[what] += 1 # all objects are loaded, a perfect time to do inter-object resolution and other tasks self.postimport() record['end'] = utcnow() return {self._type: record} def import_item(self, data): """ function used by import_data """ what = 'noop' # remove the JSON _id (may still be there if called directly) data.pop('_id', None) # add fields/etc. data = self.prepare_for_db(data) try: obj = self.get_object(data) except self.model_class.DoesNotExist: obj = None # pull related fields off related = {} for field in self.related_models: related[field] = data.pop(field) # obj existed, check if we need to do an update if obj: _matched_obj_data = copy.deepcopy(obj.__dict__) if obj.id in self.json_to_db_id.values(): if data.get('source_identified', False): obj_sources = set([(s.url, s.note) for s in obj.sources.all()]) _matched_obj_data['sources'] = copy.deepcopy(obj_sources) possible_dupes = [k for k, v in self.json_to_db_id.items() if v == obj.id] for pd in possible_dupes: pd_sources = self.json_to_sources[pd] if len(pd_sources & obj_sources) > 0: raise DuplicateItemError(data, obj) else: raise DuplicateItemError(data, obj) # check base object for changes for key, value in data.items(): obj_value = getattr(obj, key) if key == "extras": obj_value = json.loads(obj_value) if obj_value != value: new_extras = combine_dicts(value, obj_value) if new_extras != obj_value: setattr(obj, key, new_extras) what = 'update' elif key == "name" and ('other_names' in related): existing_names = [oname.name for oname in obj.other_names.all()] existing_names.append(obj_value) if value in existing_names: continue else: if obj_value != value: self.debug('differing property: {k} ({v})'.format(k=key, v=getattr(obj, key))) self.debug('new value: {v}'.format(v=value)) setattr(obj, key, value) what = 'update' if what == 'update': obj.save() updated = self._update_related(obj, related, self.related_models) if updated: what = 'update' # need to create the data else: what = 'insert' try: obj = self.model_class.objects.create(**data) except TypeError as e: raise DataImportError('{} while importing {} as {}'.format(e, data, self.model_class)) self._create_related(obj, related, self.related_models) return obj.id, what def _update_related(self, obj, related, subfield_dict): """ update DB objects related to a base object obj: a base object to create related related: dict mapping field names to lists of related objects subfield_list: where to get the next layer of subfields """ # keep track of whether or not anything was updated updated = False # for each related field - check if there are differences for field, items in related.items(): # get items from database dbitems = list(getattr(obj, field).all()) dbitems_count = len(dbitems) # default to doing nothing do_delete = do_update = False if items and dbitems_count: # we have items, so does db, check for conflict do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2]) elif items and not dbitems_count: # we have items, db doesn't, just update do_update = True elif not items and dbitems_count: # db has items, we don't, just delete do_delete = True # otherwise: no items or dbitems, so nothing is done if do_delete: updated = True getattr(obj, field).all().delete() if do_update: updated = True self._create_related(obj, {field: items}, subfield_dict) return updated def _create_related(self, obj, related, subfield_dict): """ create DB objects related to a base object obj: a base object to create related related: dict mapping field names to lists of related objects subfield_list: where to get the next layer of subfields """ for field, items in related.items(): subobjects = [] all_subrelated = [] Subtype, reverse_id_field, subsubdict = subfield_dict[field] for order, item in enumerate(items): # pull off 'subrelated' (things that are related to this obj) subrelated = {} for subfield in subsubdict: subrelated[subfield] = item.pop(subfield) if field in self.preserve_order: item['order'] = order item[reverse_id_field] = obj.id try: subobjects.append(Subtype(**item)) all_subrelated.append(subrelated) except Exception as e: raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype)) # add all subobjects at once (really great for actions & votes) try: Subtype.objects.bulk_create(subobjects) except Exception as e: raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype)) # after import the subobjects, import their subsubobjects for subobj, subrel in zip(subobjects, all_subrelated): self._create_related(subobj, subrel, subsubdict)
{ "content_hash": "a89b036a9d591135a7165a0249d141c0", "timestamp": "", "source": "github", "line_count": 368, "max_line_length": 99, "avg_line_length": 37.87771739130435, "alnum_prop": 0.5343998852141474, "repo_name": "influence-usa/pupa", "id": "bcae2a113fea3cdd9f3c7f6db4308e862d98287c", "size": "13939", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pupa/importers/base.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "240929" }, { "name": "Shell", "bytes": "90" } ], "symlink_target": "" }
""" IxNetwork package tests that require actual IxNetwork chassis and active ports. Note that in many places there are (relatively) long delays to make sure the tests work in all setups. Test setup: Two IXN ports connected back to back. @author [email protected] """ import json import time from ixnetwork.test.test_base import IxnTestBase from ixnetwork.ixn_statistics_view import IxnPortStatistics, IxnTrafficItemStatistics, IxnFlowStatistics class IxnTestOnline(IxnTestBase): ports = [] def testReservePorts(self): self._reserve_ports('test_config', wait_for_up=False) def testPortsOnline(self): self._reserve_ports('test_config') for port in self.ports: assert(port.is_online()) for port in self.ports: port.release() def testReload(self): self._reserve_ports('test_config') for port in self.ports: port.release() self.ixn.root.get_object_by_name('Port 2').reserve(self.config.get('IXN', 'port1')) self.ixn.root.get_object_by_name('Port 1').reserve(self.config.get('IXN', 'port2')) self._reserve_ports('test_config') def testReleasePorts(self): self._reserve_ports('test_config') for port in self.ports: port.release() def testInterfaces(self): self._reserve_ports('test_config') for port in self.ports: port.send_arp_ns() for interface in port.get_children('interface'): gateway = interface.get_child('ipv4', 'ipv6').get_attribute('gateway') interface.ping(gateway) def testProtocolsActions(self): self._reserve_ports('test_config') self.ixn.send_arp_ns() self.ixn.protocols_start() time.sleep(16) self.ixn.protocols_stop() time.sleep(16) self.ixn.protocol_start('ospf') time.sleep(16) self.ixn.protocol_stop('ospf') def testGUITraffic(self): # Sometimes ARP fails on IxVM? To be sure, send automatic ARP (seems more stable...) self._reserve_ports('test_config_arp_on_link_up') self.ixn.regenerate() self.ixn.traffic_apply() self.ixn.l23_traffic_start() time.sleep(8) self.ixn.l23_traffic_stop() port_stats = IxnPortStatistics(self.ixn.root) port_stats.read_stats() print(json.dumps(port_stats.get_all_stats(), indent=1)) print(json.dumps(port_stats.get_object_stats('Port 1'), indent=1)) assert(int(port_stats.get_stat('Port 1', 'Frames Tx.')) >= 1600) self.ixn.l23_traffic_start(blocking=True) ti_stats = IxnTrafficItemStatistics(self.ixn.root) ti_stats.read_stats() assert(int(ti_stats.get_object_stats('Traffic Item 1')['Tx Frames']) == 1600) flow_stats = IxnFlowStatistics(self.ixn.root) flow_stats.read_stats() assert(int(flow_stats.get_stat('Port 2/Port 1/Traffic Item 1', 'Tx Frames')) == 800) def testNgpf(self): self._reserve_ports('ngpf_config') topologies = self.ixn.root.get_children('topology') self.ixn.protocols_start() time.sleep(8) assert(topologies[0].get_attribute('status') == 'started') self.ixn.protocols_stop() time.sleep(2) assert(topologies[0].get_attribute('status') == 'notStarted') # No need to test since protocol start/stop methods will raise exception if the operation failed. topologies[0].start() topologies[1].start() topologies[0].stop() topologies[1].stop() device_group = topologies[0].get_child('deviceGroup') device_group.start() device_group.stop() ethernet = device_group.get_child('ethernet') ethernet.start() ethernet.stop() def _reserve_ports(self, config_file, wait_for_up=True): self._load_config(config_file) self.ports = self.ixn.root.get_children('vport') self.ixn.root.get_object_by_name('Port 1').reserve(self.config.get('IXN', 'port1'), wait_for_up=False) self.ixn.root.get_object_by_name('Port 2').reserve(self.config.get('IXN', 'port2'), wait_for_up=False) if wait_for_up: for port in self.ports: port.wait_for_up(60)
{ "content_hash": "5aaf2041808646494abe2129677ec4b8", "timestamp": "", "source": "github", "line_count": 121, "max_line_length": 110, "avg_line_length": 35.50413223140496, "alnum_prop": 0.6277932960893855, "repo_name": "shmir/IxNetwork", "id": "a2df99880b661f904e108e5bda5a01ebc756b586", "size": "4296", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "ixnetwork/test/test_online.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "54593" } ], "symlink_target": "" }
import eventlet import errno import imp import logging import os import os.path import sys from daemon.daemon import DaemonContext from daemon.runner import DaemonRunner, make_pidlockfile from django.conf import settings as django_settings from django.core.management import call_command from eventlet import wsgi from optparse import OptionParser from sentry import VERSION def settings_from_file(filename, silent=False): """ Configures django settings from an arbitrary (non sys.path) filename. """ mod = imp.new_module('config') mod.__file__ = filename try: execfile(filename, mod.__dict__) except IOError, e: if silent and e.errno in (errno.ENOENT, errno.EISDIR): return False e.strerror = 'Unable to load configuration file (%s)' % e.strerror raise tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS") if not django_settings.configured: django_settings.configure() for setting in dir(mod): if setting == setting.upper(): setting_value = getattr(mod, setting) if setting in tuple_settings and type(setting_value) == str: setting_value = (setting_value,) # In case the user forgot the comma. setattr(django_settings, setting, setting_value) class SentryServer(DaemonRunner): pidfile_timeout = 10 start_message = u"started with pid %(pid)d" def __init__(self, host=None, port=None, pidfile=None, logfile=None, daemonize=False, debug=False): from sentry.conf import settings if not logfile: logfile = settings.WEB_LOG_FILE logfile = os.path.realpath(logfile) pidfile = os.path.realpath(pidfile or settings.WEB_PID_FILE) if daemonize: detach_process = True else: detach_process = False self.daemon_context = DaemonContext(detach_process=detach_process) self.daemon_context.stdout = open(logfile, 'w+') self.daemon_context.stderr = open(logfile, 'w+', buffering=0) self.debug = debug self.pidfile = make_pidlockfile(pidfile, self.pidfile_timeout) self.daemon_context.pidfile = self.pidfile self.host = host or settings.WEB_HOST self.port = port or settings.WEB_PORT # HACK: set app to self so self.app.run() works self.app = self def execute(self, action): self.action = action # Upgrade needs to happen before forking upgrade() if self.daemon_context.detach_process is False and self.action == 'start': # HACK: self.run() else: self.do_action() def run(self): from sentry.wsgi import application def inner_run(): wsgi.server(eventlet.listen((self.host, self.port)), application) if self.debug: from django.utils import autoreload autoreload.main(inner_run) else: inner_run() def cleanup(days=30, logger=None, site=None, server=None, level=None): """ Deletes a portion of the trailing data in Sentry based on their creation dates. For example, if ``days`` is 30, this would attempt to clean up all data thats older than 30 days. :param logger: limit all deletion scopes to messages from the specified logger. :param site: limit the message deletion scope to the specified site. :param server: limit the message deletion scope to the specified server. :param level: limit all deleteion scopes to messages that are greater than or equal to level. """ # TODO: we should collect which messages above were deleted # and potentially just send out post_delete signals where # GroupedMessage can update itself accordingly from sentry.models import GroupedMessage, Message, MessageCountByMinute, \ MessageFilterValue, FilterValue from sentry.utils.query import RangeQuerySetWrapper, SkinnyQuerySet import datetime ts = datetime.datetime.now() - datetime.timedelta(days=days) # Message qs = SkinnyQuerySet(Message).filter(datetime__lte=ts) if logger: qs = qs.filter(logger=logger) if site: qs = qs.filter(site=site) if server: qs = qs.filter(server_name=server) if level: qs = qs.filter(level__gte=level) groups_to_check = set() for obj in RangeQuerySetWrapper(qs): print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk) obj.delete() groups_to_check.add(obj.group_id) if not (server or site): # MessageCountByMinute qs = SkinnyQuerySet(MessageCountByMinute).filter(date__lte=ts) if logger: qs = qs.filter(group__logger=logger) if level: qs = qs.filter(group__level__gte=level) for obj in RangeQuerySetWrapper(qs): print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk) obj.delete() # GroupedMessage qs = SkinnyQuerySet(GroupedMessage).filter(last_seen__lte=ts) if logger: qs = qs.filter(logger=logger) if level: qs = qs.filter(level__gte=level) for obj in RangeQuerySetWrapper(qs): for key, value in SkinnyQuerySet(MessageFilterValue).filter(group=obj).values_list('key', 'value'): if not MessageFilterValue.objects.filter(key=key, value=value).exclude(group=obj).exists(): print ">>> Removing <FilterValue: key=%s, value=%s>" % (key, value) FilterValue.objects.filter(key=key, value=value).delete() print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk) obj.delete() # attempt to cleanup any groups that may now be empty groups_to_delete = [] for group_id in groups_to_check: if not Message.objects.filter(group=group_id).exists(): groups_to_delete.append(group_id) if groups_to_delete: for obj in SkinnyQuerySet(GroupedMessage).filter(pk__in=groups_to_delete): for key, value in SkinnyQuerySet(MessageFilterValue).filter(group=obj).values_list('key', 'value'): if not MessageFilterValue.objects.filter(key=key, value=value).exclude(group=obj).exists(): print ">>> Removing <FilterValue: key=%s, value=%s>" % (key, value) FilterValue.objects.filter(key=key, value=value).delete() print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk) obj.delete() def upgrade(interactive=True): from sentry.conf import settings call_command('syncdb', database=settings.DATABASE_USING or 'default', interactive=interactive) if 'south' in django_settings.INSTALLED_APPS: call_command('migrate', database=settings.DATABASE_USING or 'default', interactive=interactive) def main(): command_list = ('start', 'stop', 'restart', 'cleanup', 'upgrade') args = sys.argv if len(args) < 2 or args[1] not in command_list: print "usage: sentry [command] [options]" print print "Available subcommands:" for cmd in command_list: print " ", cmd sys.exit(1) parser = OptionParser(version="%%prog %s" % VERSION) parser.add_option('--config', metavar='CONFIG') if args[1] == 'start': parser.add_option('--host', metavar='HOSTNAME') parser.add_option('--port', type=int, metavar='PORT') parser.add_option('--daemon', action='store_true', default=False, dest='daemonize') parser.add_option('--no-daemon', action='store_false', default=False, dest='daemonize') parser.add_option('--debug', action='store_true', default=False, dest='debug') parser.add_option('--pidfile', dest='pidfile') parser.add_option('--logfile', dest='logfile') elif args[1] == 'stop': parser.add_option('--pidfile', dest='pidfile') parser.add_option('--logfile', dest='logfile') elif args[1] == 'cleanup': parser.add_option('--days', default='30', type=int, help='Numbers of days to truncate on.') parser.add_option('--logger', help='Limit truncation to only entries from logger.') parser.add_option('--site', help='Limit truncation to only entries from site.') parser.add_option('--server', help='Limit truncation to only entries from server.') parser.add_option('--level', help='Limit truncation to only entries greater than or equal to level (e.g. DEBUG).') (options, args) = parser.parse_args() # Install default server values if not django_settings.configured: os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server' if options.config: # assumed to be a file settings_from_file(options.config) else: config_path = os.path.expanduser(os.path.join('~', '.sentry', 'sentry.conf.py')) if os.path.exists(config_path): settings_from_file(config_path) if getattr(options, 'debug', False): django_settings.DEBUG = True if args[0] == 'upgrade': upgrade() elif args[0] == 'start': app = SentryServer(host=options.host, port=options.port, pidfile=options.pidfile, logfile=options.logfile, daemonize=options.daemonize, debug=options.debug) app.execute(args[0]) elif args[0] == 'restart': app = SentryServer() app.execute(args[0]) elif args[0] == 'stop': app = SentryServer(pidfile=options.pidfile, logfile=options.logfile) app.execute(args[0]) elif args[0] == 'cleanup': level = options.level if level is not None and not level.isdigit(): level = getattr(logging, level.upper()) cleanup(days=options.days, logger=options.logger, site=options.site, server=options.server, level=level) sys.exit(0) if __name__ == '__main__': main()
{ "content_hash": "a69b71ed896298caf1046b66091fae26", "timestamp": "", "source": "github", "line_count": 276, "max_line_length": 111, "avg_line_length": 37.242753623188406, "alnum_prop": 0.609981515711645, "repo_name": "m0sth8/django-sentry", "id": "a2b9c3fcf925b0f3e4fac62ea181de530c2b979d", "size": "10301", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "sentry/scripts/runner.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
from collections import OrderedDict import copy import pytest from zenconf import MergedConfig, walk_recursive class TestMergedConfig(object): PREFIX = "TEST" DEFAULTS = { 'logging': { 'version': 1, 'loggers': { 'MYAPP': { '-handlers': ['syslog', 'stderr'], # leading underscore # should be stripped 'propagate': True, 'log_level': 'DEBUG', }, } }, 'list_items': [ { 'mything': { 'sources': [ 'source1', 'source2' ] } }, { 'ANOTHER_THING': { 'sources': [ 'another-source1' ] } } ] } # nested ordered dicts for where order is important ORDERED_DEFAULTS = OrderedDict([ ('logging', OrderedDict([ ('version', 1), ('handlers', OrderedDict([ ('syslog', OrderedDict([ ('level', 'DEBUG'), ('class', 'logging.handlers.SysLogHandler'), ('address', '/dev/log'), ('formatter', 'verbose') ])), ('stderr', OrderedDict([ ('level', 'DEBUG'), ('class', 'logging.StreamHandler'), ('formatter', 'verbose') ])) ]) ), ('loggers', OrderedDict([ # renamed the following to lowercase for simplicity in the # ordering test ('myapp', OrderedDict([ ('handlers', ['syslog', 'stderr']), ('propagate', True), ('log_level', 'DEBUG'), ]) )]) )]) ) ]) ENV_VARS = { PREFIX + "_LOGGING__LOGGERS__MYAPP__LOG_LEVEL": "INFO", # should take # precedence "LOGGING__VERSION": 2 # No prefix so # should be ignored } CLI_OPTS = { "--logging--loggers--myapp--propagate": False } @pytest.fixture def merged_config(self): """ Returns an initialised MergedConfig instance :return: """ merged_config = MergedConfig(app_name=TestMergedConfig.PREFIX) return merged_config def test_initialisation(self, merged_config): assert merged_config._app_name.endswith('_') def test_walk_recursive(self, merged_config): result = walk_recursive( lambda k: str.lower(k), TestMergedConfig.DEFAULTS) def assert_lowercase_keys(item): if isinstance(item, list): for i in item: assert_lowercase_keys(i) elif isinstance(item, dict): for k, v in item.iteritems(): assert k == k.lower() if isinstance(v, dict) or isinstance(v, list): assert_lowercase_keys(v) assert_lowercase_keys(result) def test_add(self, merged_config): """ Test added dicts are correctly (recursively) normalised. :param merged_config: :return: """ merged_config.add(TestMergedConfig.DEFAULTS) merged_config.add(TestMergedConfig.ENV_VARS, strip_app_name=True, filter_by_app_name=True) merged_config.add(TestMergedConfig.CLI_OPTS, strip_app_name=True) def assert_valid_keys(item): for k, v in item.iteritems(): assert k == k.lower() assert '-' not in k assert not k.startswith('_') if isinstance(v, dict): assert_valid_keys(v) for config in merged_config._sources: assert_valid_keys(config) def test_merged_config(self, merged_config): """ The get_merged_config function is so small it's what we'd use to test merge_dict, so might as well save the boilerplate and just test via get_merged_config. :param merged_config: :return: """ merged_config.add(TestMergedConfig.DEFAULTS) merged_config.add(TestMergedConfig.ENV_VARS, strip_app_name=True, filter_by_app_name=True) merged_config.add(TestMergedConfig.CLI_OPTS, strip_app_name=True) config = merged_config.get_merged_config() assert config['logging']['version'] == 1 assert config['logging']['loggers']['myapp']['log_level'] == "INFO" assert not config['logging']['loggers']['myapp']['propagate'] assert len(config['logging']['loggers']['myapp']['handlers']) == 2 def test_dict_ordering(self, merged_config): """ Test that ordering is preserved in dictionaries supplied to MergedConfig :param merged_config: :return: """ merged_config.add(TestMergedConfig.ORDERED_DEFAULTS) merged_config.add(TestMergedConfig.ENV_VARS, strip_app_name=True, filter_by_app_name=True) merged_config.add(TestMergedConfig.CLI_OPTS, strip_app_name=True) config = merged_config.get_merged_config() expected_config = copy.deepcopy(TestMergedConfig.ORDERED_DEFAULTS) expected_config['logging']['loggers']['myapp']['propagate'] = False expected_config['logging']['loggers']['myapp']['log_level'] = "INFO" assert config == expected_config def test_key_normalisation_function(self, merged_config): """ Test that a custom key normalisation function will be applied :param merged_config: :return: """ upper_dict = { 'key_1': 1, 'KEY_2': 2 } merged_config.add(upper_dict, key_normalisation_func=lambda k: str.upper(k)) config = merged_config.get_merged_config() assert 'KEY_1' in config assert 'KEY_2' in config # def test_dont_clobber_existing_data(self, merged_config): # """ # Test that we don't clobber existing list entries # # :param merged_config: # :return: # """ # merged_config.add(TestMergedConfig.DEFAULTS) # # # I think the way to do this is to add an option to merge_dict that # # will look up key values in list items if the current item is a list. # # there will still be unsupported edge cases, but this should work for # # most uses... # merged_config.add({"LIST_ITEMS__MYTHING__SOURCES": "newname"}) # # config = merged_config.get_merged_config() # # print config # # # assert config['list_items'][0]['mything']['name'] == 'newname' # # assert config['list_items'][1]['another_thing']['name'] == 'name2'
{ "content_hash": "ec9de1b0b3781dfa6088e0316a3b6360", "timestamp": "", "source": "github", "line_count": 222, "max_line_length": 80, "avg_line_length": 33.153153153153156, "alnum_prop": 0.4922554347826087, "repo_name": "nws-cip/zenconf", "id": "0d8990eac10413a98279b94089f65ca7d3b0b87a", "size": "7360", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_merged_config.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "19403" } ], "symlink_target": "" }
"""List changes starred by one's account""" # pylint: disable=invalid-name import argparse import logging from libpycr.exceptions import PyCRError from libpycr.gerrit.client import Gerrit from libpycr.meta import GerritAccountBuiltin from libpycr.pager import Pager from libpycr.utils.commandline import expect_account_as_positional from libpycr.utils.output import Formatter, NEW_LINE from libpycr.utils.system import fail class LsStarred(GerritAccountBuiltin): """Implement the LS-STARRED command""" # Logger for this command log = logging.getLogger(__name__) @property def name(self): return 'ls-starred' @property def description(self): return 'list starred changes' @staticmethod def tokenize(idx, change): """Token generator for the output Yields a stream of tokens: tuple of (Token, string). :param idx: index of the change in the list of changes to fetch :type idx: int :param change: the change :type change: ChangeInfo :yield: tuple[Token, str] """ if idx: yield NEW_LINE for token in change.tokenize(): yield token @staticmethod def parse_command_line(arguments): """Parse the LS-STARRED command command-line arguments Returns the account id that is provided on the command line. If no account is provided, returns None. :param arguments: a list of command-line arguments to parse :type arguments: list[str] :rtype: str """ parser = argparse.ArgumentParser( description='List account starred changes') expect_account_as_positional(parser) cmdline = parser.parse_args(arguments) # fetch changes details return cmdline.account def run(self, arguments, *args, **kwargs): account_id = self.parse_command_line(arguments) try: changes = Gerrit.get_starred_changes(account_id or 'self') except PyCRError as why: fail('cannot list account starred changes', why) with Pager(command=self.name): for idx, change in enumerate(changes): print Formatter.format(self.tokenize(idx, change))
{ "content_hash": "e92ed772a08589d88b2dc74748928c99", "timestamp": "", "source": "github", "line_count": 82, "max_line_length": 74, "avg_line_length": 27.670731707317074, "alnum_prop": 0.6535918907007492, "repo_name": "JcDelay/pycr", "id": "53187548a310b4b3c0356f53dfc466d1f55efafd", "size": "2269", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "libpycr/builtin/accounts/ls-starred.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "136454" } ], "symlink_target": "" }
from module_kits.vtk_kit.utils import DVOrientationWidget import operator import vtk import wx class OverlaySliceViewer: """Class for viewing 3D binary masks in a slice-view. Supports arbitrary number of overlays in user-definable colours. """ has_active_slices = False def __init__(self, rwi, renderer): self.rwi = rwi self.renderer = renderer istyle = vtk.vtkInteractorStyleTrackballCamera() rwi.SetInteractorStyle(istyle) # we unbind the existing mousewheel handler so it doesn't # interfere rwi.Unbind(wx.EVT_MOUSEWHEEL) rwi.Bind(wx.EVT_MOUSEWHEEL, self._handler_mousewheel) #This is a collection of 1- or 3-component image plane widgets. Each entry corresponds to a single overlay. self.ipw_triads = {} self.add_overlay(0, [0, 0, 0, 0.1]) #Almost-transparent black - for showing the pickable plane stored at id = 0. # we only set the picker on the visible IPW, else the # invisible IPWs block picking! self.picker = vtk.vtkCellPicker() self.picker.SetTolerance(0.005) self.ipw_triads[0][0].SetPicker(self.picker) self.outline_source = vtk.vtkOutlineCornerFilter() m = vtk.vtkPolyDataMapper() m.SetInput(self.outline_source.GetOutput()) a = vtk.vtkActor() a.SetMapper(m) a.PickableOff() self.outline_actor = a self.dv_orientation_widget = DVOrientationWidget(rwi) # this can be used by clients to store the current world # position self.current_world_pos = (0,0,0) self.current_index_pos = (0,0,0) def add_overlay(self, id, rgba_colour): """Creates and ads a new (set of) image plane widgets corresponding to a new overlay. id : the string id which will be used to identify this overlay for future lookups. rgba_colour : a length 4 vector giving the red,green,blue,opacity value for this overlay. Range = [0,1] """ if self.ipw_triads.has_key(id): raise ValueError('The overlay id = "%s" is already in use! Cannot this id - aborting.' % id) else: new_ipw_triad = [vtk.vtkImagePlaneWidget() for _ in range(3)] lut = new_ipw_triad[0].GetLookupTable() lut.SetNumberOfTableValues(2) if len(self.ipw_triads) == 0: lut.SetTableValue(0,0,0,0,0.1) #Almost-transparent black - for showing the pickable plane else: lut.SetTableValue(0,0,0,0,0) #Transparent: for non-interfering overlay on existing layers lut.SetTableValue(1,rgba_colour[0],rgba_colour[1],rgba_colour[2],rgba_colour[3]) #Specified RGBA for binary "true" lut.Build() for ipw in new_ipw_triad: ipw.SetInteractor(self.rwi) ipw.SetLookupTable(lut) self.ipw_triads[id] = new_ipw_triad base_ipw_triad = self.ipw_triads[0] # now actually connect the sync_overlay observer for i,ipw in enumerate(base_ipw_triad): ipw.AddObserver('InteractionEvent',lambda vtk_o, vtk_e, i=i: self.observer_sync_overlay(base_ipw_triad, new_ipw_triad, i)) #fmalan-edit based on nnsmit-edit def observer_sync_overlay(self, master_ipw_triad, slave_ipw_triad, ipw_idx): # get the primary IPW master_ipw = master_ipw_triad[ipw_idx] # get the overlay IPW slave_ipw = slave_ipw_triad[ipw_idx] # get plane geometry from primary o,p1,p2 = master_ipw.GetOrigin(),master_ipw.GetPoint1(),master_ipw.GetPoint2() # and apply to the overlay slave_ipw.SetOrigin(o) slave_ipw.SetPoint1(p1) slave_ipw.SetPoint2(p2) slave_ipw.UpdatePlacement() # end edit def close(self): for id in self.ipw_triads.keys(): self.set_input(id, None) self.dv_orientation_widget.close() def activate_slice(self, id, idx): if idx in [1,2]: self.ipw_triads[id][idx].SetEnabled(1) self.ipw_triads[id][idx].SetPicker(self.picker) def deactivate_slice(self, id, idx): if idx in [1,2]: self.ipw_triads[id][idx].SetEnabled(0) self.ipw_triads[id][idx].SetPicker(None) def _get_input(self, id): return self.ipw_triads[id].GetInput() def get_world_pos(self, image_pos): """Given image coordinates, return the corresponding world position. """ idata = self._get_input(0) if not idata: return None ispacing = idata.GetSpacing() iorigin = idata.GetOrigin() # calculate real coords world = map(operator.add, iorigin, map(operator.mul, ispacing, image_pos[0:3])) return world def set_perspective(self): cam = self.renderer.GetActiveCamera() cam.ParallelProjectionOff() def set_parallel(self): cam = self.renderer.GetActiveCamera() cam.ParallelProjectionOn() def _handler_mousewheel(self, event): # event.GetWheelRotation() is + or - 120 depending on # direction of turning. if event.ControlDown(): delta = 10 elif event.ShiftDown(): delta = 1 else: # if user is NOT doing shift / control, we pass on to the # default handling which will give control to the VTK # mousewheel handlers. self.rwi.OnMouseWheel(event) return if event.GetWheelRotation() > 0: self._ipw1_delta_slice(+delta) else: self._ipw1_delta_slice(-delta) self.render() for id in self.ipw_triads.keys(): self.ipw_triads[id][0].InvokeEvent('InteractionEvent') def _ipw1_delta_slice(self, delta): """Move to the delta slices fw/bw, IF the IPW is currently aligned with one of the axes. """ ipw = self.ipw_triads[0][0] if ipw.GetPlaneOrientation() < 3: ci = ipw.GetSliceIndex() ipw.SetSliceIndex(ci + delta) def render(self): self.rwi.GetRenderWindow().Render() #TODO: Check this code # nnsmit edit # synch those overlays: ''' if self.overlay_active == 1: for i, ipw_overlay in enumerate(self.overlay_ipws): self.observer_sync_overlay(self.ipw_triads, i, 0) self.observer_sync_overlay(self.ipw_triads, i, 1) self.observer_sync_overlay(self.ipw_triads, i, 2) ''' # end edit def reset_camera(self): self.renderer.ResetCamera() def reset_to_default_view(self, view_index): """ @param view_index 2 for XY """ if view_index == 2: cam = self.renderer.GetActiveCamera() # then make sure it's up is the right way cam.SetViewUp(0,1,0) # just set the X,Y of the camera equal to the X,Y of the # focal point. fp = cam.GetFocalPoint() cp = cam.GetPosition() if cp[2] < fp[2]: z = fp[2] + (fp[2] - cp[2]) else: z = cp[2] cam.SetPosition(fp[0], fp[1], z) # first reset the camera self.renderer.ResetCamera() ''' # nnsmit edit # synch overlays as well: if self.overlay_active == 1: for i, ipw_overlay in enumerate(self.overlay_ipws): ipw_overlay.SetSliceIndex(0) ''' self.render() def set_input(self, id, input): if self.ipw_triads.has_key(id): selected_ipw_triad = self.ipw_triads[id] if input == selected_ipw_triad[0].GetInput(): return if input is None: ipw_triad = self.ipw_triads[id] for ipw in ipw_triad: # argh, this disable causes a render ipw.SetEnabled(0) ipw.SetInput(None) remaining_active_slices = False if self.has_active_slices: for key in self.ipw_triads: if key != 0: ipw_triad = self.ipw_triads[key] for ipw in ipw_triad: if ipw.GetEnabled(): remaining_active_slices = True break if remaining_active_slices: break if not remaining_active_slices: self.has_active_slices = False self.outline_source.SetInput(None) self.renderer.RemoveViewProp(self.outline_actor) self.dv_orientation_widget.set_input(None) base_ipw_triad = self.ipw_triads[0] for i, ipw in enumerate(base_ipw_triad): ipw.SetInput(None) ipw.SetEnabled(0) else: orientations = [2, 0, 1] active = [1, 0, 0] if not self.has_active_slices: self.outline_source.SetInput(input) self.renderer.AddViewProp(self.outline_actor) self.dv_orientation_widget.set_input(input) base_ipw_triad = self.ipw_triads[0] for i, ipw in enumerate(base_ipw_triad): ipw.SetInput(input) ipw.SetPlaneOrientation(orientations[i]) # axial ipw.SetSliceIndex(0) ipw.SetEnabled(active[i]) self.has_active_slices = True base_ipw_triad = self.ipw_triads[0] for i, ipw in enumerate(selected_ipw_triad): ipw.SetInput(input) ipw.SetPlaneOrientation(orientations[i]) # axial ipw.SetSliceIndex(0) ipw.SetEnabled(active[i]) self.observer_sync_overlay(base_ipw_triad, selected_ipw_triad, i) #sync to the current position of the base (pickable) triad else: raise ValueError('The overlay with id = "%s" was not found!' % id)
{ "content_hash": "32828c31898ca0f2ba12259e8af1db84", "timestamp": "", "source": "github", "line_count": 280, "max_line_length": 144, "avg_line_length": 38.96071428571429, "alnum_prop": 0.5276377303144193, "repo_name": "nagyistoce/devide", "id": "f780c1402a4620c42ac9c26c5ea83a9d40346d80", "size": "11160", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "modules/viewers/OverlaySliceViewer.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "NSIS", "bytes": "2786" }, { "name": "Python", "bytes": "3104368" }, { "name": "Shell", "bytes": "7369" } ], "symlink_target": "" }
""" Mesh stuff. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from ext_utils.meshzoo import iso_sphere import pdb def create_sphere(n_subdivide=3): # 3 makes 642 verts, 1280 faces, # 4 makes 2562 verts, 5120 faces verts, faces = iso_sphere(n_subdivide) return verts, faces def make_symmetric(verts, faces, idx=0): """ Assumes that the input mesh {V,F} is perfectly symmetric Splits the mesh along the X-axis, and reorders the mesh s.t. (so this is reflection on Y-axis..?) [indept verts, right (x>0) verts, left verts] v[:num_indept + num_sym] = A v[:-num_sym] = -A[num_indept:] """ left = verts[:, idx] < 0 right = verts[:, idx] > 0 center = verts[:, idx] == 0 left_inds = np.where(left)[0] right_inds = np.where(right)[0] center_inds = np.where(center)[0] num_indept = len(center_inds) num_sym = len(left_inds) assert(len(left_inds) == len(right_inds)) # For each right verts, find the corresponding left verts. indicator = np.array([1, 1, 1]) indicator[idx]=-1 prop_left_inds = np.hstack([np.where(np.all(verts == indicator * verts[ri], 1))[0] for ri in right_inds]) assert(prop_left_inds.shape[0] == num_sym) # Make sure right/left order are symmetric. for ind, (ri, li) in enumerate(zip(right_inds, prop_left_inds)): if np.any(verts[ri] != indicator * verts[li]): print('bad! %d' % ind) import ipdb; ipdb.set_trace() new_order = np.hstack([center_inds, right_inds, prop_left_inds]) # verts i is now vert j ind_perm = np.hstack([np.where(new_order==i)[0] for i in range(verts.shape[0])]) new_verts = verts[new_order, :] new_faces0 = ind_perm[faces] new_faces, num_indept_faces, num_sym_faces = make_faces_symmetric(new_verts, new_faces0, num_indept, num_sym) return new_verts, new_faces, num_indept, num_sym, num_indept_faces, num_sym_faces,new_order def make_faces_symmetric(verts, faces, num_indept_verts, num_sym_verts): """ This reorders the faces, such that it has this order: F_indept - independent face ids F_right (x>0) F_left 1. For each face, identify whether it's independent or has a symmetric face. A face is independent, if v_i is an independent vertex and if the other two v_j, v_k are the symmetric pairs. Otherwise, there are two kinds of symmetric faces: - v_i is indept, v_j, v_k are not the symmetric paris) - all three have symmetric counter verts. Returns a new set of faces that is in the above order. Also, the symmetric face pairs are reordered so that the vertex order is the same. i.e. verts[f_id] and verts[f_id_sym] is in the same vertex order, except the x coord are flipped """ DRAW = False indept_faces = [] right_faces = [] left_faces = [] indept_verts = verts[:num_indept_verts] symmetric_verts = verts[num_indept_verts:] # These are symmetric pairs right_ids = np.arange(num_indept_verts, num_indept_verts+num_sym_verts) left_ids = np.arange(num_indept_verts+num_sym_verts, num_indept_verts+2*num_sym_verts) # Make this for easy lookup # Saves for each vert_id, the symmetric vert_ids v_dict = {} for r_id, l_id in zip(right_ids, left_ids): v_dict[r_id] = l_id v_dict[l_id] = r_id # Return itself for indepentnet. for ind in range(num_indept_verts): v_dict[ind] = ind # Saves faces that contain this verts verts2faces = [np.where((faces == v_id).any(axis=1))[0] for v_id in range(verts.shape[0])] done_face = np.zeros(faces.shape[0]) # Make faces symmetric: for f_id in range(faces.shape[0]): if done_face[f_id]: continue v_ids = sorted(faces[f_id]) # This is triangles x [x,y,z] vs = verts[v_ids] # Find the corresponding vs? v_sym_ids = sorted([v_dict[v_id] for v_id in v_ids]) # Check if it's independent if sorted(v_sym_ids) == sorted(v_ids): # Independent!! indept_faces.append(faces[f_id]) # indept_faces.append(f_id) done_face[f_id] = 1 else: # Find the face with these verts. (so we can mark it done) possible_faces = np.hstack([verts2faces[v_id] for v_id in v_sym_ids]) possible_fids, counts = np.unique(possible_faces, return_counts=True) # The face id is the one that appears 3 times in this list. sym_fid = possible_fids[counts == 3][0] assert(sorted(v_sym_ids) == sorted(faces[sym_fid])) # Make sure that the order of these vertices are the same. # Go in the order of face: f_id face_here = faces[f_id] sym_face_here = [v_dict[v_id] for v_id in face_here] # Above is the same tri as faces[sym_fid], but vertices are in the order of faces[f_id] # Which one is right x > 0? # Only use unique verts in these faces to compute. unique_vids = np.array(v_ids) != np.array(v_sym_ids) if np.all(verts[face_here][unique_vids, 0] < verts[sym_face_here][unique_vids, 0]): # f_id is left left_faces.append(face_here) right_faces.append(sym_face_here) else: left_faces.append(sym_face_here) right_faces.append(face_here) done_face[f_id] = 1 done_face[sym_fid] = 1 # Draw # tri_sym = Mesh(verts[v_sym_ids], [[0, 1, 2]], vc='red') # mv.set_dynamic_meshes([mesh, tri, tri_sym]) assert(len(left_faces) + len(right_faces) + len(indept_faces) == faces.shape[0]) # Now concatenate them,, new_faces = np.vstack([indept_faces, right_faces, left_faces]) # Now sort each row of new_faces to make sure that bary centric coord will be same. num_indept_faces = len(indept_faces) num_sym_faces = len(right_faces) return new_faces, num_indept_faces, num_sym_faces def compute_edges2verts(verts, faces): """ Returns a list: [A, B, C, D] the 4 vertices for each edge. """ edge_dict = {} for face_id, (face) in enumerate(faces): for e1, e2, o_id in [(0, 1, 2), (0, 2, 1), (1, 2, 0)]: edge = tuple(sorted((face[e1], face[e2]))) other_v = face[o_id] if edge not in edge_dict.keys(): edge_dict[edge] = [other_v] else: if other_v not in edge_dict[edge]: edge_dict[edge].append(other_v) result = np.stack([np.hstack((edge, other_vs)) for edge, other_vs in edge_dict.items()]) return result def compute_vert2kp(verts, mean_shape): # verts: N x 3 # mean_shape: 3 x K (K=15) # # computes vert2kp: K x N matrix by picking NN to each point in mean_shape. if mean_shape.shape[0] == 3: # Make it K x 3 mean_shape = mean_shape.T num_kp = mean_shape.shape[1] nn_inds = [np.argmin(np.linalg.norm(verts - pt, axis=1)) for pt in mean_shape] dists = np.stack([np.linalg.norm(verts - verts[nn_ind], axis=1) for nn_ind in nn_inds]) vert2kp = -.5*(dists)/.01 return vert2kp def get_spherical_coords(X): # X is N x 3 rad = np.linalg.norm(X, axis=1) # Inclination theta = np.arccos(X[:, 2] / rad) # Azimuth phi = np.arctan2(X[:, 1], X[:, 0]) # Normalize both to be between [-1, 1] vv = (theta / np.pi) * 2 - 1 uu = ((phi + np.pi) / (2*np.pi)) * 2 - 1 # Return N x 2 return np.stack([uu, vv],1) def compute_uvsampler(verts, faces, tex_size=2): """ For this mesh, pre-computes the UV coordinates for F x T x T points. Returns F x T x T x 2 """ alpha = np.arange(tex_size, dtype=np.float) / (tex_size-1) beta = np.arange(tex_size, dtype=np.float) / (tex_size-1) import itertools # Barycentric coordinate values coords = np.stack([p for p in itertools.product(*[alpha, beta])]) vs = verts[faces] # Compute alpha, beta (this is the same order as NMR) v2 = vs[:, 2] v0v2 = vs[:, 0] - vs[:, 2] v1v2 = vs[:, 1] - vs[:, 2] # F x 3 x T*2 samples = np.dstack([v0v2, v1v2]).dot(coords.T) + v2.reshape(-1, 3, 1) # F x T*2 x 3 points on the sphere samples = np.transpose(samples, (0, 2, 1)) # Now convert these to uv. uv = get_spherical_coords(samples.reshape(-1, 3)) # uv = uv.reshape(-1, len(coords), 2) uv = uv.reshape(-1, tex_size, tex_size, 2) return uv def append_obj(mf_handle, vertices, faces): for vx in range(vertices.shape[0]): mf_handle.write('v {:f} {:f} {:f}\n'.format(vertices[vx, 0], vertices[vx, 1], vertices[vx, 2])) for fx in range(faces.shape[0]): mf_handle.write('f {:d} {:d} {:d}\n'.format(faces[fx, 0], faces[fx, 1], faces[fx, 2])) return
{ "content_hash": "c1714c2f4f106934211a8445a103ff4e", "timestamp": "", "source": "github", "line_count": 243, "max_line_length": 113, "avg_line_length": 37.07818930041152, "alnum_prop": 0.5918978912319645, "repo_name": "google/lasr", "id": "9472b90da036114e6517a35686473f94c73cc0e7", "size": "10169", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "third_party/ext_utils/mesh.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1015" }, { "name": "Python", "bytes": "171279" }, { "name": "Shell", "bytes": "12815" } ], "symlink_target": "" }
from ryu.base import app_manager from ryu.controller import ofp_event from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER from ryu.controller.handler import set_ev_cls from ryu.ofproto import ofproto_v1_3 from ryu.lib.packet import packet from ryu.lib.packet import ethernet class SimpleSwitch13(app_manager.RyuApp): OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] def __init__(self, *args, **kwargs): super(SimpleSwitch13, self).__init__(*args, **kwargs) self.mac_to_port = {} @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) def switch_features_handler(self, ev): datapath = ev.msg.datapath ofproto = datapath.ofproto parser = datapath.ofproto_parser # install table-miss flow entry # # We specify NO BUFFER to max_len of the output action due to # OVS bug. At this moment, if we specify a lesser number, e.g., # 128, OVS will send Packet-In with invalid buffer_id and # truncated packet data. In that case, we cannot output packets # correctly. The bug has been fixed in OVS v2.1.0. match = parser.OFPMatch() actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)] self.add_flow(datapath, 0, match, actions) def add_flow(self, datapath, priority, match, actions, buffer_id=None): ofproto = datapath.ofproto parser = datapath.ofproto_parser inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)] if buffer_id: mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id, priority=priority, match=match, instructions=inst) else: mod = parser.OFPFlowMod(datapath=datapath, priority=priority, match=match, instructions=inst) datapath.send_msg(mod) @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) def _packet_in_handler(self, ev): # If you hit this you might want to increase # the "miss_send_length" of your switch if ev.msg.msg_len < ev.msg.total_len: self.logger.debug("packet truncated: only %s of %s bytes", ev.msg.msg_len, ev.msg.total_len) msg = ev.msg datapath = msg.datapath ofproto = datapath.ofproto parser = datapath.ofproto_parser in_port = msg.match['in_port'] pkt = packet.Packet(msg.data) eth = pkt.get_protocols(ethernet.ethernet)[0] dst = eth.dst src = eth.src dpid = datapath.id self.mac_to_port.setdefault(dpid, {}) self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port) # learn a mac address to avoid FLOOD next time. self.mac_to_port[dpid][src] = in_port if dst in self.mac_to_port[dpid]: out_port = self.mac_to_port[dpid][dst] else: out_port = ofproto.OFPP_FLOOD actions = [parser.OFPActionOutput(out_port)] # install a flow to avoid packet_in next time if out_port != ofproto.OFPP_FLOOD: match = parser.OFPMatch(in_port=in_port, eth_dst=dst) # verify if we have a valid buffer_id, if yes avoid to send both # flow_mod & packet_out if msg.buffer_id != ofproto.OFP_NO_BUFFER: self.add_flow(datapath, 1, match, actions, msg.buffer_id) return else: self.add_flow(datapath, 1, match, actions) data = None if msg.buffer_id == ofproto.OFP_NO_BUFFER: data = msg.data out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port, actions=actions, data=data) datapath.send_msg(out) # test-repo
{ "content_hash": "0202344f84cce6a2cefaf4590f0123cb", "timestamp": "", "source": "github", "line_count": 101, "max_line_length": 78, "avg_line_length": 39.554455445544555, "alnum_prop": 0.5932415519399249, "repo_name": "luigiponti/laureatriennale", "id": "9f412ebd4249c31fb14db848103614cf0d361cb1", "size": "4608", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "simple_switch_13.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "200" }, { "name": "Python", "bytes": "180376" }, { "name": "Shell", "bytes": "867" } ], "symlink_target": "" }
''' Created on Feb 24, 2013 @author: nino ''' import unittest from marx.workflow.context import DefaultContext, Field from marx.workflow.exceptions import InvalidContextAssignment import nose.tools class TestField(unittest.TestCase): def test1(self): class Context(DefaultContext): user = Field(int) str_or_float = Field(str, float) assert hasattr(Context, 'USER') assert Context.USER == 'user' assert hasattr(Context, 'user') c = Context(None) c.user = 1 c.str_or_float = "s" c.str_or_float = 1. assert c.user == 1 with nose.tools.assert_raises(InvalidContextAssignment): #@UndefinedVariable c.user = "s" with nose.tools.assert_raises(InvalidContextAssignment): #@UndefinedVariable c.str_or_float = 1 # check that we haven't corrupted the class c2 = Context(None) assert c2.user is None def test_contribute_to_class(self): pass def test_multiple_inheritance(self): class A(DefaultContext): a = Field(int) class B(DefaultContext): b = Field(str) class C(A, B): c = Field(int) c = C() for f in "abc": assert hasattr(c, f) assert not hasattr(c, "d")
{ "content_hash": "84b56467333da2ef76b6b14db1ddef1e", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 84, "avg_line_length": 25.20754716981132, "alnum_prop": 0.5860778443113772, "repo_name": "escherba/marx", "id": "ad6e6a6240fda5fe5445207d1a561b112d5536c1", "size": "1336", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/workflow/test_context.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Makefile", "bytes": "296" }, { "name": "Python", "bytes": "36490" } ], "symlink_target": "" }
import pytest from osf_tests.factories import SubjectFactory class ProviderMixinBase(object): @property def provider_class(self): raise NotImplementedError @pytest.mark.django_db class ProviderExistsMixin(ProviderMixinBase): # Regression for https://openscience.atlassian.net/browse/OSF-7621 @pytest.fixture() def fake_url(self): raise NotImplementedError @pytest.fixture() def provider_url(self): raise NotImplementedError @pytest.fixture() def provider_url_two(self): raise NotImplementedError @pytest.fixture() def provider_list_url(self): raise NotImplementedError @pytest.fixture() def provider_list_url_fake(self): raise NotImplementedError @pytest.fixture() def provider(self): return self.provider_class() @pytest.fixture() def provider_two(self): return self.provider_class() def test_provider_exists(self, app, provider_url, fake_url, provider_list_url, provider_list_url_fake): detail_res = app.get(provider_url) assert detail_res.status_code == 200 licenses_res = app.get('{}licenses/'.format(provider_url)) assert licenses_res.status_code == 200 res = app.get(provider_list_url) assert res.status_code == 200 taxonomies_res = app.get('{}taxonomies/'.format(provider_url)) assert taxonomies_res.status_code == 200 # test_preprint_provider_does_not_exist_returns_404 detail_res = app.get(fake_url, expect_errors=True) assert detail_res.status_code == 404 licenses_res = app.get( '{}licenses/'.format(fake_url), expect_errors=True) assert licenses_res.status_code == 404 res = app.get( provider_list_url_fake, expect_errors=True) assert res.status_code == 404 taxonomies_res = app.get( '{}taxonomies/'.format(fake_url), expect_errors=True) assert taxonomies_res.status_code == 404 def test_has_highlighted_subjects_flag( self, app, provider, provider_two, provider_url, provider_url_two): SubjectFactory( provider=provider, text='A', highlighted=True) SubjectFactory(provider=provider_two, text='B') res = app.get(provider_url) assert res.status_code == 200 res_subjects = res.json['data']['relationships']['highlighted_taxonomies'] assert res_subjects['links']['related']['meta']['has_highlighted_subjects'] is True res = app.get(provider_url_two) assert res.status_code == 200 res_subjects = res.json['data']['relationships']['highlighted_taxonomies'] assert res_subjects['links']['related']['meta']['has_highlighted_subjects'] is False @pytest.mark.django_db class ProviderSubjectsMixin(ProviderMixinBase): ''' Subject Hierarchy +-----------------------------+ | | | +-------->B+----->F | | | | | A+----------->C | | | | | +-------->D+----->G | | | | H+------>I+----->J | | | | | +----->K | | | | L+------>M+----->N | | | | | +------->E | | | | O | +-----------------------------+ ''' @pytest.fixture(autouse=True) def subA(self): return SubjectFactory(text='A') @pytest.fixture(autouse=True) def subB(self, subA): return SubjectFactory(text='B', parent=subA) @pytest.fixture(autouse=True) def subC(self, subA): return SubjectFactory(text='C', parent=subA) @pytest.fixture(autouse=True) def subD(self, subA): return SubjectFactory(text='D', parent=subA) @pytest.fixture(autouse=True) def subF(self, subB): return SubjectFactory(text='F', parent=subB) @pytest.fixture(autouse=True) def subG(self, subD): return SubjectFactory(text='G', parent=subD) @pytest.fixture(autouse=True) def subH(self): return SubjectFactory(text='H') @pytest.fixture(autouse=True) def subI(self, subH): return SubjectFactory(text='I', parent=subH) @pytest.fixture(autouse=True) def subJ(self, subI): return SubjectFactory(text='J', parent=subI) @pytest.fixture(autouse=True) def subK(self, subI): return SubjectFactory(text='K', parent=subI) @pytest.fixture(autouse=True) def subL(self): return SubjectFactory(text='L') @pytest.fixture(autouse=True) def subM(self, subL): return SubjectFactory(text='M', parent=subL) @pytest.fixture(autouse=True) def subE(self, subM): return SubjectFactory(text='E', parent=subM) @pytest.fixture(autouse=True) def subN(self, subM): return SubjectFactory(text='N', parent=subM) @pytest.fixture(autouse=True) def subO(self): return SubjectFactory(text='O') @pytest.fixture() def rules(self, subA, subB, subD, subH, subI, subJ, subL): return [ ([subA._id, subB._id], False), ([subA._id, subD._id], True), ([subH._id, subI._id, subJ._id], True), ([subL._id], True) ] # This should allow: A, B, D, G, H, I, J, L, M, N and E # This should not allow: C, F, K, O @pytest.fixture() def lawless_provider(self): return self.provider_class() @pytest.fixture() def ruled_provider(self, rules): provider = self.provider_class() provider.subjects_acceptable = rules provider.save() return provider @pytest.fixture() def lawless_url(self): raise NotImplementedError @pytest.fixture() def ruled_url(self): raise NotImplementedError @pytest.fixture() def base_url(self): raise NotImplementedError def test_max_page_size(self, app, lawless_provider, base_url): res = app.get(base_url) assert res.status_code == 200 assert res.json['links']['meta']['per_page'] == 10 res = app.get(base_url + '?page[size]=150') assert res.status_code == 200 assert res.json['links']['meta']['per_page'] == 150 res = app.get(base_url + '?page[size]=2018') assert res.status_code == 200 assert res.json['links']['meta']['per_page'] == 1000 def test_no_rules_grabs_all(self, app, lawless_url): res = app.get(lawless_url) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 15 def test_rules_only_grab_acceptable_subjects(self, app, ruled_url): res = app.get(ruled_url) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 11 def test_no_rules_with_null_parent_filter(self, app, lawless_url): res = app.get(lawless_url + 'filter[parents]=null') assert res.status_code == 200 assert res.json['links']['meta']['total'] == 4 def test_rules_enforced_with_null_parent_filter(self, app, ruled_url): res = app.get(ruled_url + 'filter[parents]=null') assert res.status_code == 200 assert res.json['links']['meta']['total'] == 3 texts = [item['attributes']['text'] for item in res.json['data']] assert 'A' in texts assert 'H' in texts assert 'L' in texts assert 'O' not in texts def test_no_rules_with_parents_filter(self, app, lawless_url, subB, subI, subM): res = app.get( lawless_url + 'filter[parents]={}'.format( subB._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 1 assert res.json['data'][0]['attributes']['text'] == 'F' res = app.get( lawless_url + 'filter[parents]={}'.format( subI._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 2 res = app.get( lawless_url + 'filter[parents]={}'.format( subM._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 2 def test_rules_enforced_with_parents_filter(self, app, ruled_url, subB, subI, subM): res = app.get( ruled_url + 'filter[parents]={}'.format( subB._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 0 texts = [item['attributes']['text'] for item in res.json['data']] assert 'F' not in texts res = app.get( ruled_url + 'filter[parents]={}'.format( subI._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 1 texts = [item['attributes']['text'] for item in res.json['data']] assert 'J' in texts assert 'K' not in texts res = app.get( ruled_url + 'filter[parents]={}'.format( subM._id)) def test_no_rules_with_parent_filter(self, app, lawless_url, subB, subI, subM): res = app.get( lawless_url + 'filter[parent]={}'.format( subB._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 1 assert res.json['data'][0]['attributes']['text'] == 'F' res = app.get( lawless_url + 'filter[parent]={}'.format( subI._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 2 res = app.get( lawless_url + 'filter[parent]={}'.format( subM._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 2 def test_rules_enforced_with_parent_filter(self, app, ruled_url, subB, subI, subM): res = app.get( ruled_url + 'filter[parent]={}'.format( subB._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 0 texts = [item['attributes']['text'] for item in res.json['data']] assert 'F' not in texts res = app.get( ruled_url + 'filter[parent]={}'.format( subI._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 1 texts = [item['attributes']['text'] for item in res.json['data']] assert 'J' in texts assert 'K' not in texts res = app.get( ruled_url + 'filter[parent]={}'.format( subM._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 2 texts = [item['attributes']['text'] for item in res.json['data']] assert 'N' in texts assert 'E' in texts def test_no_rules_with_grandparent_filter(self, app, lawless_url, subA): res = app.get( lawless_url + 'filter[parents]={}'.format( subA._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 3 def test_rules_enforced_with_grandparent_filter(self, app, ruled_url, subA): res = app.get( ruled_url + 'filter[parents]={}'.format( subA._id)) assert res.status_code == 200 assert res.json['links']['meta']['total'] == 2 texts = [item['attributes']['text'] for item in res.json['data']] assert 'B' in texts assert 'D' in texts assert 'C' not in texts @pytest.mark.django_db class ProviderSpecificSubjectsMixin(ProviderMixinBase): @pytest.fixture(autouse=True) def provider_1(self): return self.provider_class() @pytest.fixture(autouse=True) def provider_2(self): return self.provider_class() @pytest.fixture(autouse=True) def root_subject_1(self, provider_1): return SubjectFactory(text='R1', provider=provider_1) @pytest.fixture(autouse=True) def parent_subject_1(self, provider_1, root_subject_1): return SubjectFactory(text='P1', provider=provider_1, parent=root_subject_1) @pytest.fixture(autouse=True) def child_subject_1(self, provider_1, parent_subject_1): return SubjectFactory(text='C1', provider=provider_1, parent=parent_subject_1) @pytest.fixture(autouse=True) def root_subject_2(self, provider_2): return SubjectFactory(text='R2', provider=provider_2) @pytest.fixture(autouse=True) def parent_subject_2(self, provider_2, root_subject_2): return SubjectFactory(text='P2', provider=provider_2, parent=root_subject_2) @pytest.fixture(autouse=True) def child_subject_2(self, provider_2, parent_subject_2): return SubjectFactory(text='C2', provider=provider_2, parent=parent_subject_2) @pytest.fixture() def url_1(self): raise NotImplementedError @pytest.fixture() def url_2(self): raise NotImplementedError def test_mapped_subjects_are_not_shared_list(self, app, url_1, url_2): res_1 = app.get(url_1) res_2 = app.get(url_2) assert res_1.status_code == 200 assert res_2.status_code == 200 assert res_1.json['links']['meta']['total'] == 3 assert res_2.json['links']['meta']['total'] == 3 assert len(set([d['attributes']['text'] for d in res_1.json['data']]) & set([d['attributes']['text'] for d in res_2.json['data']])) \ == 0 assert len(set([d['attributes']['text'] for d in res_1.json['data']]) | set([d['attributes']['text'] for d in res_2.json['data']])) \ == 6 def test_mapped_subjects_are_not_shared_filter(self, app, url_1, url_2, root_subject_1, root_subject_2): res_1 = app.get( url_1 + 'filter[parent]={}'.format( root_subject_1._id)) res_2 = app.get( url_2 + 'filter[parent]={}'.format( root_subject_2._id)) assert res_1.status_code == 200 assert res_2.status_code == 200 assert res_1.json['links']['meta']['total'] == 1 assert res_2.json['links']['meta']['total'] == 1 assert len(set([d['attributes']['text'] for d in res_1.json['data']]) & set([d['attributes']['text'] for d in res_2.json['data']])) \ == 0 assert len(set([d['attributes']['text'] for d in res_1.json['data']]) | set([d['attributes']['text'] for d in res_2.json['data']])) \ == 2 def test_mapped_subjects_filter_wrong_provider(self, app, url_1, url_2, root_subject_1, root_subject_2): res_1 = app.get( url_1 + 'filter[parent]={}'.format( root_subject_2)) res_2 = app.get( url_2 + 'filter[parent]={}'.format( root_subject_1)) assert res_1.status_code == 200 assert res_2.status_code == 200 assert res_1.json['links']['meta']['total'] == 0 assert res_2.json['links']['meta']['total'] == 0
{ "content_hash": "f93818d5b92c4d9c3701d9966fbff21a", "timestamp": "", "source": "github", "line_count": 482, "max_line_length": 108, "avg_line_length": 32.24688796680498, "alnum_prop": 0.5472559994852988, "repo_name": "icereval/osf.io", "id": "8f19f53f1d00a8b43b5ad9293f669c20414b3aae", "size": "15543", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "api_tests/providers/mixins.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "108526" }, { "name": "HTML", "bytes": "261937" }, { "name": "JavaScript", "bytes": "1856123" }, { "name": "Mako", "bytes": "691640" }, { "name": "Python", "bytes": "8331919" }, { "name": "VCL", "bytes": "13885" } ], "symlink_target": "" }
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import functools import re from typing import ( Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, ) from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.api_core.client_options import ClientOptions from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore import pkg_resources try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 from google.protobuf import empty_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.cloud.apigee_registry_v1.types import provisioning_service from .client import ProvisioningClient from .transports.base import DEFAULT_CLIENT_INFO, ProvisioningTransport from .transports.grpc_asyncio import ProvisioningGrpcAsyncIOTransport class ProvisioningAsyncClient: """The service that is used for managing the data plane provisioning of the Registry. """ _client: ProvisioningClient DEFAULT_ENDPOINT = ProvisioningClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = ProvisioningClient.DEFAULT_MTLS_ENDPOINT instance_path = staticmethod(ProvisioningClient.instance_path) parse_instance_path = staticmethod(ProvisioningClient.parse_instance_path) common_billing_account_path = staticmethod( ProvisioningClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( ProvisioningClient.parse_common_billing_account_path ) common_folder_path = staticmethod(ProvisioningClient.common_folder_path) parse_common_folder_path = staticmethod(ProvisioningClient.parse_common_folder_path) common_organization_path = staticmethod(ProvisioningClient.common_organization_path) parse_common_organization_path = staticmethod( ProvisioningClient.parse_common_organization_path ) common_project_path = staticmethod(ProvisioningClient.common_project_path) parse_common_project_path = staticmethod( ProvisioningClient.parse_common_project_path ) common_location_path = staticmethod(ProvisioningClient.common_location_path) parse_common_location_path = staticmethod( ProvisioningClient.parse_common_location_path ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: ProvisioningAsyncClient: The constructed client. """ return ProvisioningClient.from_service_account_info.__func__(ProvisioningAsyncClient, info, *args, **kwargs) # type: ignore @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: ProvisioningAsyncClient: The constructed client. """ return ProvisioningClient.from_service_account_file.__func__(ProvisioningAsyncClient, filename, *args, **kwargs) # type: ignore from_service_account_json = from_service_account_file @classmethod def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[ClientOptions] = None ): """Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the client cert source is None. (2) if `client_options.client_cert_source` is provided, use the provided one; if the default client cert source exists, use the default one; otherwise the client cert source is None. The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the default mTLS endpoint; if the environment variabel is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. More details can be found at https://google.aip.dev/auth/4114. Args: client_options (google.api_core.client_options.ClientOptions): Custom options for the client. Only the `api_endpoint` and `client_cert_source` properties may be used in this method. Returns: Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the client cert source to use. Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ return ProvisioningClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore @property def transport(self) -> ProvisioningTransport: """Returns the transport used by the client instance. Returns: ProvisioningTransport: The transport used by the client instance. """ return self._client.transport get_transport_class = functools.partial( type(ProvisioningClient).get_transport_class, type(ProvisioningClient) ) def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, ProvisioningTransport] = "grpc_asyncio", client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the provisioning client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.ProvisioningTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. """ self._client = ProvisioningClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def create_instance( self, request: Optional[ Union[provisioning_service.CreateInstanceRequest, dict] ] = None, *, parent: Optional[str] = None, instance: Optional[provisioning_service.Instance] = None, instance_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Provisions instance resources for the Registry. .. code-block:: python # This snippet has been automatically generated and should be regarded as a # code template only. # It will require modifications to work: # - It may require correct/in-range values for request initialization. # - It may require specifying regional endpoints when creating the service # client as shown in: # https://googleapis.dev/python/google-api-core/latest/client_options.html from google.cloud import apigee_registry_v1 async def sample_create_instance(): # Create a client client = apigee_registry_v1.ProvisioningAsyncClient() # Initialize request argument(s) instance = apigee_registry_v1.Instance() instance.config.cmek_key_name = "cmek_key_name_value" request = apigee_registry_v1.CreateInstanceRequest( parent="parent_value", instance_id="instance_id_value", instance=instance, ) # Make the request operation = client.create_instance(request=request) print("Waiting for operation to complete...") response = await operation.result() # Handle the response print(response) Args: request (Optional[Union[google.cloud.apigee_registry_v1.types.CreateInstanceRequest, dict]]): The request object. Request message for CreateInstance. parent (:class:`str`): Required. Parent resource of the Instance, of the form: ``projects/*/locations/*`` This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. instance (:class:`google.cloud.apigee_registry_v1.types.Instance`): Required. The Instance. This corresponds to the ``instance`` field on the ``request`` instance; if ``request`` is provided, this should not be set. instance_id (:class:`str`): Required. Identifier to assign to the Instance. Must be unique within scope of the parent resource. This corresponds to the ``instance_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.cloud.apigee_registry_v1.types.Instance` An Instance represents the instance resources of the Registry. Currently, only one instance is allowed for each project. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, instance, instance_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = provisioning_service.CreateInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if instance is not None: request.instance = instance if instance_id is not None: request.instance_id = instance_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_instance, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, provisioning_service.Instance, metadata_type=provisioning_service.OperationMetadata, ) # Done; return the response. return response async def delete_instance( self, request: Optional[ Union[provisioning_service.DeleteInstanceRequest, dict] ] = None, *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Deletes the Registry instance. .. code-block:: python # This snippet has been automatically generated and should be regarded as a # code template only. # It will require modifications to work: # - It may require correct/in-range values for request initialization. # - It may require specifying regional endpoints when creating the service # client as shown in: # https://googleapis.dev/python/google-api-core/latest/client_options.html from google.cloud import apigee_registry_v1 async def sample_delete_instance(): # Create a client client = apigee_registry_v1.ProvisioningAsyncClient() # Initialize request argument(s) request = apigee_registry_v1.DeleteInstanceRequest( name="name_value", ) # Make the request operation = client.delete_instance(request=request) print("Waiting for operation to complete...") response = await operation.result() # Handle the response print(response) Args: request (Optional[Union[google.cloud.apigee_registry_v1.types.DeleteInstanceRequest, dict]]): The request object. Request message for DeleteInstance. name (:class:`str`): Required. The name of the Instance to delete. Format: ``projects/*/locations/*/instances/*``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = provisioning_service.DeleteInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_instance, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Wrap the response in an operation future. response = operation_async.from_gapic( response, self._client._transport.operations_client, empty_pb2.Empty, metadata_type=provisioning_service.OperationMetadata, ) # Done; return the response. return response async def get_instance( self, request: Optional[Union[provisioning_service.GetInstanceRequest, dict]] = None, *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> provisioning_service.Instance: r"""Gets details of a single Instance. .. code-block:: python # This snippet has been automatically generated and should be regarded as a # code template only. # It will require modifications to work: # - It may require correct/in-range values for request initialization. # - It may require specifying regional endpoints when creating the service # client as shown in: # https://googleapis.dev/python/google-api-core/latest/client_options.html from google.cloud import apigee_registry_v1 async def sample_get_instance(): # Create a client client = apigee_registry_v1.ProvisioningAsyncClient() # Initialize request argument(s) request = apigee_registry_v1.GetInstanceRequest( name="name_value", ) # Make the request response = await client.get_instance(request=request) # Handle the response print(response) Args: request (Optional[Union[google.cloud.apigee_registry_v1.types.GetInstanceRequest, dict]]): The request object. Request message for GetInstance. name (:class:`str`): Required. The name of the Instance to retrieve. Format: ``projects/*/locations/*/instances/*``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.apigee_registry_v1.types.Instance: An Instance represents the instance resources of the Registry. Currently, only one instance is allowed for each project. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = provisioning_service.GetInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_instance, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operations_pb2.ListOperationsResponse: r"""Lists operations that match the specified filter in the request. Args: request (:class:`~.operations_pb2.ListOperationsRequest`): The request object. Request message for `ListOperations` method. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operations_pb2.ListOperationsResponse: Response message for ``ListOperations`` method. """ # Create or coerce a protobuf request object. # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): request = operations_pb2.ListOperationsRequest(**request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method.wrap_method( self._client._transport.list_operations, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def get_operation( self, request: Optional[operations_pb2.GetOperationRequest] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operations_pb2.Operation: r"""Gets the latest state of a long-running operation. Args: request (:class:`~.operations_pb2.GetOperationRequest`): The request object. Request message for `GetOperation` method. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.operations_pb2.Operation: An ``Operation`` object. """ # Create or coerce a protobuf request object. # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): request = operations_pb2.GetOperationRequest(**request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method.wrap_method( self._client._transport.get_operation, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def delete_operation( self, request: Optional[operations_pb2.DeleteOperationRequest] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Args: request (:class:`~.operations_pb2.DeleteOperationRequest`): The request object. Request message for `DeleteOperation` method. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: None """ # Create or coerce a protobuf request object. # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): request = operations_pb2.DeleteOperationRequest(**request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method.wrap_method( self._client._transport.delete_operation, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) async def cancel_operation( self, request: Optional[operations_pb2.CancelOperationRequest] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Args: request (:class:`~.operations_pb2.CancelOperationRequest`): The request object. Request message for `CancelOperation` method. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: None """ # Create or coerce a protobuf request object. # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): request = operations_pb2.CancelOperationRequest(**request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method.wrap_method( self._client._transport.cancel_operation, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) async def set_iam_policy( self, request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Sets the IAM access control policy on the specified function. Replaces any existing policy. Args: request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): The request object. Request message for `SetIamPolicy` method. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.policy_pb2.Policy: Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources. A ``Policy`` is a collection of ``bindings``. A ``binding`` binds one or more ``members`` to a single ``role``. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A ``role`` is a named list of permissions (defined by IAM or configured by users). A ``binding`` can optionally specify a ``condition``, which is a logic expression that further constrains the role binding based on attributes about the request and/or target resource. **JSON Example** :: { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:[email protected]", "group:[email protected]", "domain:google.com", "serviceAccount:[email protected]" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": ["user:[email protected]"], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ] } **YAML Example** :: bindings: - members: - user:[email protected] - group:[email protected] - domain:google.com - serviceAccount:[email protected] role: roles/resourcemanager.organizationAdmin - members: - user:[email protected] role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') For a description of IAM and its features, see the `IAM developer's guide <https://cloud.google.com/iam/docs>`__. """ # Create or coerce a protobuf request object. # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.SetIamPolicyRequest(**request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method.wrap_method( self._client._transport.set_iam_policy, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def get_iam_policy( self, request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Gets the IAM access control policy for a function. Returns an empty policy if the function exists and does not have a policy set. Args: request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): The request object. Request message for `GetIamPolicy` method. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.policy_pb2.Policy: Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources. A ``Policy`` is a collection of ``bindings``. A ``binding`` binds one or more ``members`` to a single ``role``. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A ``role`` is a named list of permissions (defined by IAM or configured by users). A ``binding`` can optionally specify a ``condition``, which is a logic expression that further constrains the role binding based on attributes about the request and/or target resource. **JSON Example** :: { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:[email protected]", "group:[email protected]", "domain:google.com", "serviceAccount:[email protected]" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": ["user:[email protected]"], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ] } **YAML Example** :: bindings: - members: - user:[email protected] - group:[email protected] - domain:google.com - serviceAccount:[email protected] role: roles/resourcemanager.organizationAdmin - members: - user:[email protected] role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') For a description of IAM and its features, see the `IAM developer's guide <https://cloud.google.com/iam/docs>`__. """ # Create or coerce a protobuf request object. # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.GetIamPolicyRequest(**request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method.wrap_method( self._client._transport.get_iam_policy, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def test_iam_permissions( self, request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Tests the specified IAM permissions against the IAM access control policy for a function. If the function does not exist, this will return an empty set of permissions, not a NOT_FOUND error. Args: request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): The request object. Request message for `TestIamPermissions` method. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.iam_policy_pb2.TestIamPermissionsResponse: Response message for ``TestIamPermissions`` method. """ # Create or coerce a protobuf request object. # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.TestIamPermissionsRequest(**request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method.wrap_method( self._client._transport.test_iam_permissions, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def get_location( self, request: Optional[locations_pb2.GetLocationRequest] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> locations_pb2.Location: r"""Gets information about a location. Args: request (:class:`~.location_pb2.GetLocationRequest`): The request object. Request message for `GetLocation` method. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.location_pb2.Location: Location object. """ # Create or coerce a protobuf request object. # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): request = locations_pb2.GetLocationRequest(**request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method.wrap_method( self._client._transport.get_location, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def list_locations( self, request: Optional[locations_pb2.ListLocationsRequest] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> locations_pb2.ListLocationsResponse: r"""Lists information about the supported locations for this service. Args: request (:class:`~.location_pb2.ListLocationsRequest`): The request object. Request message for `ListLocations` method. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.location_pb2.ListLocationsResponse: Response message for ``ListLocations`` method. """ # Create or coerce a protobuf request object. # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): request = locations_pb2.ListLocationsRequest(**request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method.wrap_method( self._client._transport.list_locations, default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response async def __aenter__(self): return self async def __aexit__(self, exc_type, exc, tb): await self.transport.close() try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-apigee-registry", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("ProvisioningAsyncClient",)
{ "content_hash": "e393329a310c364565472138788c4da3", "timestamp": "", "source": "github", "line_count": 1234, "max_line_length": 176, "avg_line_length": 39.78606158833063, "alnum_prop": 0.5868502525664006, "repo_name": "googleapis/python-apigee-registry", "id": "3cde044c65e927f31ad7be42c10a13a3bdb91c26", "size": "49096", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "google/cloud/apigee_registry_v1/services/provisioning/async_client.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2050" }, { "name": "Python", "bytes": "2223877" }, { "name": "Shell", "bytes": "30687" } ], "symlink_target": "" }
import re from bs4 import BeautifulSoup from hashlib import md5 from database import Database from config import MONGO_SETTINGS # Spider Factory def spider(): def callback(webPage): url, pageSource = webPage.getDatas() soup = BeautifulSoup(pageSource) #tmp var _ = '' #more robust param = {} #calculate id to avoid repeat data param['id'] = md5(url).hexdigest(), #get url param['url'] = url, #get name try: _ = soup.find(id="content").h1.string except: _ = 'unknown' finally: param['name'] = _ #get size try: _ = soup.find( id='specifications' ).find_all("p")[2].get_text().strip().split('\n')[1].replace(u'\xa0', u' '), except: _ = 'unknown' finally: param['size'] = _ #get description try: _ = re.compile(r'[\n\r\t]').sub( " ",soup.find(id='description').get_text()), except: _ = 'not description right now~XD' finally: param['description'] = _ #get magnet_link try: _ = soup.find(id="download").find_all("a")[2]['href'] except: #drop it or redo? return else: param['magnet_link'] = _ query = {"id": param['id']} database = Database(db=MONGO_SETTINGS.database) database.saveData(collection='seed', query=query, document=param) #args to init spider entryFilter = dict() entryFilter['Type'] = 'allow' entryFilter['List'] = [r'/tor/\d+', r'/today', r'/yesterday', r'/sub/\d+'] yieldFilter = dict() # yieldFilter['Type'] = 'allow' # yieldFilter['List'] = [r'$'] callbackFilter = dict() callbackFilter['List'] = [r'/tor/\d+', ] callbackFilter['func'] = callback args = dict( url=['http://www.mininova.org/today', 'http://www.mininova.org/yesterday'], depth=3, threadNum=4, keyword='', entryFilter=entryFilter, yieldFilter=yieldFilter, callbackFilter=callbackFilter, db='bt_tornado', collection='link2search', ) return args
{ "content_hash": "290a92cf9a7554c2da8e3c974cece5c6", "timestamp": "", "source": "github", "line_count": 81, "max_line_length": 92, "avg_line_length": 28.271604938271604, "alnum_prop": 0.5139737991266375, "repo_name": "zhkzyth/a-super-fast-crawler", "id": "a205ecc783609a20fcf2b9beffbb3ac7b5f6900b", "size": "2330", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "spiders/mininova.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "48239" } ], "symlink_target": "" }
from __future__ import absolute_import from __future__ import unicode_literals from setuptools import find_packages from setuptools import setup import codecs with codecs.open('LICENSE.txt', 'r', 'utf8') as lf: license_str = lf.read() with codecs.open('README.rst', 'r', 'utf8') as rf: long_description_str = rf.read() setup( name='canopener', version='0.1.6', author='David Selassie', author_email='[email protected]', packages=find_packages(exclude=['tests']), url='https://github.com/selassid/canopener', license=license_str, description=( 'Python convenience function for opening compressed URLs and files.' ), keywords='open file s3 url bzip bz2 gzip gz', include_package_data=True, long_description=long_description_str, setup_requires=['setuptools'], install_requires=[ 'boto', 'pystaticconfiguration', ], )
{ "content_hash": "f773060cfe02d8e39a8f712fe7b4b72d", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 76, "avg_line_length": 26.114285714285714, "alnum_prop": 0.6695842450765864, "repo_name": "selassid/canopener", "id": "6aba69452602c04f999876366f02ddf58f2c1587", "size": "938", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Emacs Lisp", "bytes": "122" }, { "name": "Makefile", "bytes": "255" }, { "name": "Python", "bytes": "7527" } ], "symlink_target": "" }
import datetime from oslo_utils import fixture as utils_fixture from nova.api.openstack.compute import instance_usage_audit_log as v21_ial from nova import context from nova import test from nova.tests.unit.api.openstack import fakes from nova.tests.unit.objects import test_service service_base = test_service.fake_service TEST_COMPUTE_SERVICES = [dict(service_base, host='foo', topic='compute'), dict(service_base, host='bar', topic='compute'), dict(service_base, host='baz', topic='compute'), dict(service_base, host='plonk', topic='compute'), dict(service_base, host='wibble', topic='bogus'), ] begin1 = datetime.datetime(2012, 7, 4, 6, 0, 0) begin2 = end1 = datetime.datetime(2012, 7, 5, 6, 0, 0) begin3 = end2 = datetime.datetime(2012, 7, 6, 6, 0, 0) end3 = datetime.datetime(2012, 7, 7, 6, 0, 0) # test data TEST_LOGS1 = [ # all services done, no errors. dict(host="plonk", period_beginning=begin1, period_ending=end1, state="DONE", errors=0, task_items=23, message="test1"), dict(host="baz", period_beginning=begin1, period_ending=end1, state="DONE", errors=0, task_items=17, message="test2"), dict(host="bar", period_beginning=begin1, period_ending=end1, state="DONE", errors=0, task_items=10, message="test3"), dict(host="foo", period_beginning=begin1, period_ending=end1, state="DONE", errors=0, task_items=7, message="test4"), ] TEST_LOGS2 = [ # some still running... dict(host="plonk", period_beginning=begin2, period_ending=end2, state="DONE", errors=0, task_items=23, message="test5"), dict(host="baz", period_beginning=begin2, period_ending=end2, state="DONE", errors=0, task_items=17, message="test6"), dict(host="bar", period_beginning=begin2, period_ending=end2, state="RUNNING", errors=0, task_items=10, message="test7"), dict(host="foo", period_beginning=begin2, period_ending=end2, state="DONE", errors=0, task_items=7, message="test8"), ] TEST_LOGS3 = [ # some errors.. dict(host="plonk", period_beginning=begin3, period_ending=end3, state="DONE", errors=0, task_items=23, message="test9"), dict(host="baz", period_beginning=begin3, period_ending=end3, state="DONE", errors=2, task_items=17, message="test10"), dict(host="bar", period_beginning=begin3, period_ending=end3, state="DONE", errors=0, task_items=10, message="test11"), dict(host="foo", period_beginning=begin3, period_ending=end3, state="DONE", errors=1, task_items=7, message="test12"), ] def fake_task_log_get_all(context, task_name, begin, end, host=None, state=None): assert task_name == "instance_usage_audit" if begin == begin1 and end == end1: return TEST_LOGS1 if begin == begin2 and end == end2: return TEST_LOGS2 if begin == begin3 and end == end3: return TEST_LOGS3 raise AssertionError("Invalid date %s to %s" % (begin, end)) def fake_last_completed_audit_period(unit=None, before=None): audit_periods = [(begin3, end3), (begin2, end2), (begin1, end1)] if before is not None: for begin, end in audit_periods: if before > end: return begin, end raise AssertionError("Invalid before date %s" % (before)) return begin1, end1 class InstanceUsageAuditLogTestV21(test.NoDBTestCase): def setUp(self): super(InstanceUsageAuditLogTestV21, self).setUp() self.context = context.get_admin_context() self.useFixture( utils_fixture.TimeFixture(datetime.datetime(2012, 7, 5, 10, 0, 0))) self._set_up_controller() self.host_api = self.controller.host_api def fake_service_get_all(context, disabled): self.assertIsNone(disabled) return TEST_COMPUTE_SERVICES self.stub_out( 'nova.utils.last_completed_audit_period', fake_last_completed_audit_period) self.stub_out('nova.db.main.api.service_get_all', fake_service_get_all) self.stub_out( 'nova.db.main.api.task_log_get_all', fake_task_log_get_all) self.req = fakes.HTTPRequest.blank('') def _set_up_controller(self): self.controller = v21_ial.InstanceUsageAuditLogController() def test_index(self): result = self.controller.index(self.req) self.assertIn('instance_usage_audit_logs', result) logs = result['instance_usage_audit_logs'] self.assertEqual(57, logs['total_instances']) self.assertEqual(0, logs['total_errors']) self.assertEqual(4, len(logs['log'])) self.assertEqual(4, logs['num_hosts']) self.assertEqual(4, logs['num_hosts_done']) self.assertEqual(0, logs['num_hosts_running']) self.assertEqual(0, logs['num_hosts_not_run']) self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status']) def test_show(self): result = self.controller.show(self.req, '2012-07-05 10:00:00') self.assertIn('instance_usage_audit_log', result) logs = result['instance_usage_audit_log'] self.assertEqual(57, logs['total_instances']) self.assertEqual(0, logs['total_errors']) self.assertEqual(4, len(logs['log'])) self.assertEqual(4, logs['num_hosts']) self.assertEqual(4, logs['num_hosts_done']) self.assertEqual(0, logs['num_hosts_running']) self.assertEqual(0, logs['num_hosts_not_run']) self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status']) def test_show_with_running(self): result = self.controller.show(self.req, '2012-07-06 10:00:00') self.assertIn('instance_usage_audit_log', result) logs = result['instance_usage_audit_log'] self.assertEqual(57, logs['total_instances']) self.assertEqual(0, logs['total_errors']) self.assertEqual(4, len(logs['log'])) self.assertEqual(4, logs['num_hosts']) self.assertEqual(3, logs['num_hosts_done']) self.assertEqual(1, logs['num_hosts_running']) self.assertEqual(0, logs['num_hosts_not_run']) self.assertEqual("3 of 4 hosts done. 0 errors.", logs['overall_status']) def test_show_with_errors(self): result = self.controller.show(self.req, '2012-07-07 10:00:00') self.assertIn('instance_usage_audit_log', result) logs = result['instance_usage_audit_log'] self.assertEqual(57, logs['total_instances']) self.assertEqual(3, logs['total_errors']) self.assertEqual(4, len(logs['log'])) self.assertEqual(4, logs['num_hosts']) self.assertEqual(4, logs['num_hosts_done']) self.assertEqual(0, logs['num_hosts_running']) self.assertEqual(0, logs['num_hosts_not_run']) self.assertEqual("ALL hosts done. 3 errors.", logs['overall_status'])
{ "content_hash": "055b8b2092459799c7ee918ed4655d74", "timestamp": "", "source": "github", "line_count": 171, "max_line_length": 79, "avg_line_length": 41.50877192982456, "alnum_prop": 0.6238377007607777, "repo_name": "openstack/nova", "id": "9ff09bd40c45bc9223bfe8fe6304012948369819", "size": "7738", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "3545" }, { "name": "Mako", "bytes": "1952" }, { "name": "Python", "bytes": "23261880" }, { "name": "Shell", "bytes": "28113" }, { "name": "Smarty", "bytes": "507244" } ], "symlink_target": "" }
""" homeassistant.keyboard ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Provides functionality to emulate keyboard presses on host machine. """ import logging from homeassistant.const import ( SERVICE_VOLUME_UP, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_MUTE, SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PREV_TRACK, SERVICE_MEDIA_PLAY_PAUSE) DOMAIN = "keyboard" DEPENDENCIES = [] def volume_up(hass): """ Press the keyboard button for volume up. """ hass.services.call(DOMAIN, SERVICE_VOLUME_UP) def volume_down(hass): """ Press the keyboard button for volume down. """ hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN) def volume_mute(hass): """ Press the keyboard button for muting volume. """ hass.services.call(DOMAIN, SERVICE_VOLUME_MUTE) def media_play_pause(hass): """ Press the keyboard button for play/pause. """ hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE) def media_next_track(hass): """ Press the keyboard button for next track. """ hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK) def media_prev_track(hass): """ Press the keyboard button for prev track. """ hass.services.call(DOMAIN, SERVICE_MEDIA_PREV_TRACK) def setup(hass, config): """ Listen for keyboard events. """ try: import pykeyboard except ImportError: logging.getLogger(__name__).exception( "Error while importing dependency PyUserInput.") return False keyboard = pykeyboard.PyKeyboard() keyboard.special_key_assignment() hass.services.register(DOMAIN, SERVICE_VOLUME_UP, lambda service: keyboard.tap_key(keyboard.volume_up_key)) hass.services.register(DOMAIN, SERVICE_VOLUME_DOWN, lambda service: keyboard.tap_key(keyboard.volume_down_key)) hass.services.register(DOMAIN, SERVICE_VOLUME_MUTE, lambda service: keyboard.tap_key(keyboard.volume_mute_key)) hass.services.register(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE, lambda service: keyboard.tap_key(keyboard.media_play_pause_key)) hass.services.register(DOMAIN, SERVICE_MEDIA_NEXT_TRACK, lambda service: keyboard.tap_key(keyboard.media_next_track_key)) hass.services.register(DOMAIN, SERVICE_MEDIA_PREV_TRACK, lambda service: keyboard.tap_key(keyboard.media_prev_track_key)) return True
{ "content_hash": "717694e128f989a61f6d089e57ed1191", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 75, "avg_line_length": 30.151162790697676, "alnum_prop": 0.6209024296182029, "repo_name": "Jaidan/jaidan-hab-home-assistant", "id": "8e820856c343cb16845aa731c649ec9fdb9e0d9d", "size": "2593", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "homeassistant/components/keyboard.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "708583" }, { "name": "Python", "bytes": "413732" }, { "name": "Shell", "bytes": "3984" } ], "symlink_target": "" }
""" Net graph. """ import networkx as nx __all__ = [ 'pn_graph', ] def _make_input_arc_label(arc_name, arc_config): arc_class = arc_config["class"] if arc_class == "Variable": return arc_config["name"] elif arc_class == "VariableTest": return "[{}]".format(arc_config["name"]) elif arc_class == "PackVariable": return "*{}".format(arc_config["name"]) elif arc_class == "PackVariableTest": return "[*{}]".format(arc_config["name"]) elif arc_class == "Inhibitor": return "~" else: return str(arc_config) def _make_output_arc_label(arc_name, arc_config): arc_class = arc_config["class"] if arc_class == "Value": return arc_config["expression"] elif arc_class == "UnpackValue": return "*" + arc_config["expression"] else: return str(arc_config) def _make_place_label(place_name, place_config): label = place_name if place_config["tokens"]: label += "\n" + repr(place_config["tokens"]) return label def _make_transition_label(transition_name, transition_config): label = transition_name if transition_config["condition"]: label += "\n" + transition_config["condition"] return label def pn_graph(pnd): gx = nx.DiGraph() for place_name, place_config in pnd["places"].items(): gx.add_node(place_name, bipartite=0, label=_make_place_label(place_name, place_config), shape='ellipse') for transition_name, transition_config in pnd["transitions"].items(): gx.add_node(transition_name, bipartite=1, label=_make_transition_label(transition_name, transition_config), shape='box') for arc_name, arc_config in pnd["input_arcs"].items(): gx.add_edge(arc_config["place"], arc_config["transition"], label=_make_input_arc_label(arc_name, arc_config)) for arc_name, arc_config in pnd["output_arcs"].items(): gx.add_edge(arc_config["transition"], arc_config["place"], label=_make_output_arc_label(arc_name, arc_config)) return gx
{ "content_hash": "1fc569f05c68a9258448437c57d62602", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 85, "avg_line_length": 31.246376811594203, "alnum_prop": 0.5960111317254174, "repo_name": "simone-campagna/pnets", "id": "0edda9e6b4398baa6cb5dfd0f0ab60161a85731c", "size": "2764", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/pnets/pn_graph.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "105438" } ], "symlink_target": "" }
import urlparse from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker def parse_pgurl(db_url): """ Given a SQLAlchemy-compatible Postgres url, return a dict with keys for user, password, host, port, and database. """ parsed = urlparse.urlsplit(db_url) return { 'user': parsed.username, 'password': parsed.password, 'database': parsed.path.lstrip('/'), 'host': parsed.hostname, 'port': parsed.port, } def make_session_cls(db_url, echo=False): return sessionmaker(bind=create_engine(db_url, echo=echo))
{ "content_hash": "73f2450f061a27b71ef551670fb46ee7", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 66, "avg_line_length": 27.272727272727273, "alnum_prop": 0.66, "repo_name": "yougov/mettle", "id": "c60c34cc13420259b6bdb71a4a56fcf77406fa54", "size": "600", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mettle/db.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "12606" }, { "name": "HTML", "bytes": "13738" }, { "name": "JavaScript", "bytes": "59448" }, { "name": "Makefile", "bytes": "2613" }, { "name": "PLpgSQL", "bytes": "16589" }, { "name": "Python", "bytes": "149403" }, { "name": "Shell", "bytes": "8321" } ], "symlink_target": "" }
''' Test script for security-check.py ''' import subprocess import unittest def write_testcode(filename): with open(filename, 'w', encoding="utf8") as f: f.write(''' #include <stdio.h> int main() { printf("the quick brown fox jumps over the lazy god\\n"); return 0; } ''') def call_security_check(cc, source, executable, options): subprocess.check_call([cc,source,'-o',executable] + options) p = subprocess.Popen(['./security-check.py',executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True) (stdout, stderr) = p.communicate() return (p.returncode, stdout.rstrip()) class TestSecurityChecks(unittest.TestCase): def test_ELF(self): source = 'test1.c' executable = 'test1' cc = 'gcc' write_testcode(source) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']), (1, executable+': failed PIE NX RELRO Canary')) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']), (1, executable+': failed PIE RELRO Canary')) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE']), (1, executable+': failed PIE RELRO')) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE']), (1, executable+': failed RELRO')) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE']), (0, '')) def test_PE(self): source = 'test1.c' executable = 'test1.exe' cc = 'x86_64-w64-mingw32-gcc' write_testcode(source) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va']), (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA NX')) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va']), (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA')) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--no-high-entropy-va']), (1, executable+': failed HIGH_ENTROPY_VA')) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va']), (0, '')) def test_MACHO(self): source = 'test1.c' executable = 'test1' cc = 'clang' write_testcode(source) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace', '-Wl,-allow_stack_execute']), (1, executable+': failed PIE NOUNDEFS NX')) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace']), (1, executable+': failed PIE NOUNDEFS')) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie']), (1, executable+': failed PIE')) self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie']), (0, '')) if __name__ == '__main__': unittest.main()
{ "content_hash": "4a5ce30069a0515a6c8e972a2969dfac", "timestamp": "", "source": "github", "line_count": 74, "max_line_length": 158, "avg_line_length": 48.41891891891892, "alnum_prop": 0.6089868825006978, "repo_name": "ahmedbodi/vertcoin", "id": "e2a8154f16da62c7d7364b55fefc53731d940945", "size": "3797", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "contrib/devtools/test-security-check.py", "mode": "33261", "license": "mit", "language": [ { "name": "Assembly", "bytes": "28456" }, { "name": "C", "bytes": "1163389" }, { "name": "C++", "bytes": "4857520" }, { "name": "CSS", "bytes": "1127" }, { "name": "HTML", "bytes": "50622" }, { "name": "Java", "bytes": "30290" }, { "name": "M4", "bytes": "185589" }, { "name": "Makefile", "bytes": "108571" }, { "name": "Objective-C", "bytes": "3892" }, { "name": "Objective-C++", "bytes": "7232" }, { "name": "Protocol Buffer", "bytes": "2328" }, { "name": "Python", "bytes": "1076417" }, { "name": "QMake", "bytes": "756" }, { "name": "Shell", "bytes": "48193" } ], "symlink_target": "" }
import os, requests, json, string, datetime, csv import randr, nlc # ------------------------------------------------ # GLOBAL VARIABLES (Set from ENV Variables)------- # Dialog and classifier # -- defaults for testing DIALOG_ID = 'xxxx' DIALOG_USERNAME = 'xxxx' DIALOG_PASSWORD = 'xxxx' # -- overwrites by env variables if 'DIALOG_ID' in os.environ: DIALOG_ID = os.environ['DIALOG_ID'] if 'VCAP_SERVICES' in os.environ: dialog = json.loads(os.environ['VCAP_SERVICES'])['dialog'][0] DIALOG_USERNAME = dialog["credentials"]["username"] DIALOG_PASSWORD = dialog["credentials"]["password"] #Dialog Functions def BMIX_get_first_dialog_response_json(): global DIALOG_ID, DIALOG_USERNAME, DIALOG_PASSWORD #print 'in first_dialog' POST_SUCCESS = 201 response_json = None url = 'https://watson-api-explorer.mybluemix.net/dialog/api/v1/dialogs/' + DIALOG_ID + '/conversation' r = requests.post(url, auth=(DIALOG_USERNAME, DIALOG_PASSWORD)) if r.status_code == POST_SUCCESS: response_json = r.json() response_json['response'] = format_dialog_response_as_string(response_json['response']) #print response_json return response_json def BMIX_get_next_dialog_response(client_id, conversation_id, input): global DIALOG_ID, DIALOG_USERNAME, DIALOG_PASSWORD print 'in second dialog' POST_SUCCESS = 201 response = '' url = 'https://watson-api-explorer.mybluemix.net/dialog/api/v1/dialogs/' + DIALOG_ID + '/conversation' payload = {'client_id': client_id, 'conversation_id': conversation_id, 'input': input} r = requests.post(url, auth=(DIALOG_USERNAME, DIALOG_PASSWORD), params=payload) print("DIALOG NEXT RESPONSE") print(r.status_code) print(r) if r.status_code == POST_SUCCESS: response = format_dialog_response_as_string(r.json()['response']) return response def format_dialog_response_as_string(response_strings): response = '' if response_strings: for response_string in response_strings: if str(response_string) != '': if len(response) > 0: response = response + '<BR>' + response_string else: response = response_string return response
{ "content_hash": "78aadd24dc6576b19e7998d4cfefa90b", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 106, "avg_line_length": 39.68421052631579, "alnum_prop": 0.6489832007073386, "repo_name": "cackerso/virual-agent-lab", "id": "f5f9dee801587cf683923285ac211eea24dab8e2", "size": "2262", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dialog.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "144478" }, { "name": "HTML", "bytes": "5615" }, { "name": "Python", "bytes": "12558" } ], "symlink_target": "" }
from functools import wraps import inspect import sys from .dogpile import cache_get from .dogpile import cache_set PY3 = sys.version_info[0] == 3 def default_key_fun_impl(fun, *args, **kwargs): name = fun.__name__ mod = fun.__module__ call_args = inspect.getcallargs(fun, *args, **kwargs) return "%s-%s-%s" % (name, mod, '-'.join( ["%s-%s" % (k, call_args[k]) for k in sorted(iterkeys(call_args))])) def cacheable(cache, key=None, ttl=60, is_enabled=True): """ Decorator for cacheable function """ def decorator(fxn): if callable(key): key_fun = key else: key_fun = default_key_fun_impl if key is None else \ lambda fxn, *args, **kwargs: key @wraps(fxn) def wrapper(*args, **kwargs): if is_enabled: key = key_fun(fxn, *args, **kwargs) data = cache_get(cache, key) if data is None: data = fxn(*args, **kwargs) cache_set(cache, key, data, ttl) return data else: return fxn(*args, **kwargs) return wrapper return decorator def iterkeys(d, **kw): if PY3: return iter(d.keys(**kw)) else: return d.iterkeys(**kw)
{ "content_hash": "d93362592774e35dcd7be616c6231a93", "timestamp": "", "source": "github", "line_count": 50, "max_line_length": 76, "avg_line_length": 26.2, "alnum_prop": 0.5290076335877862, "repo_name": "ryankanno/django-utilities", "id": "53ea8f91c30fb8d5fc6d5f87f3e29f7051d2f29c", "size": "1310", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "django_utilities/cache/decorators.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "517" }, { "name": "Python", "bytes": "7959" } ], "symlink_target": "" }
from panda3d.core import NodePath, TextNode from direct.interval.IntervalGlobal import LerpFunctionInterval from direct.gui.DirectGui import DirectLabel, DirectFrame, DGG from direct.showbase.PythonUtil import bound as clamp from toontown.toonbase import ToontownGlobals import CogdoUtil import CogdoFlyingGameGlobals as Globals class CogdoFlyingProgressGui(DirectFrame): def __init__(self, parent, level, pos2d = Globals.Gui.ProgressPos2D): DirectFrame.__init__(self, relief=None, state=DGG.NORMAL, sortOrder=DGG.BACKGROUND_SORT_INDEX) self._parent = parent self._level = level self.reparentTo(self._parent) self.setPos(pos2d[0], 0.0, pos2d[1]) self._levelStartY = self._level.startPlatform.getModel().getY() self._levelEndY = self._level.endPlatform.getModel().getY() self._levelDistance = abs(self._levelEndY - self._levelStartY) self._toonMarkers = {} self._initModel() return def destroy(self): self._laffMeterModel.removeNode() del self._laffMeterModel DirectFrame.destroy(self) def _initModel(self): self._laffMeterModel = loader.loadModel('phase_3/models/gui/laff_o_meter') self._model = CogdoUtil.loadFlyingModel('progressMeter', group='gui') self._model.reparentTo(self) self._model.setBin('fixed', 0) self._lineStart = self._model.find('**/start_loc').getZ() self._lineEnd = self._model.find('**/end_loc').getZ() self._lineDistance = abs(self._lineEnd - self._lineStart) def addToon(self, toon): marker = NodePath('toon_marker-%i' % toon.doId) marker.reparentTo(self) self._getToonMarker(toon).copyTo(marker) marker.setColor(toon.style.getHeadColor()) if toon.isLocal(): marker.setScale(Globals.Gui.LocalMarkerScale) marker.setBin('fixed', 10) else: marker.setScale(Globals.Gui.MarkerScale) marker.setBin('fixed', 5) marker.flattenStrong() self._toonMarkers[toon] = marker def removeToon(self, toon): marker = self._toonMarkers.get(toon, None) if marker is not None: marker.removeNode() del self._toonMarkers[toon] return def _getToonMarker(self, toon): type = self._laffMeterModel.find('**/' + toon.style.getType() + 'head') if type.isEmpty(): type = self._laffMeterModel.find('**/bunnyhead') return type def update(self): for toon, marker in self._toonMarkers.items(): progress = clamp((toon.getY() - self._levelStartY) / self._levelDistance, self._levelStartY, self._levelEndY) marker.setZ(clamp(self._lineStart + self._lineDistance * progress, self._lineStart, self._lineEnd)) class CogdoFlyingFuelGui(DirectFrame): def __init__(self, parent): DirectFrame.__init__(self, relief=None, state=DGG.NORMAL, sortOrder=DGG.BACKGROUND_SORT_INDEX) self.reparentTo(parent) self.active = 0 self._initModel() self._initIntervals() return def _initModel(self): self.setPos(Globals.Gui.FuelPos2D[0], 0.0, Globals.Gui.FuelPos2D[1]) self.gui = CogdoUtil.loadFlyingModel('propellerMeter', group='gui') self.gui.reparentTo(self) self.gui.setBin('fixed', 0) self.healthBar = self.gui.find('**/healthBar') self.healthBar.setBin('fixed', 1) self.healthBar.setColor(*Globals.Gui.FuelNormalColor) bottomBarLocator = self.gui.find('**/bottomOfBar_loc') bottomBarPos = bottomBarLocator.getPos(render) topBarLocator = self.gui.find('**/topOfBar_loc') topBarPos = topBarLocator.getPos(render) zDist = topBarPos.getZ() - bottomBarPos.getZ() self.fuelLowIndicator = self.gui.find('**/fuelLowIndicator') self.fuelLowIndicator.setBin('fixed', 2) pos = self.fuelLowIndicator.getPos(render) newPos = pos newPos.setZ(bottomBarPos.getZ() + zDist * Globals.Gameplay.FuelLowAmt) self.fuelLowIndicator.setPos(render, newPos) self.fuelVeryLowIndicator = self.gui.find('**/fuelVeryLowIndicator') self.fuelVeryLowIndicator.setBin('fixed', 2) pos = self.fuelVeryLowIndicator.getPos(render) newPos = pos newPos.setZ(bottomBarPos.getZ() + zDist * Globals.Gameplay.FuelVeryLowAmt) self.fuelVeryLowIndicator.setPos(render, newPos) self.propellerMain = self.gui.find('**/propellers') self.propellerMain.setBin('fixed', 3) self.propellerHead = self.gui.find('**/propellerHead') self.propellerHead.setBin('fixed', 4) self.blades = [] self.activeBlades = [] index = 1 blade = self.propellerMain.find('**/propeller%d' % index) while not blade.isEmpty(): self.blades.append(blade) index += 1 blade = self.propellerMain.find('**/propeller%d' % index) for blade in self.blades: self.activeBlades.append(blade) self.bladeNumberLabel = DirectLabel(parent=self.propellerHead, relief=None, pos=(Globals.Gui.FuelNumBladesPos2D[0], 0, Globals.Gui.FuelNumBladesPos2D[1]), scale=Globals.Gui.FuelNumBladesScale, text=str(len(self.activeBlades)), text_align=TextNode.ACenter, text_fg=(0.0, 0.0, -0.002, 1), text_shadow=(0.75, 0.75, 0.75, 1), text_font=ToontownGlobals.getInterfaceFont()) self.bladeNumberLabel.setBin('fixed', 5) return def _initIntervals(self): self._healthIval = LerpFunctionInterval(self.healthBar.setSz, fromData=0.0, toData=1.0, duration=2.0) self.baseSpinDuration = 2.0 self._spinIval = LerpFunctionInterval(self.propellerMain.setR, fromData=0.0, toData=-360.0, duration=self.baseSpinDuration) def show(self): DirectFrame.show(self) self._spinIval.loop() def hide(self): DirectFrame.hide(self) self._spinIval.pause() def resetBlades(self): self.setBlades(len(self.blades)) def setBlades(self, fuelState): if fuelState not in Globals.Gameplay.FuelStates: return numBlades = fuelState - 1 if len(self.activeBlades) != numBlades: for i in xrange(len(self.activeBlades)): blade = self.activeBlades.pop() blade.stash() if numBlades > len(self.blades): numBlades = len(self.blades) for i in xrange(numBlades): blade = self.blades[i] self.activeBlades.append(blade) blade.unstash() self.bladeNumberLabel['text'] = str(len(self.activeBlades)) self.bladeNumberLabel.setText() self.updateHealthBarColor() def bladeLost(self): if len(self.activeBlades) > 0: blade = self.activeBlades.pop() blade.stash() self.bladeNumberLabel['text'] = str(len(self.activeBlades)) self.bladeNumberLabel.setText() self.updateHealthBarColor() def updateHealthBarColor(self): color = Globals.Gui.NumBlades2FuelColor[len(self.activeBlades)] self.healthBar.setColor(*color) def setPropellerSpinRate(self, newRate): self._spinIval.setPlayRate(newRate) def setRefuelLerpFromData(self): startScale = self.healthBar.getSz() self._healthIval.fromData = startScale def setFuel(self, fuel): self.fuel = fuel def update(self): self.healthBar.setSz(self.fuel) def destroy(self): self.bladeNumberLabel.removeNode() self.bladeNumberLabel = None self._healthIval.clearToInitial() del self._healthIval self.healthBar = None self.fuelLowIndicator = None self.fuelVeryLowIndicator = None self.propellerMain = None self.propellerHead = None del self.blades[:] del self.activeBlades[:] self.gui.detachNode() self.gui = None DirectFrame.destroy(self) return
{ "content_hash": "67f07f733c5b7679381b7aa001e349ca", "timestamp": "", "source": "github", "line_count": 202, "max_line_length": 375, "avg_line_length": 41.00990099009901, "alnum_prop": 0.6243360695316272, "repo_name": "DedMemez/ODS-August-2017", "id": "75a0694be35da704601464c9b136baa62895a9df", "size": "8385", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cogdominium/CogdoFlyingGameGuis.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "10152014" }, { "name": "Shell", "bytes": "707" } ], "symlink_target": "" }
from __future__ import unicode_literals from kivy.uix.screenmanager import Screen from kivy.adapters.dictadapter import DictAdapter from kivy.uix.listview import ListView from kivy.uix.textinput import TextInput from ..data_provider import load_stations class SelectStationWindow(Screen): def __init__(self, **kwargs): super(SelectStationWindow, self).__init__(**kwargs) self.stations = load_stations() self.selected_station = None self.bind(on_pre_enter=self.prepare) def prepare(self, args): self.clear_widgets() filter_station = TextInput(pos_hint={"top":1}, hint_text='Почніть вводити назву станції', size_hint_y=0.07, font_size='20sp') filter_station.bind(text=self.on_filter_changed) dict_adapter = self.prepare_stations_dict_adapter(self.stations, '') self.list_view = ListView(pos_hint={"top":0.93}, adapter=dict_adapter) self.add_widget(filter_station) self.add_widget(self.list_view) def station_converter(self, row_index, station): converted = {'text': station.name, 'size_hint_y': None, 'height': '60sp', 'station': station, 'window': self, 'manager': self.manager} return converted def on_filter_changed(self, filter_station, filter_text): dict_adapter = self.prepare_stations_dict_adapter(self.stations, filter_text.decode('utf-8')) self.remove_widget(self.list_view) self.list_view = ListView(pos_hint={"top":0.93}, adapter=dict_adapter) self.add_widget(self.list_view) def prepare_stations_dict_adapter(self, stations, filter_text): data = {} for number in range(len(stations)): station = stations[number] if station.matches(filter_text) is True: data[number] = station dict_adapter = DictAdapter(data=data, args_converter=self.station_converter, template=b'StationTemplate') return dict_adapter
{ "content_hash": "5eccd60652c697c471fcfae811ea53f9", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 133, "avg_line_length": 37.35087719298246, "alnum_prop": 0.6171911695631752, "repo_name": "vitaliibaz/uz-train-schedule", "id": "1737472fb3daf863788703f05679b7cb412512d5", "size": "2170", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/ui/select_station_window.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "29680" } ], "symlink_target": "" }
""" Given an array of integers, find out whether there are two distinct indices i and j in the array such that the difference between nums[i] and nums[j] is at most t and the difference between i and j is at most k. """ class Solution: # @param {integer[]} nums # @param {integer} k # @param {integer} t # @return {boolean} def containsNearbyAlmostDuplicate(self, nums, k, t): import collections if k < 1 or t < 0: return False numDict = collections.OrderedDict() for x in range(len(nums)): key = nums[x] / max(1, t) for m in (key, key - 1, key + 1): if m in numDict and abs(nums[x] - numDict[m]) <= t: return True numDict[key] = nums[x] if x >= k: numDict.popitem(last=False) return False
{ "content_hash": "b098a65fa5c3f826451daf782dbc06a7", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 77, "avg_line_length": 32.03703703703704, "alnum_prop": 0.5572254335260116, "repo_name": "fantuanmianshi/Daily", "id": "0c82031847fd9c3b2c49a43f13081b33c0b75dd0", "size": "865", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "LeetCode/contains_duplicate_iii.py", "mode": "33188", "license": "mit", "language": [ { "name": "Go", "bytes": "908" }, { "name": "Python", "bytes": "137913" } ], "symlink_target": "" }
from swgpy.object import * def create(kernel): result = Static() result.template = "object/static/item/shared_item_container_plain_s04.iff" result.attribute_template_id = -1 result.stfName("obj_n","unknown_object") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
{ "content_hash": "750232cc65866c3fa2e7155bd4540e28", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 75, "avg_line_length": 23.384615384615383, "alnum_prop": 0.6907894736842105, "repo_name": "anhstudios/swganh", "id": "853788b11bfa2c91095cd2850bf29c7225ba5e7b", "size": "449", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "data/scripts/templates/object/static/item/shared_item_container_plain_s04.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "11887" }, { "name": "C", "bytes": "7699" }, { "name": "C++", "bytes": "2357839" }, { "name": "CMake", "bytes": "41264" }, { "name": "PLSQL", "bytes": "42065" }, { "name": "Python", "bytes": "7503510" }, { "name": "SQLPL", "bytes": "42770" } ], "symlink_target": "" }
import argparse from collections import OrderedDict from datetime import datetime import json import os import subprocess import sys from bs4 import BeautifulSoup import requests import yaml CONFIG_PATH = "{}/.config/track/config.yaml".format(os.environ['HOME']) USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:47.0) Gecko/20100101' \ + 'Firefox/47.0' def main(): parser = argparse.ArgumentParser() exclusive_args = parser.add_mutually_exclusive_group() exclusive_args.add_argument('-l', action='store_true', help="list packages in config, looks for \ $HOME/.config/track/config.yaml") exclusive_args.add_argument('-p', metavar="PACKAGE_NAME", action="store", help='package to track') exclusive_args.add_argument('-e', action='store_true', help='edit config file with $EDITOR or vi') args = parser.parse_args() if args.l: list_packages() if args.p: track_package(args.p) if args.e: edit_config() if len(sys.argv) == 1: parser.print_help() sys.exit(1) # Remove whitespace in USPS strings def remove_whitespace(s): return s.replace('\r', '').replace('\t', '').replace('\n', '') ############################## # USPS Tracking ############################## # Fix USPS datetime formatting def fix_usps_datetime(date_time): s = date_time.split(",") if len(s) == 2: # In case no time is given return "{}, {}".format(s[0], s[1].lstrip()) else: return "{}, {}, {}".format(s[0], s[1].lstrip(), s[2]) # Make request and return OrderedDict of status updates def get_usps_data(tracking_number): url = "https://tools.usps.com/go/TrackConfirmAction?qtc_tLabels1="\ + tracking_number headers = {'user-agent': USER_AGENT} r = requests.get(url, headers=headers) html = r.text # There is a missing </p> tag in the first "status" # <td>, causing BeautifulSoup to ignore the parent # <td>, removing all <p> and </p> tags fixes this issue html = html.replace('<p>', '').replace('</p>', '') soup = BeautifulSoup(html, 'html.parser') table_rows = soup.find_all("tr", class_="detail-wrapper") if not table_rows: return None data = {} for tr in table_rows: date_time_td = tr.find("td", class_="date-time") date_time = remove_whitespace(date_time_td.string) date_time = fix_usps_datetime(date_time) try: timestamp = datetime.strptime(date_time, "%B %d, %Y, %I:%M %p").timestamp() except: timestamp = datetime.strptime(date_time, "%B %d, %Y").timestamp() status_td = tr.find("td", class_="status") status_p = status_td.find("p") # Some status strings are in an additional span child if "clearfix" in status_p["class"]: status_span = status_p.find("span") status = remove_whitespace(status_span.string.lstrip()) else: # There is an <input> along with text in # this element, get just the text. status = remove_whitespace(status_p.contents[0]) location_td = tr.find("td", class_="location") location = remove_whitespace(location_td.string) data[timestamp] = {} data[timestamp]["date_time"] = date_time data[timestamp]["status"] = status data[timestamp]["location"] = location return OrderedDict(sorted(data.items())) ############################## # UPS Tracking ############################## def get_ups_data(tracking_number): url = "https://wwwapps.ups.com/WebTracking/track?track=yes&trackNums="\ + tracking_number + "&loc=en_us" headers = {'user-agent': USER_AGENT} r = requests.get(url, headers=headers) html = r.text soup = BeautifulSoup(html, 'html5lib') # # This parses the tracking information for the page # of a package that has already been delivered. # Tracking information is collapsed and expanding # sends an XHR to retrieve it. # # Progress is loaded through an XHR, get form parameters here form = soup.select("#detailFormid")[0] # Not valid if no form if not form: return None hidden = form.select("input") params = {} for e in hidden: params[e['name']] = e['value'] post_url = "https://wwwapps.ups.com/WebTracking/detail" r = requests.post(post_url, data=params) soup = BeautifulSoup(r.text, 'html5lib') table = soup.select(".dataTable")[0] table_rows = table.select("tr + tr") data = {} # Lots of extra space characters leading, trailing and # in between words for tr in table_rows: location = tr.select("td:nth-of-type(1)")[0] location = remove_whitespace(location.string) location = " ".join(location.split()) date = tr.select("td:nth-of-type(2)")[0] date = remove_whitespace(date.string).strip() time = tr.select("td:nth-of-type(3)")[0] # AM/PM need no periods for strptime time = remove_whitespace(time.string).strip().replace('.', '') status = tr.select("td:nth-of-type(4)")[0] status = remove_whitespace(status.string) status = " ".join(status.split()) # Get timestamp for sorting date_time = "{} {}".format(date, time) timestamp = datetime.strptime(date_time, "%m/%d/%Y %I:%M %p").timestamp() data[timestamp] = {} data[timestamp]["date_time"] = date_time data[timestamp]["status"] = status data[timestamp]["location"] = location return OrderedDict(sorted(data.items())) ############################## # Fedex Tracking ############################## def get_fedex_data(tracking_number): url = "https://www.fedex.com/trackingCal/track" data_params = {"TrackPackagesRequest": {"appType": "WTRK", "uniqueKey": "", "processingParameters": {}, "trackingInfoList": [ {"trackNumberInfo": { "trackingNumber": tracking_number, "trackingQualifier": "", "trackingCarrier": "" }} ]}} request_data = {"data": json.dumps(data_params), "action": "trackpackages", "locale": "en_US", "version": "1", "format": "json"} r = requests.post(url, data=request_data) fedex_data = json.loads(r.text) events = fedex_data['TrackPackagesResponse']['packageList'][0][ 'scanEventList'] data = {} for entry in events: location = entry['scanLocation'] date = entry['date'] time = entry['time'] status = entry['status'] # Get timestamp for sorting date_time = "{} {}".format(date, time) timestamp = datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S").timestamp() data[timestamp] = {} data[timestamp]["date_time"] = date_time data[timestamp]["status"] = status data[timestamp]["location"] = location return OrderedDict(sorted(data.items())) def print_history(history): for a, b in history.items(): print(b['date_time']) print('\t' + b['status']) print('\t' + b['location']) def list_packages(): config = yaml.load(open(CONFIG_PATH, 'r'), Loader=yaml.BaseLoader) for carrier, shipments in config.items(): print(carrier) for number, name in shipments.items(): print("\t{}: {}".format(name, number)) def track_package(package_name): config = yaml.load(open(CONFIG_PATH, 'r'), Loader=yaml.BaseLoader) for c, shipments in config.items(): for number, name in shipments.items(): if package_name == name: tracking_number = number carrier = c break try: if carrier == 'USPS': history = get_usps_data(tracking_number) if carrier == 'UPS': history = get_ups_data(tracking_number) if carrier == 'FedEx': history = get_fedex_data(tracking_number) print_history(history) except: print("Tracking information not found") def edit_config(): try: subprocess.run([os.environ['EDITOR'], CONFIG_PATH]) except: subprocess.run(["vi", CONFIG_PATH]) if __name__ == "__main__": main()
{ "content_hash": "b8ed69643a7b64f3ec96b5462d939e9a", "timestamp": "", "source": "github", "line_count": 289, "max_line_length": 76, "avg_line_length": 30.519031141868513, "alnum_prop": 0.5471655328798186, "repo_name": "justinledford/track", "id": "cf5f135c0c991960f6afee0a666113b4fb121d27", "size": "8840", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "track.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "8840" } ], "symlink_target": "" }
"""Verifies that instance_links are being retrieved properly from LINKS. Verifies that app_data.json.j2 contains the instance link information""" from unittest import mock from foremast.app import SpinnakerApp @mock.patch('foremast.app.spinnaker_app.LINKS', new={'example1': 'https://example1.com'}) def test_default_instance_links(): """Validate default instance_links are being populated properly.""" pipeline_config = { "instance_links": { "example2": "https://example2.com", } } combined = {'example1': 'https://example1.com'} combined.update(pipeline_config['instance_links']) spinnaker_app = SpinnakerApp("aws", pipeline_config=pipeline_config) instance_links = spinnaker_app.retrieve_instance_links() assert instance_links == combined, "Instance Links are not being retrieved properly" @mock.patch('foremast.app.spinnaker_app.LINKS', new={'example': 'example1', 'example': 'example2'}) def test_duplicate_instance_links(): """Validate behavior when two keys are identical.""" pipeline_config = { "instance_links": {} } duplicate = {'example': 'example2'} spinnaker_app = SpinnakerApp("aws", pipeline_config=pipeline_config) instance_links = spinnaker_app.retrieve_instance_links() assert instance_links == duplicate, "Instance links handing duplicates are wrong."
{ "content_hash": "a20a1cb1fbbb69f671df93a9193c7dcc", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 103, "avg_line_length": 37.21621621621622, "alnum_prop": 0.7015250544662309, "repo_name": "gogoair/foremast", "id": "61334ccbd7ea65d457b9387ece9b9b626c9994b5", "size": "1377", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/app/test_create_app.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "7614" }, { "name": "Python", "bytes": "484364" }, { "name": "Shell", "bytes": "180" } ], "symlink_target": "" }
""" Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1. AUTHORS The original version of this software, called LMFIT, was written in FORTRAN as part of the MINPACK-1 package by XXX. Craig Markwardt converted the FORTRAN code to IDL. The information for the IDL version is: Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770 [email protected] UPDATED VERSIONs can be found on my WEB PAGE: http://cow.physics.wisc.edu/~craigm/idl/idl.html Mark Rivers created this Python version from Craig's IDL version. Mark Rivers, University of Chicago Building 434A, Argonne National Laboratory 9700 South Cass Avenue, Argonne, IL 60439 [email protected] Updated versions can be found at http://cars.uchicago.edu/software Sergey Koposov converted the Mark's Python version from Numeric to numpy Sergey Koposov, University of Cambridge, Institute of Astronomy, Madingley road, CB3 0HA, Cambridge, UK [email protected] Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/ DESCRIPTION MPFIT uses the Levenberg-Marquardt technique to solve the least-squares problem. In its typical use, MPFIT will be used to fit a user-supplied function (the "model") to user-supplied data points (the "data") by adjusting a set of parameters. MPFIT is based upon MINPACK-1 (LMDIF.F) by More' and collaborators. For example, a researcher may think that a set of observed data points is best modelled with a Gaussian curve. A Gaussian curve is parameterized by its mean, standard deviation and normalization. MPFIT will, within certain constraints, find the set of parameters which best fits the data. The fit is "best" in the least-squares sense; that is, the sum of the weighted squared differences between the model and data is minimized. The Levenberg-Marquardt technique is a particular strategy for iteratively searching for the best fit. This particular implementation is drawn from MINPACK-1 (see NETLIB), and is much faster and more accurate than the version provided in the Scientific Python package in Scientific.Functions.LeastSquares. This version allows upper and lower bounding constraints to be placed on each parameter, or the parameter can be held fixed. The user-supplied Python function should return an array of weighted deviations between model and data. In a typical scientific problem the residuals should be weighted so that each deviate has a gaussian sigma of 1.0. If X represents values of the independent variable, Y represents a measurement for each value of X, and ERR represents the error in the measurements, then the deviates could be calculated as follows: DEVIATES = (Y - F(X)) / ERR where F is the analytical function representing the model. You are recommended to use the convenience functions MPFITFUN and MPFITEXPR, which are driver functions that calculate the deviates for you. If ERR are the 1-sigma uncertainties in Y, then TOTAL( DEVIATES^2 ) will be the total chi-squared value. MPFIT will minimize the chi-square value. The values of X, Y and ERR are passed through MPFIT to the user-supplied function via the FUNCTKW keyword. Simple constraints can be placed on parameter values by using the PARINFO keyword to MPFIT. See below for a description of this keyword. MPFIT does not perform more general optimization tasks. See TNMIN instead. MPFIT is customized, based on MINPACK-1, to the least-squares minimization problem. USER FUNCTION The user must define a function which returns the appropriate values as specified above. The function should return the weighted deviations between the model and the data. It should also return a status flag and an optional partial derivative array. For applications which use finite-difference derivatives -- the default -- the user function should be declared in the following way: def myfunct(p, fjac=None, x=None, y=None, err=None) # Parameter values are passed in "p" # If fjac==None then partial derivatives should not be # computed. It will always be None if MPFIT is called with default # flag. model = F(x, p) # Non-negative status value means MPFIT should continue, negative means # stop the calculation. status = 0 return([status, (y-model)/err] See below for applications with analytical derivatives. The keyword parameters X, Y, and ERR in the example above are suggestive but not required. Any parameters can be passed to MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and MPFITEXPR if you need ideas on how to do that. The function *must* accept a parameter list, P. In general there are no restrictions on the number of dimensions in X, Y or ERR. However the deviates *must* be returned in a one-dimensional Numeric array of type Float. User functions may also indicate a fatal error condition using the status return described above. If status is set to a number between -15 and -1 then MPFIT will stop the calculation and return to the caller. ANALYTIC DERIVATIVES In the search for the best-fit solution, MPFIT by default calculates derivatives numerically via a finite difference approximation. The user-supplied function need not calculate the derivatives explicitly. However, if you desire to compute them analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT. As a practical matter, it is often sufficient and even faster to allow MPFIT to calculate the derivatives numerically, and so AUTODERIVATIVE=0 is not necessary. If AUTODERIVATIVE=0 is used then the user function must check the parameter FJAC, and if FJAC!=None then return the partial derivative array in the return list. def myfunct(p, fjac=None, x=None, y=None, err=None) # Parameter values are passed in "p" # If FJAC!=None then partial derivatives must be comptuer. # FJAC contains an array of len(p), where each entry # is 1 if that parameter is free and 0 if it is fixed. model = F(x, p) Non-negative status value means MPFIT should continue, negative means # stop the calculation. status = 0 if (dojac): pderiv = zeros([len(x), len(p)], Float) for j in range(len(p)): pderiv[:,j] = FGRAD(x, p, j) else: pderiv = None return([status, (y-model)/err, pderiv] where FGRAD(x, p, i) is a user function which must compute the derivative of the model with respect to parameter P[i] at X. When finite differencing is used for computing derivatives (ie, when AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the derivatives the parameter FJAC=None. Derivatives should be returned in the PDERIV array. PDERIV should be an m x n array, where m is the number of data points and n is the number of parameters. dp[i,j] is the derivative at the ith point with respect to the jth parameter. The derivatives with respect to fixed parameters are ignored; zero is an appropriate value to insert for those derivatives. Upon input to the user function, FJAC is set to a vector with the same length as P, with a value of 1 for a parameter which is free, and a value of zero for a parameter which is fixed (and hence no derivative needs to be calculated). If the data is higher than one dimensional, then the *last* dimension should be the parameter dimension. Example: fitting a 50x50 image, "dp" should be 50x50xNPAR. CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD The behavior of MPFIT can be modified with respect to each parameter to be fitted. A parameter value can be fixed; simple boundary constraints can be imposed; limitations on the parameter changes can be imposed; properties of the automatic derivative can be modified; and parameters can be tied to one another. These properties are governed by the PARINFO structure, which is passed as a keyword parameter to MPFIT. PARINFO should be a list of dictionaries, one list entry for each parameter. Each parameter is associated with one element of the array, in numerical order. The dictionary can have the following keys (none are required, keys are case insensitive): 'value' - the starting parameter value (but see the START_PARAMS parameter for more information). 'fixed' - a boolean value, whether the parameter is to be held fixed or not. Fixed parameters are not varied by MPFIT, but are passed on to MYFUNCT for evaluation. 'limited' - a two-element boolean array. If the first/second element is set, then the parameter is bounded on the lower/upper side. A parameter can be bounded on both sides. Both LIMITED and LIMITS must be given together. 'limits' - a two-element float array. Gives the parameter limits on the lower and upper sides, respectively. Zero, one or two of these values can be set, depending on the values of LIMITED. Both LIMITED and LIMITS must be given together. 'parname' - a string, giving the name of the parameter. The fitting code of MPFIT does not use this tag in any way. However, the default iterfunct will print the parameter name if available. 'step' - the step size to be used in calculating the numerical derivatives. If set to zero, then the step size is computed automatically. Ignored when AUTODERIVATIVE=0. 'mpside' - the sidedness of the finite difference when computing numerical derivatives. This field can take four values: 0 - one-sided derivative computed automatically 1 - one-sided derivative (f(x+h) - f(x) )/h -1 - one-sided derivative (f(x) - f(x-h))/h 2 - two-sided derivative (f(x+h) - f(x-h))/(2*h) Where H is the STEP parameter described above. The "automatic" one-sided derivative method will chose a direction for the finite difference which does not violate any constraints. The other methods do not perform this check. The two-sided method is in principle more precise, but requires twice as many function evaluations. Default: 0. 'mpmaxstep' - the maximum change to be made in the parameter value. During the fitting process, the parameter will never be changed by more than this value in one iteration. A value of 0 indicates no maximum. Default: 0. 'tied' - a string expression which "ties" the parameter to other free or fixed parameters. Any expression involving constants and the parameter array P are permitted. Example: if parameter 2 is always to be twice parameter 1 then use the following: parinfo(2).tied = '2 * p(1)'. Since they are totally constrained, tied parameters are considered to be fixed; no errors are computed for them. [ NOTE: the PARNAME can't be used in expressions. ] 'mpprint' - if set to 1, then the default iterfunct will print the parameter value. If set to 0, the parameter value will not be printed. This tag can be used to selectively print only a few parameter values out of many. Default: 1 (all parameters printed) Future modifications to the PARINFO structure, if any, will involve adding dictionary tags beginning with the two letters "MP". Therefore programmers are urged to avoid using tags starting with the same letters; otherwise they are free to include their own fields within the PARINFO structure, and they will be ignored. PARINFO Example: parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]} for i in range(5)] parinfo[0]['fixed'] = 1 parinfo[4]['limited'][0] = 1 parinfo[4]['limits'][0] = 50. values = [5.7, 2.2, 500., 1.5, 2000.] for i in range(5): parinfo[i]['value']=values[i] A total of 5 parameters, with starting values of 5.7, 2.2, 500, 1.5, and 2000 are given. The first parameter is fixed at a value of 5.7, and the last parameter is constrained to be above 50. EXAMPLE import mpfit import numpy.oldnumeric as Numeric x = arange(100, float) p0 = [5.7, 2.2, 500., 1.5, 2000.] y = ( p[0] + p[1]*[x] + p[2]*[x**2] + p[3]*sqrt(x) + p[4]*log(x)) fa = {'x':x, 'y':y, 'err':err} m = mpfit('myfunct', p0, functkw=fa) print 'status = ', m.status if (m.status <= 0): print 'error message = ', m.errmsg print 'parameters = ', m.params Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X, Y, and ERR keyword parameters that are given by FUNCTKW. The results can be obtained from the returned object m. THEORY OF OPERATION There are many specific strategies for function minimization. One very popular technique is to use function gradient information to realize the local structure of the function. Near a local minimum the function value can be taylor expanded about x0 as follows: f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0) ----- --------------- ------------------------------- (1) Order 0th 1st 2nd Here f'(x) is the gradient vector of f at x, and f''(x) is the Hessian matrix of second derivatives of f at x. The vector x is the set of function parameters, not the measured data vector. One can find the minimum of f, f(xm) using Newton's method, and arrives at the following linear equation: f''(x0) . (xm-x0) = - f'(x0) (2) If an inverse can be found for f''(x0) then one can solve for (xm-x0), the step vector from the current position x0 to the new projected minimum. Here the problem has been linearized (ie, the gradient information is known to first order). f''(x0) is symmetric n x n matrix, and should be positive definite. The Levenberg - Marquardt technique is a variation on this theme. It adds an additional diagonal term to the equation which may aid the convergence properties: (f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a) where I is the identity matrix. When nu is large, the overall matrix is diagonally dominant, and the iterations follow steepest descent. When nu is small, the iterations are quadratically convergent. In principle, if f''(x0) and f'(x0) are known then xm-x0 can be determined. However the Hessian matrix is often difficult or impossible to compute. The gradient f'(x0) may be easier to compute, if even by finite difference techniques. So-called quasi-Newton techniques attempt to successively estimate f''(x0) by building up gradient information as the iterations proceed. In the least squares problem there are further simplifications which assist in solving eqn (2). The function to be minimized is a sum of squares: f = Sum(hi^2) (3) where hi is the ith residual out of m residuals as described above. This can be substituted back into eqn (2) after computing the derivatives: f' = 2 Sum(hi hi') f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4) If one assumes that the parameters are already close enough to a minimum, then one typically finds that the second term in f'' is negligible [or, in any case, is too difficult to compute]. Thus, equation (2) can be solved, at least approximately, using only gradient information. In matrix notation, the combination of eqns (2) and (4) becomes: hT' . h' . dx = - hT' . h (5) Where h is the residual vector (length m), hT is its transpose, h' is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The user function supplies the residual vector h, and in some cases h' when it is not found by finite differences (see MPFIT_FDJAC2, which finds h and hT'). Even if dx is not the best absolute step to take, it does provide a good estimate of the best *direction*, so often a line minimization will occur along the dx vector direction. The method of solution employed by MINPACK is to form the Q . R factorization of h', where Q is an orthogonal matrix such that QT . Q = I, and R is upper right triangular. Using h' = Q . R and the ortogonality of Q, eqn (5) becomes (RT . QT) . (Q . R) . dx = - (RT . QT) . h RT . R . dx = - RT . QT . h (6) R . dx = - QT . h where the last statement follows because R is upper triangular. Here, R, QT and h are known so this is a matter of solving for dx. The routine MPFIT_QRFAC provides the QR factorization of h, with pivoting, and MPFIT_QRSOLV provides the solution for dx. REFERENCES MINPACK-1, Jorge More', available from netlib (www.netlib.org). "Optimization Software Guide," Jorge More' and Stephen Wright, SIAM, *Frontiers in Applied Mathematics*, Number 14. More', Jorge J., "The Levenberg-Marquardt Algorithm: Implementation and Theory," in *Numerical Analysis*, ed. Watson, G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977. MODIFICATION HISTORY Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM Copyright (C) 1997-2002, Craig Markwardt This software is provided as is without any warranty whatsoever. Permission to use, copy, modify, and distribute modified or unmodified copies is granted, provided this copyright and disclaimer are included unchanged. Translated from MPFIT (Craig Markwardt's IDL package) to Python, August, 2002. Mark Rivers Converted from Numeric to numpy (Sergey Koposov, July 2008) """ import numpy import types import scipy.linalg.blas # Original FORTRAN documentation # ********** # # subroutine lmdif # # the purpose of lmdif is to minimize the sum of the squares of # m nonlinear functions in n variables by a modification of # the levenberg-marquardt algorithm. the user must provide a # subroutine which calculates the functions. the jacobian is # then calculated by a forward-difference approximation. # # the subroutine statement is # # subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn, # diag,mode,factor,nprint,info,nfev,fjac, # ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4) # # where # # fcn is the name of the user-supplied subroutine which # calculates the functions. fcn must be declared # in an external statement in the user calling # program, and should be written as follows. # # subroutine fcn(m,n,x,fvec,iflag) # integer m,n,iflag # double precision x(n),fvec(m) # ---------- # calculate the functions at x and # return this vector in fvec. # ---------- # return # end # # the value of iflag should not be changed by fcn unless # the user wants to terminate execution of lmdif. # in this case set iflag to a negative integer. # # m is a positive integer input variable set to the number # of functions. # # n is a positive integer input variable set to the number # of variables. n must not exceed m. # # x is an array of length n. on input x must contain # an initial estimate of the solution vector. on output x # contains the final estimate of the solution vector. # # fvec is an output array of length m which contains # the functions evaluated at the output x. # # ftol is a nonnegative input variable. termination # occurs when both the actual and predicted relative # reductions in the sum of squares are at most ftol. # therefore, ftol measures the relative error desired # in the sum of squares. # # xtol is a nonnegative input variable. termination # occurs when the relative error between two consecutive # iterates is at most xtol. therefore, xtol measures the # relative error desired in the approximate solution. # # gtol is a nonnegative input variable. termination # occurs when the cosine of the angle between fvec and # any column of the jacobian is at most gtol in absolute # value. therefore, gtol measures the orthogonality # desired between the function vector and the columns # of the jacobian. # # maxfev is a positive integer input variable. termination # occurs when the number of calls to fcn is at least # maxfev by the end of an iteration. # # epsfcn is an input variable used in determining a suitable # step length for the forward-difference approximation. this # approximation assumes that the relative errors in the # functions are of the order of epsfcn. if epsfcn is less # than the machine precision, it is assumed that the relative # errors in the functions are of the order of the machine # precision. # # diag is an array of length n. if mode = 1 (see # below), diag is internally set. if mode = 2, diag # must contain positive entries that serve as # multiplicative scale factors for the variables. # # mode is an integer input variable. if mode = 1, the # variables will be scaled internally. if mode = 2, # the scaling is specified by the input diag. other # values of mode are equivalent to mode = 1. # # factor is a positive input variable used in determining the # initial step bound. this bound is set to the product of # factor and the euclidean norm of diag*x if nonzero, or else # to factor itself. in most cases factor should lie in the # interval (.1,100.). 100. is a generally recommended value. # # nprint is an integer input variable that enables controlled # printing of iterates if it is positive. in this case, # fcn is called with iflag = 0 at the beginning of the first # iteration and every nprint iterations thereafter and # immediately prior to return, with x and fvec available # for printing. if nprint is not positive, no special calls # of fcn with iflag = 0 are made. # # info is an integer output variable. if the user has # terminated execution, info is set to the (negative) # value of iflag. see description of fcn. otherwise, # info is set as follows. # # info = 0 improper input parameters. # # info = 1 both actual and predicted relative reductions # in the sum of squares are at most ftol. # # info = 2 relative error between two consecutive iterates # is at most xtol. # # info = 3 conditions for info = 1 and info = 2 both hold. # # info = 4 the cosine of the angle between fvec and any # column of the jacobian is at most gtol in # absolute value. # # info = 5 number of calls to fcn has reached or # exceeded maxfev. # # info = 6 ftol is too small. no further reduction in # the sum of squares is possible. # # info = 7 xtol is too small. no further improvement in # the approximate solution x is possible. # # info = 8 gtol is too small. fvec is orthogonal to the # columns of the jacobian to machine precision. # # nfev is an integer output variable set to the number of # calls to fcn. # # fjac is an output m by n array. the upper n by n submatrix # of fjac contains an upper triangular matrix r with # diagonal elements of nonincreasing magnitude such that # # t t t # p *(jac *jac)*p = r *r, # # where p is a permutation matrix and jac is the final # calculated jacobian. column j of p is column ipvt(j) # (see below) of the identity matrix. the lower trapezoidal # part of fjac contains information generated during # the computation of r. # # ldfjac is a positive integer input variable not less than m # which specifies the leading dimension of the array fjac. # # ipvt is an integer output array of length n. ipvt # defines a permutation matrix p such that jac*p = q*r, # where jac is the final calculated jacobian, q is # orthogonal (not stored), and r is upper triangular # with diagonal elements of nonincreasing magnitude. # column j of p is column ipvt(j) of the identity matrix. # # qtf is an output array of length n which contains # the first n elements of the vector (q transpose)*fvec. # # wa1, wa2, and wa3 are work arrays of length n. # # wa4 is a work array of length m. # # subprograms called # # user-supplied ...... fcn # # minpack-supplied ... dpmpar,enorm,fdjac2,,qrfac # # fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod # # argonne national laboratory. minpack project. march 1980. # burton s. garbow, kenneth e. hillstrom, jorge j. more # # ********** class mpfit: blas_enorm32, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float32)) blas_enorm64, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float64)) def __init__(self, fcn, xall=None, functkw={}, parinfo=None, ftol=1.e-10, xtol=1.e-10, gtol=1.e-10, damp=0., maxiter=200, factor=100., nprint=1, iterfunct='default', iterkw={}, nocovar=0, rescale=0, autoderivative=1, quiet=0, diag=None, epsfcn=None, debug=0): """ Inputs: fcn: The function to be minimized. The function should return the weighted deviations between the model and the data, as described above. xall: An array of starting values for each of the parameters of the model. The number of parameters should be fewer than the number of measurements. This parameter is optional if the parinfo keyword is used (but see parinfo). The parinfo keyword provides a mechanism to fix or constrain individual parameters. Keywords: autoderivative: If this is set, derivatives of the function will be computed automatically via a finite differencing procedure. If not set, then fcn must provide the (analytical) derivatives. Default: set (=1) NOTE: to supply your own analytical derivatives, explicitly pass autoderivative=0 ftol: A nonnegative input variable. Termination occurs when both the actual and predicted relative reductions in the sum of squares are at most ftol (and status is accordingly set to 1 or 3). Therefore, ftol measures the relative error desired in the sum of squares. Default: 1E-10 functkw: A dictionary which contains the parameters to be passed to the user-supplied function specified by fcn via the standard Python keyword dictionary mechanism. This is the way you can pass additional data to your user-supplied function without using global variables. Consider the following example: if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.], 'errval':[1.,1.,1.] } then the user supplied function should be declared like this: def myfunct(p, fjac=None, xval=None, yval=None, errval=None): Default: {} No extra parameters are passed to the user-supplied function. gtol: A nonnegative input variable. Termination occurs when the cosine of the angle between fvec and any column of the jacobian is at most gtol in absolute value (and status is accordingly set to 4). Therefore, gtol measures the orthogonality desired between the function vector and the columns of the jacobian. Default: 1e-10 iterkw: The keyword arguments to be passed to iterfunct via the dictionary keyword mechanism. This should be a dictionary and is similar in operation to FUNCTKW. Default: {} No arguments are passed. iterfunct: The name of a function to be called upon each NPRINT iteration of the MPFIT routine. It should be declared in the following way: def iterfunct(myfunct, p, iter, fnorm, functkw=None, parinfo=None, quiet=0, dof=None, [iterkw keywords here]) # perform custom iteration update iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO and QUIET). myfunct: The user-supplied function to be minimized, p: The current set of model parameters iter: The iteration number functkw: The arguments to be passed to myfunct. fnorm: The chi-squared value. quiet: Set when no textual output should be printed. dof: The number of degrees of freedom, normally the number of points less the number of free parameters. See below for documentation of parinfo. In implementation, iterfunct can perform updates to the terminal or graphical user interface, to provide feedback while the fit proceeds. If the fit is to be stopped for any reason, then iterfunct should return a a status value between -15 and -1. Otherwise it should return None (e.g. no return statement) or 0. In principle, iterfunct should probably not modify the parameter values, because it may interfere with the algorithm's stability. In practice it is allowed. Default: an internal routine is used to print the parameter values. Set iterfunct=None if there is no user-defined routine and you don't want the internal default routine be called. maxiter: The maximum number of iterations to perform. If the number is exceeded, then the status value is set to 5 and MPFIT returns. Default: 200 iterations nocovar: Set this keyword to prevent the calculation of the covariance matrix before returning (see COVAR) Default: clear (=0) The covariance matrix is returned nprint: The frequency with which iterfunct is called. A value of 1 indicates that iterfunct is called with every iteration, while 2 indicates every other iteration, etc. Note that several Levenberg-Marquardt attempts can be made in a single iteration. Default value: 1 parinfo Provides a mechanism for more sophisticated constraints to be placed on parameter values. When parinfo is not passed, then it is assumed that all parameters are free and unconstrained. Values in parinfo are never modified during a call to MPFIT. See description above for the structure of PARINFO. Default value: None All parameters are free and unconstrained. quiet: Set this keyword when no textual output should be printed by MPFIT damp: A scalar number, indicating the cut-off value of residuals where "damping" will occur. Residuals with magnitudes greater than this number will be replaced by their hyperbolic tangent. This partially mitigates the so-called large residual problem inherent in least-squares solvers (as for the test problem CURVI, http://www.maxthis.com/curviex.htm). A value of 0 indicates no damping. Default: 0 Note: DAMP doesn't work with autoderivative=0 xtol: A nonnegative input variable. Termination occurs when the relative error between two consecutive iterates is at most xtol (and status is accordingly set to 2 or 3). Therefore, xtol measures the relative error desired in the approximate solution. Default: 1E-10 Outputs: Returns an object of type mpfit. The results are attributes of this class, e.g. mpfit.status, mpfit.errmsg, mpfit.params, npfit.niter, mpfit.covar. .status An integer status code is returned. All values greater than zero can represent success (however .status == 5 may indicate failure to converge). It can have one of the following values: -16 A parameter or function value has become infinite or an undefined number. This is usually a consequence of numerical overflow in the user's model function, which must be avoided. -15 to -1 These are error codes that either MYFUNCT or iterfunct may return to terminate the fitting process. Values from -15 to -1 are reserved for the user functions and will not clash with MPFIT. 0 Improper input parameters. 1 Both actual and predicted relative reductions in the sum of squares are at most ftol. 2 Relative error between two consecutive iterates is at most xtol 3 Conditions for status = 1 and status = 2 both hold. 4 The cosine of the angle between fvec and any column of the jacobian is at most gtol in absolute value. 5 The maximum number of iterations has been reached. 6 ftol is too small. No further reduction in the sum of squares is possible. 7 xtol is too small. No further improvement in the approximate solution x is possible. 8 gtol is too small. fvec is orthogonal to the columns of the jacobian to machine precision. .fnorm The value of the summed squared residuals for the returned parameter values. .covar The covariance matrix for the set of parameters returned by MPFIT. The matrix is NxN where N is the number of parameters. The square root of the diagonal elements gives the formal 1-sigma statistical errors on the parameters if errors were treated "properly" in fcn. Parameter errors are also returned in .perror. To compute the correlation matrix, pcor, use this example: cov = mpfit.covar pcor = cov * 0. for i in range(n): for j in range(n): pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j]) If nocovar is set or MPFIT terminated abnormally, then .covar is set to a scalar with value None. .errmsg A string error or warning message is returned. .nfev The number of calls to MYFUNCT performed. .niter The number of iterations completed. .perror The formal 1-sigma errors in each parameter, computed from the covariance matrix. If a parameter is held fixed, or if it touches a boundary, then the error is reported as zero. If the fit is unweighted (i.e. no errors were given, or the weights were uniformly set to unity), then .perror will probably not represent the true parameter uncertainties. *If* you can assume that the true reduced chi-squared value is unity -- meaning that the fit is implicitly assumed to be of good quality -- then the estimated parameter uncertainties can be computed by scaling .perror by the measured chi-squared value. dof = len(x) - len(mpfit.params) # deg of freedom # scaled uncertainties pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof) """ self.niter = 0 self.params = None self.covar = None self.perror = None self.status = 0 # Invalid input flag set while we check inputs self.debug = debug self.errmsg = '' self.nfev = 0 self.damp = damp self.dof=0 if fcn==None: self.errmsg = "Usage: parms = mpfit('myfunt', ... )" return if iterfunct == 'default': iterfunct = self.defiter # Parameter damping doesn't work when user is providing their own # gradients. if (self.damp != 0) and (autoderivative == 0): self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive' return # Parameters can either be stored in parinfo, or x. x takes precedence if it exists if (xall is None) and (parinfo is None): self.errmsg = 'ERROR: must pass parameters in P or PARINFO' return # Be sure that PARINFO is of the right type if parinfo is not None: if type(parinfo) != types.ListType: self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.' return else: if type(parinfo[0]) != types.DictionaryType: self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.' return if ((xall is not None) and (len(xall) != len(parinfo))): self.errmsg = 'ERROR: number of elements in PARINFO and P must agree' return # If the parameters were not specified at the command line, then # extract them from PARINFO if xall is None: xall = self.parinfo(parinfo, 'value') if xall is None: self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.' return # Make sure parameters are numpy arrays xall = numpy.asarray(xall) # In the case if the xall is not float or if is float but has less # than 64 bits we do convert it into double if xall.dtype.kind != 'f' or xall.dtype.itemsize<=4: xall = xall.astype(numpy.float) npar = len(xall) self.fnorm = -1. fnorm1 = -1. # TIED parameters? ptied = self.parinfo(parinfo, 'tied', default='', n=npar) self.qanytied = 0 for i in range(npar): ptied[i] = ptied[i].strip() if ptied[i] != '': self.qanytied = 1 self.ptied = ptied # FIXED parameters ? pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar) pfixed = (pfixed == 1) for i in range(npar): pfixed[i] = pfixed[i] or (ptied[i] != '') # Tied parameters are also effectively fixed # Finite differencing step, absolute and relative, and sidedness of deriv. step = self.parinfo(parinfo, 'step', default=0., n=npar) dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar) dside = self.parinfo(parinfo, 'mpside', default=0, n=npar) # Maximum and minimum steps allowed to be taken in one iteration maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar) minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar) qmin = minstep != 0 qmin[:] = False # Remove minstep for now!! qmax = maxstep != 0 if numpy.any(qmin & qmax & (maxstep<minstep)): self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP' return wh = (numpy.nonzero((qmin!=0.) | (qmax!=0.)))[0] qminmax = len(wh > 0) # Finish up the free parameters ifree = (numpy.nonzero(pfixed != 1))[0] nfree = len(ifree) if nfree == 0: self.errmsg = 'ERROR: no free parameters' return # Compose only VARYING parameters self.params = xall.copy() # self.params is the set of parameters to be returned x = self.params[ifree] # x is the set of free parameters # LIMITED parameters ? limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar) limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar) if (limited is not None) and (limits is not None): # Error checking on limits in parinfo if numpy.any((limited[:,0] & (xall < limits[:,0])) | (limited[:,1] & (xall > limits[:,1]))): self.errmsg = 'ERROR: parameters are not within PARINFO limits' return if numpy.any((limited[:,0] & limited[:,1]) & (limits[:,0] >= limits[:,1]) & (pfixed == 0)): self.errmsg = 'ERROR: PARINFO parameter limits are not consistent' return # Transfer structure values to local variables qulim = (limited[:,1])[ifree] ulim = (limits [:,1])[ifree] qllim = (limited[:,0])[ifree] llim = (limits [:,0])[ifree] if numpy.any((qulim!=0.) | (qllim!=0.)): qanylim = 1 else: qanylim = 0 else: # Fill in local variables with dummy values qulim = numpy.zeros(nfree) ulim = x * 0. qllim = qulim llim = x * 0. qanylim = 0 n = len(x) # Check input parameters for errors if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \ or (maxiter < 0) or (factor <= 0): self.errmsg = 'ERROR: input keywords are inconsistent' return if rescale != 0: self.errmsg = 'ERROR: DIAG parameter scales are inconsistent' if len(diag) < n: return if numpy.any(diag <= 0): return self.errmsg = '' [self.status, fvec] = self.call(fcn, self.params, functkw) if self.status < 0: self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed' return # If the returned fvec has more than four bits I assume that we have # double precision # It is important that the machar is determined by the precision of # the returned value, not by the precision of the input array if numpy.array([fvec]).dtype.itemsize>4: self.machar = machar(double=1) self.blas_enorm = mpfit.blas_enorm64 else: self.machar = machar(double=0) self.blas_enorm = mpfit.blas_enorm32 machep = self.machar.machep m = len(fvec) if m < n: self.errmsg = 'ERROR: number of parameters must not exceed data' return self.dof = m-nfree self.fnorm = self.enorm(fvec) # Initialize Levelberg-Marquardt parameter and iteration counter par = 0. self.niter = 1 qtf = x * 0. self.status = 0 # Beginning of the outer loop while(1): # If requested, call fcn to enable printing of iterates self.params[ifree] = x if self.qanytied: self.params = self.tie(self.params, ptied) if (nprint > 0) and (iterfunct is not None): if ((self.niter-1) % nprint) == 0: mperr = 0 xnew0 = self.params.copy() dof = numpy.max([len(fvec) - len(x), 0]) status = iterfunct(fcn, self.params, self.niter, self.fnorm**2, functkw=functkw, parinfo=parinfo, quiet=quiet, dof=dof, **iterkw) if status is not None: self.status = status # Check for user termination if self.status < 0: self.errmsg = 'WARNING: premature termination by ' + str(iterfunct) return # If parameters were changed (grrr..) then re-tie if numpy.max(numpy.abs(xnew0-self.params)) > 0: if self.qanytied: self.params = self.tie(self.params, ptied) x = self.params[ifree] # Calculate the jacobian matrix self.status = 2 catch_msg = 'calling MPFIT_FDJAC2' fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside, epsfcn=epsfcn, autoderivative=autoderivative, dstep=dstep, functkw=functkw, ifree=ifree, xall=self.params) if fjac is None: self.errmsg = 'WARNING: premature termination by FDJAC2' return # Determine if any of the parameters are pegged at the limits if qanylim: catch_msg = 'zeroing derivatives of pegged parameters' whlpeg = (numpy.nonzero(qllim & (x == llim)))[0] nlpeg = len(whlpeg) whupeg = (numpy.nonzero(qulim & (x == ulim)))[0] nupeg = len(whupeg) # See if any "pegged" values should keep their derivatives if nlpeg > 0: # Total derivative of sum wrt lower pegged parameters for i in range(nlpeg): sum0 = sum(fvec * fjac[:,whlpeg[i]]) if sum0 > 0: fjac[:,whlpeg[i]] = 0 if nupeg > 0: # Total derivative of sum wrt upper pegged parameters for i in range(nupeg): sum0 = sum(fvec * fjac[:,whupeg[i]]) if sum0 < 0: fjac[:,whupeg[i]] = 0 # Compute the QR factorization of the jacobian [fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1) # On the first iteration if "diag" is unspecified, scale # according to the norms of the columns of the initial jacobian catch_msg = 'rescaling diagonal elements' if self.niter == 1: if (rescale==0) or (len(diag) < n): diag = wa2.copy() diag[diag == 0] = 1. # On the first iteration, calculate the norm of the scaled x # and initialize the step bound delta wa3 = diag * x xnorm = self.enorm(wa3) delta = factor*xnorm if delta == 0.: delta = factor # Form (q transpose)*fvec and store the first n components in qtf catch_msg = 'forming (q transpose)*fvec' wa4 = fvec.copy() for j in range(n): lj = ipvt[j] temp3 = fjac[j,lj] if temp3 != 0: fj = fjac[j:,lj] wj = wa4[j:] # *** optimization wa4(j:*) wa4[j:] = wj - fj * sum(fj*wj) / temp3 fjac[j,lj] = wa1[j] qtf[j] = wa4[j] # From this point on, only the square matrix, consisting of the # triangle of R, is needed. fjac = fjac[0:n, 0:n] fjac.shape = [n, n] temp = fjac.copy() for i in range(n): temp[:,i] = fjac[:, ipvt[i]] fjac = temp.copy() # Check for overflow. This should be a cheap test here since FJAC # has been reduced to a (small) square matrix, and the test is # O(N^2). #wh = where(finite(fjac) EQ 0, ct) #if ct GT 0 then goto, FAIL_OVERFLOW # Compute the norm of the scaled gradient catch_msg = 'computing the scaled gradient' gnorm = 0. if self.fnorm != 0: for j in range(n): l = ipvt[j] if wa2[l] != 0: sum0 = sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm gnorm = numpy.max([gnorm,numpy.abs(sum0/wa2[l])]) # Test for convergence of the gradient norm if gnorm <= gtol: self.status = 4 break if maxiter == 0: self.status = 5 break # Rescale if necessary if rescale == 0: diag = numpy.choose(diag>wa2, (wa2, diag)) # Beginning of the inner loop while(1): # Determine the levenberg-marquardt parameter catch_msg = 'calculating LM parameter (MPFIT_)' [fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf, delta, wa1, wa2, par=par) # Store the direction p and x+p. Calculate the norm of p wa1 = -wa1 if (qanylim == 0) and (qminmax == 0): # No parameter limits, so just move to new position WA2 alpha = 1. wa2 = x + wa1 else: # Respect the limits. If a step were to go out of bounds, then # we should take a step in the same direction but shorter distance. # The step should take us right to the limit in that case. alpha = 1. if qanylim: # Do not allow any steps out of bounds catch_msg = 'checking for a step out of bounds' if nlpeg > 0: wa1[whlpeg] = numpy.clip( wa1[whlpeg], 0., numpy.max(wa1)) if nupeg > 0: wa1[whupeg] = numpy.clip(wa1[whupeg], numpy.min(wa1), 0.) dwa1 = numpy.abs(wa1) > machep whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)))[0] if len(whl) > 0: t = ((llim[whl] - x[whl]) / wa1[whl]) alpha = numpy.min([alpha, numpy.min(t)]) whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)))[0] if len(whu) > 0: t = ((ulim[whu] - x[whu]) / wa1[whu]) alpha = numpy.min([alpha, numpy.min(t)]) # Obey any max step values. if qminmax: nwa1 = wa1 * alpha whmax = (numpy.nonzero((qmax != 0.) & (maxstep > 0)))[0] if len(whmax) > 0: mrat = numpy.max(numpy.abs(nwa1[whmax]) / numpy.abs(maxstep[ifree[whmax]])) if mrat > 1: alpha = alpha / mrat # Scale the resulting vector wa1 = wa1 * alpha wa2 = x + wa1 # Adjust the final output values. If the step put us exactly # on a boundary, make sure it is exact. sgnu = (ulim >= 0) * 2. - 1. sgnl = (llim >= 0) * 2. - 1. # Handles case of # ... nonzero *LIM ... ...zero * LIM ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep wh = (numpy.nonzero((qulim!=0) & (wa2 >= ulim1)))[0] if len(wh) > 0: wa2[wh] = ulim[wh] wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim1)))[0] if len(wh) > 0: wa2[wh] = llim[wh] # endelse wa3 = diag * wa1 pnorm = self.enorm(wa3) # On the first iteration, adjust the initial step bound if self.niter == 1: delta = numpy.min([delta,pnorm]) self.params[ifree] = wa2 # Evaluate the function at x+p and calculate its norm mperr = 0 catch_msg = 'calling '+str(fcn) [self.status, wa4] = self.call(fcn, self.params, functkw) if self.status < 0: self.errmsg = 'WARNING: premature termination by "'+fcn+'"' return fnorm1 = self.enorm(wa4) # Compute the scaled actual reduction catch_msg = 'computing convergence criteria' actred = -1. if (0.1 * fnorm1) < self.fnorm: actred = - (fnorm1/self.fnorm)**2 + 1. # Compute the scaled predicted reduction and the scaled directional # derivative for j in range(n): wa3[j] = 0 wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]] # Remember, alpha is the fraction of the full LM step actually # taken temp1 = self.enorm(alpha*wa3)/self.fnorm temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm prered = temp1*temp1 + (temp2*temp2)/0.5 dirder = -(temp1*temp1 + temp2*temp2) # Compute the ratio of the actual to the predicted reduction. ratio = 0. if prered != 0: ratio = actred/prered # Update the step bound if ratio <= 0.25: if actred >= 0: temp = .5 else: temp = .5*dirder/(dirder + .5*actred) if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1): temp = 0.1 delta = temp*numpy.min([delta,pnorm/0.1]) par = par/temp else: if (par == 0) or (ratio >= 0.75): delta = pnorm/.5 par = .5*par # Test for successful iteration if ratio >= 0.0001: # Successful iteration. Update x, fvec, and their norms x = wa2 wa2 = diag * x fvec = wa4 xnorm = self.enorm(wa2) self.fnorm = fnorm1 self.niter = self.niter + 1 # Tests for convergence if (numpy.abs(actred) <= ftol) and (prered <= ftol) \ and (0.5 * ratio <= 1): self.status = 1 if delta <= xtol*xnorm: self.status = 2 if (numpy.abs(actred) <= ftol) and (prered <= ftol) \ and (0.5 * ratio <= 1) and (self.status == 2): self.status = 3 if self.status != 0: break # Tests for termination and stringent tolerances if self.niter >= maxiter: self.status = 5 if (numpy.abs(actred) <= machep) and (prered <= machep) \ and (0.5*ratio <= 1): self.status = 6 if delta <= machep*xnorm: self.status = 7 if gnorm <= machep: self.status = 8 if self.status != 0: break # End of inner loop. Repeat if iteration unsuccessful if ratio >= 0.0001: break # Check for over/underflow if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) & \ numpy.isfinite(x)) or ~numpy.isfinite(ratio): errmsg = ('''ERROR: parameter or function value(s) have become 'infinite; check model function for over- 'and underflow''') self.status = -16 break #wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct) #if ct GT 0 OR finite(ratio) EQ 0 then begin if self.status != 0: break; # End of outer loop. catch_msg = 'in the termination phase' # Termination, either normal or user imposed. if len(self.params) == 0: return if nfree == 0: self.params = xall.copy() else: self.params[ifree] = x if (nprint > 0) and (self.status > 0): catch_msg = 'calling ' + str(fcn) [status, fvec] = self.call(fcn, self.params, functkw) catch_msg = 'in the termination phase' self.fnorm = self.enorm(fvec) if (self.fnorm is not None) and (fnorm1 is not None): self.fnorm = numpy.max([self.fnorm, fnorm1]) self.fnorm = self.fnorm**2. self.covar = None self.perror = None # (very carefully) set the covariance matrix COVAR if (self.status > 0) and (nocovar==0) and (n is not None) \ and (fjac is not None) and (ipvt is not None): sz = fjac.shape if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \ and (len(ipvt) >= n): catch_msg = 'computing the covariance matrix' cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n]) cv.shape = [n, n] nn = len(xall) # Fill in actual covariance matrix, accounting for fixed # parameters. self.covar = numpy.zeros([nn, nn], dtype=float) for i in range(n): self.covar[ifree,ifree[i]] = cv[:,i] # Compute errors in parameters catch_msg = 'computing parameter errors' self.perror = numpy.zeros(nn, dtype=float) d = numpy.diagonal(self.covar) wh = (numpy.nonzero(d >= 0))[0] if len(wh) > 0: self.perror[wh] = numpy.sqrt(d[wh]) return def __str__(self): return {'params': self.params, 'niter': self.niter, 'params': self.params, 'covar': self.covar, 'perror': self.perror, 'status': self.status, 'debug': self.debug, 'errmsg': self.errmsg, 'nfev': self.nfev, 'damp': self.damp #,'machar':self.machar }.__str__() # Default procedure to be called every iteration. It simply prints # the parameter values. def defiter(self, fcn, x, iter, fnorm=None, functkw=None, quiet=0, iterstop=None, parinfo=None, format=None, pformat='%.10g', dof=1): if self.debug: print 'Entering defiter...' if quiet: return if fnorm is None: [status, fvec] = self.call(fcn, x, functkw) fnorm = self.enorm(fvec)**2 # Determine which parameters to print nprint = len(x) print "Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof) for i in range(nprint): if (parinfo is not None) and (parinfo[i].has_key('parname')): p = ' ' + parinfo[i]['parname'] + ' = ' else: p = ' P' + str(i) + ' = ' if (parinfo is not None) and (parinfo[i].has_key('mpprint')): iprint = parinfo[i]['mpprint'] else: iprint = 1 if iprint: print p + (pformat % x[i]) + ' ' return 0 # DO_ITERSTOP: # if keyword_set(iterstop) then begin # k = get_kbrd(0) # if k EQ string(byte(7)) then begin # message, 'WARNING: minimization not complete', /info # print, 'Do you want to terminate this procedure? (y/n)', $ # format='(A,$)' # k = '' # read, k # if strupcase(strmid(k,0,1)) EQ 'Y' then begin # message, 'WARNING: Procedure is terminating.', /info # mperr = -1 # endif # endif # endif # Procedure to parse the parameter values in PARINFO, which is a list of dictionaries def parinfo(self, parinfo=None, key='a', default=None, n=0): if self.debug: print 'Entering parinfo...' if (n == 0) and (parinfo is not None): n = len(parinfo) if n == 0: values = default return values values = [] for i in range(n): if (parinfo is not None) and (parinfo[i].has_key(key)): values.append(parinfo[i][key]) else: values.append(default) # Convert to numeric arrays if possible test = default if type(default) == types.ListType: test=default[0] if isinstance(test, types.IntType): values = numpy.asarray(values, int) elif isinstance(test, types.FloatType): values = numpy.asarray(values, float) return values # Call user function or procedure, with _EXTRA or not, with # derivatives or not. def call(self, fcn, x, functkw, fjac=None): if self.debug: print 'Entering call...' if self.qanytied: x = self.tie(x, self.ptied) self.nfev = self.nfev + 1 if fjac is None: [status, f] = fcn(x, fjac=fjac, **functkw) if self.damp > 0: # Apply the damping if requested. This replaces the residuals # with their hyperbolic tangent. Thus residuals larger than # DAMP are essentially clipped. f = numpy.tanh(f/self.damp) return [status, f] else: return fcn(x, fjac=fjac, **functkw) def enorm(self, vec): ans = self.blas_enorm(vec) return ans def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None, epsfcn=None, autoderivative=1, functkw=None, xall=None, ifree=None, dstep=None): if self.debug: print 'Entering fdjac2...' machep = self.machar.machep if epsfcn is None: epsfcn = machep if xall is None: xall = x if ifree is None: ifree = numpy.arange(len(xall)) if step is None: step = x * 0. nall = len(xall) eps = numpy.sqrt(numpy.max([epsfcn, machep])) m = len(fvec) n = len(x) # Compute analytical derivative if requested if autoderivative == 0: mperr = 0 fjac = numpy.zeros(nall, dtype=float) fjac[ifree] = 1.0 # Specify which parameters need derivatives [status, fp] = self.call(fcn, xall, functkw, fjac=fjac) if len(fjac) != m*nall: print 'ERROR: Derivative matrix was not computed properly.' return None # This definition is consistent with CURVEFIT # Sign error found (thanks Jesus Fernandez <[email protected]>) fjac.shape = [m,nall] fjac = -fjac # Select only the free parameters if len(ifree) < nall: fjac = fjac[:,ifree] fjac.shape = [m, n] return fjac fjac = numpy.zeros([m, n], dtype=float) h = eps * numpy.abs(x) # if STEP is given, use that # STEP includes the fixed parameters if step is not None: stepi = step[ifree] wh = (numpy.nonzero(stepi > 0))[0] if len(wh) > 0: h[wh] = stepi[wh] # if relative step is given, use that # DSTEP includes the fixed parameters if len(dstep) > 0: dstepi = dstep[ifree] wh = (numpy.nonzero(dstepi > 0))[0] if len(wh) > 0: h[wh] = numpy.abs(dstepi[wh]*x[wh]) # In case any of the step values are zero h[h == 0] = eps # Reverse the sign of the step if we are up against the parameter # limit, or if the user requested it. # DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only # varying ones) mask = dside[ifree] == -1 if len(ulimited) > 0 and len(ulimit) > 0: mask = (mask | ((ulimited!=0) & (x > ulimit-h))) wh = (numpy.nonzero(mask))[0] if len(wh) > 0: h[wh] = - h[wh] # Loop through parameters, computing the derivative for each for j in range(n): xp = xall.copy() xp[ifree[j]] = xp[ifree[j]] + h[j] [status, fp] = self.call(fcn, xp, functkw) if status < 0: return None if numpy.abs(dside[ifree[j]]) <= 1: # COMPUTE THE ONE-SIDED DERIVATIVE # Note optimization fjac(0:*,j) fjac[0:,j] = (fp-fvec)/h[j] else: # COMPUTE THE TWO-SIDED DERIVATIVE xp[ifree[j]] = xall[ifree[j]] - h[j] mperr = 0 [status, fm] = self.call(fcn, xp, functkw) if status < 0: return None # Note optimization fjac(0:*,j) fjac[0:,j] = (fp-fm)/(2*h[j]) return fjac # Original FORTRAN documentation # ********** # # subroutine qrfac # # this subroutine uses householder transformations with column # pivoting (optional) to compute a qr factorization of the # m by n matrix a. that is, qrfac determines an orthogonal # matrix q, a permutation matrix p, and an upper trapezoidal # matrix r with diagonal elements of nonincreasing magnitude, # such that a*p = q*r. the householder transformation for # column k, k = 1,2,...,min(m,n), is of the form # # t # i - (1/u(k))*u*u # # where u has zeros in the first k-1 positions. the form of # this transformation and the method of pivoting first # appeared in the corresponding linpack subroutine. # # the subroutine statement is # # subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa) # # where # # m is a positive integer input variable set to the number # of rows of a. # # n is a positive integer input variable set to the number # of columns of a. # # a is an m by n array. on input a contains the matrix for # which the qr factorization is to be computed. on output # the strict upper trapezoidal part of a contains the strict # upper trapezoidal part of r, and the lower trapezoidal # part of a contains a factored form of q (the non-trivial # elements of the u vectors described above). # # lda is a positive integer input variable not less than m # which specifies the leading dimension of the array a. # # pivot is a logical input variable. if pivot is set true, # then column pivoting is enforced. if pivot is set false, # then no column pivoting is done. # # ipvt is an integer output array of length lipvt. ipvt # defines the permutation matrix p such that a*p = q*r. # column j of p is column ipvt(j) of the identity matrix. # if pivot is false, ipvt is not referenced. # # lipvt is a positive integer input variable. if pivot is false, # then lipvt may be as small as 1. if pivot is true, then # lipvt must be at least n. # # rdiag is an output array of length n which contains the # diagonal elements of r. # # acnorm is an output array of length n which contains the # norms of the corresponding columns of the input matrix a. # if this information is not needed, then acnorm can coincide # with rdiag. # # wa is a work array of length n. if pivot is false, then wa # can coincide with rdiag. # # subprograms called # # minpack-supplied ... dpmpar,enorm # # fortran-supplied ... dmax1,dsqrt,min0 # # argonne national laboratory. minpack project. march 1980. # burton s. garbow, kenneth e. hillstrom, jorge j. more # # ********** # # PIVOTING / PERMUTING: # # Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in # permuted order. # # RDIAG is in permuted order. # ACNORM is in standard parameter order. # # # NOTE: in IDL the factors appear slightly differently than described # above. The matrix A is still m x n where m >= n. # # The "upper" triangular matrix R is actually stored in the strict # lower left triangle of A under the standard notation of IDL. # # The reflectors that generate Q are in the upper trapezoid of A upon # output. # # EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]] # aa = [[9.,2.,6.],[4.,8.,7.]] # mpfit_qrfac, aa, aapvt, rdiag, aanorm # IDL> print, aa # 1.81818* 0.181818* 0.545455* # -8.54545+ 1.90160* 0.432573* # IDL> print, rdiag # -11.0000+ -7.48166+ # # The components marked with a * are the components of the # reflectors, and those marked with a + are components of R. # # To reconstruct Q and R we proceed as follows. First R. # r = fltarr(m, n) # for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag # r(lindgen(n)*(m+1)) = rdiag # # Next, Q, which are composed from the reflectors. Each reflector v # is taken from the upper trapezoid of aa, and converted to a matrix # via (I - 2 vT . v / (v . vT)). # # hh = ident # identity matrix # for i = 0, n-1 do begin # v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector # hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix # endfor # # Test the result: # IDL> print, hh # transpose(r) # 9.00000 4.00000 # 2.00000 8.00000 # 6.00000 7.00000 # # Note that it is usually never necessary to form the Q matrix # explicitly, and MPFIT does not. def qrfac(self, a, pivot=0): if self.debug: print 'Entering qrfac...' machep = self.machar.machep sz = a.shape m = sz[0] n = sz[1] # Compute the initial column norms and initialize arrays acnorm = numpy.zeros(n, dtype=float) for j in range(n): acnorm[j] = self.enorm(a[:,j]) rdiag = acnorm.copy() wa = rdiag.copy() ipvt = numpy.arange(n) # Reduce a to r with householder transformations minmn = numpy.min([m,n]) for j in range(minmn): if pivot != 0: # Bring the column of largest norm into the pivot position rmax = numpy.max(rdiag[j:]) kmax = (numpy.nonzero(rdiag[j:] == rmax))[0] ct = len(kmax) kmax = kmax + j if ct > 0: kmax = kmax[0] # Exchange rows via the pivot only. Avoid actually exchanging # the rows, in case there is lots of memory transfer. The # exchange occurs later, within the body of MPFIT, after the # extraneous columns of the matrix have been shed. if kmax != j: temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp rdiag[kmax] = rdiag[j] wa[kmax] = wa[j] # Compute the householder transformation to reduce the jth # column of A to a multiple of the jth unit vector lj = ipvt[j] ajj = a[j:,lj] ajnorm = self.enorm(ajj) if ajnorm == 0: break if a[j,lj] < 0: ajnorm = -ajnorm ajj = ajj / ajnorm ajj[0] = ajj[0] + 1 # *** Note optimization a(j:*,j) a[j:,lj] = ajj # Apply the transformation to the remaining columns # and update the norms # NOTE to SELF: tried to optimize this by removing the loop, # but it actually got slower. Reverted to "for" loop to keep # it simple. if j+1 < n: for k in range(j+1, n): lk = ipvt[k] ajk = a[j:,lk] # *** Note optimization a(j:*,lk) # (corrected 20 Jul 2000) if a[j,lj] != 0: a[j:,lk] = ajk - ajj * sum(ajk*ajj)/a[j,lj] if (pivot != 0) and (rdiag[k] != 0): temp = a[j,lk]/rdiag[k] rdiag[k] = rdiag[k] * numpy.sqrt(numpy.max([(1.-temp**2), 0.])) temp = rdiag[k]/wa[k] if (0.05*temp*temp) <= machep: rdiag[k] = self.enorm(a[j+1:,lk]) wa[k] = rdiag[k] rdiag[j] = -ajnorm return [a, ipvt, rdiag, acnorm] # Original FORTRAN documentation # ********** # # subroutine qrsolv # # given an m by n matrix a, an n by n diagonal matrix d, # and an m-vector b, the problem is to determine an x which # solves the system # # a*x = b , d*x = 0 , # # in the least squares sense. # # this subroutine completes the solution of the problem # if it is provided with the necessary information from the # factorization, with column pivoting, of a. that is, if # a*p = q*r, where p is a permutation matrix, q has orthogonal # columns, and r is an upper triangular matrix with diagonal # elements of nonincreasing magnitude, then qrsolv expects # the full upper triangle of r, the permutation matrix p, # and the first n components of (q transpose)*b. the system # a*x = b, d*x = 0, is then equivalent to # # t t # r*z = q *b , p *d*p*z = 0 , # # where x = p*z. if this system does not have full rank, # then a least squares solution is obtained. on output qrsolv # also provides an upper triangular matrix s such that # # t t t # p *(a *a + d*d)*p = s *s . # # s is computed within qrsolv and may be of separate interest. # # the subroutine statement is # # subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa) # # where # # n is a positive integer input variable set to the order of r. # # r is an n by n array. on input the full upper triangle # must contain the full upper triangle of the matrix r. # on output the full upper triangle is unaltered, and the # strict lower triangle contains the strict upper triangle # (transposed) of the upper triangular matrix s. # # ldr is a positive integer input variable not less than n # which specifies the leading dimension of the array r. # # ipvt is an integer input array of length n which defines the # permutation matrix p such that a*p = q*r. column j of p # is column ipvt(j) of the identity matrix. # # diag is an input array of length n which must contain the # diagonal elements of the matrix d. # # qtb is an input array of length n which must contain the first # n elements of the vector (q transpose)*b. # # x is an output array of length n which contains the least # squares solution of the system a*x = b, d*x = 0. # # sdiag is an output array of length n which contains the # diagonal elements of the upper triangular matrix s. # # wa is a work array of length n. # # subprograms called # # fortran-supplied ... dabs,dsqrt # # argonne national laboratory. minpack project. march 1980. # burton s. garbow, kenneth e. hillstrom, jorge j. more # def qrsolv(self, r, ipvt, diag, qtb, sdiag): if self.debug: print 'Entering qrsolv...' sz = r.shape m = sz[0] n = sz[1] # copy r and (q transpose)*b to preserve input and initialize s. # in particular, save the diagonal elements of r in x. for j in range(n): r[j:n,j] = r[j,j:n] x = numpy.diagonal(r).copy() wa = qtb.copy() # Eliminate the diagonal matrix d using a givens rotation for j in range(n): l = ipvt[j] if diag[l] == 0: break sdiag[j:] = 0 sdiag[j] = diag[l] # The transformations to eliminate the row of d modify only a # single element of (q transpose)*b beyond the first n, which # is initially zero. qtbpj = 0. for k in range(j,n): if sdiag[k] == 0: break if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]): cotan = r[k,k]/sdiag[k] sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan) cosine = sine*cotan else: tang = sdiag[k]/r[k,k] cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang) sine = cosine*tang # Compute the modified diagonal element of r and the # modified element of ((q transpose)*b,0). r[k,k] = cosine*r[k,k] + sine*sdiag[k] temp = cosine*wa[k] + sine*qtbpj qtbpj = -sine*wa[k] + cosine*qtbpj wa[k] = temp # Accumulate the transformation in the row of s if n > k+1: temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n] sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n] r[k+1:n,k] = temp sdiag[j] = r[j,j] r[j,j] = x[j] # Solve the triangular system for z. If the system is singular # then obtain a least squares solution nsing = n wh = (numpy.nonzero(sdiag == 0))[0] if len(wh) > 0: nsing = wh[0] wa[nsing:] = 0 if nsing >= 1: wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] # Degenerate case # *** Reverse loop *** for j in range(nsing-2,-1,-1): sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing]) wa[j] = (wa[j]-sum0)/sdiag[j] # Permute the components of z back to components of x x[ipvt] = wa return (r, x, sdiag) # Original FORTRAN documentation # # subroutine lmpar # # given an m by n matrix a, an n by n nonsingular diagonal # matrix d, an m-vector b, and a positive number delta, # the problem is to determine a value for the parameter # par such that if x solves the system # # a*x = b , sqrt(par)*d*x = 0 , # # in the least squares sense, and dxnorm is the euclidean # norm of d*x, then either par is zero and # # (dxnorm-delta) .le. 0.1*delta , # # or par is positive and # # abs(dxnorm-delta) .le. 0.1*delta . # # this subroutine completes the solution of the problem # if it is provided with the necessary information from the # qr factorization, with column pivoting, of a. that is, if # a*p = q*r, where p is a permutation matrix, q has orthogonal # columns, and r is an upper triangular matrix with diagonal # elements of nonincreasing magnitude, then lmpar expects # the full upper triangle of r, the permutation matrix p, # and the first n components of (q transpose)*b. on output # lmpar also provides an upper triangular matrix s such that # # t t t # p *(a *a + par*d*d)*p = s *s . # # s is employed within lmpar and may be of separate interest. # # only a few iterations are generally needed for convergence # of the algorithm. if, however, the limit of 10 iterations # is reached, then the output par will contain the best # value obtained so far. # # the subroutine statement is # # subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag, # wa1,wa2) # # where # # n is a positive integer input variable set to the order of r. # # r is an n by n array. on input the full upper triangle # must contain the full upper triangle of the matrix r. # on output the full upper triangle is unaltered, and the # strict lower triangle contains the strict upper triangle # (transposed) of the upper triangular matrix s. # # ldr is a positive integer input variable not less than n # which specifies the leading dimension of the array r. # # ipvt is an integer input array of length n which defines the # permutation matrix p such that a*p = q*r. column j of p # is column ipvt(j) of the identity matrix. # # diag is an input array of length n which must contain the # diagonal elements of the matrix d. # # qtb is an input array of length n which must contain the first # n elements of the vector (q transpose)*b. # # delta is a positive input variable which specifies an upper # bound on the euclidean norm of d*x. # # par is a nonnegative variable. on input par contains an # initial estimate of the levenberg-marquardt parameter. # on output par contains the final estimate. # # x is an output array of length n which contains the least # squares solution of the system a*x = b, sqrt(par)*d*x = 0, # for the output par. # # sdiag is an output array of length n which contains the # diagonal elements of the upper triangular matrix s. # # wa1 and wa2 are work arrays of length n. # # subprograms called # # minpack-supplied ... dpmpar,enorm,qrsolv # # fortran-supplied ... dabs,dmax1,dmin1,dsqrt # # argonne national laboratory. minpack project. march 1980. # burton s. garbow, kenneth e. hillstrom, jorge j. more # def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None): if self.debug: print 'Entering lmpar...' dwarf = self.machar.minnum machep = self.machar.machep sz = r.shape m = sz[0] n = sz[1] # Compute and store in x the gauss-newton direction. If the # jacobian is rank-deficient, obtain a least-squares solution nsing = n wa1 = qtb.copy() rthresh = numpy.max(numpy.abs(numpy.diagonal(r))) * machep wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r)) < rthresh))[0] if len(wh) > 0: nsing = wh[0] wa1[wh[0]:] = 0 if nsing >= 1: # *** Reverse loop *** for j in range(nsing-1,-1,-1): wa1[j] = wa1[j]/r[j,j] if j-1 >= 0: wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j] # Note: ipvt here is a permutation array x[ipvt] = wa1 # Initialize the iteration counter. Evaluate the function at the # origin, and test for acceptance of the gauss-newton direction iter = 0 wa2 = diag * x dxnorm = self.enorm(wa2) fp = dxnorm - delta if fp <= 0.1*delta: return [r, 0., x, sdiag] # If the jacobian is not rank deficient, the newton step provides a # lower bound, parl, for the zero of the function. Otherwise set # this bound to zero. parl = 0. if nsing >= n: wa1 = diag[ipvt] * wa2[ipvt] / dxnorm wa1[0] = wa1[0] / r[0,0] # Degenerate case for j in range(1,n): # Note "1" here, not zero sum0 = sum(r[0:j,j]*wa1[0:j]) wa1[j] = (wa1[j] - sum0)/r[j,j] temp = self.enorm(wa1) parl = ((fp/delta)/temp)/temp # Calculate an upper bound, paru, for the zero of the function for j in range(n): sum0 = sum(r[0:j+1,j]*qtb[0:j+1]) wa1[j] = sum0/diag[ipvt[j]] gnorm = self.enorm(wa1) paru = gnorm/delta if paru == 0: paru = dwarf/numpy.min([delta,0.1]) # If the input par lies outside of the interval (parl,paru), set # par to the closer endpoint par = numpy.max([par,parl]) par = numpy.min([par,paru]) if par == 0: par = gnorm/dxnorm # Beginning of an interation while(1): iter = iter + 1 # Evaluate the function at the current value of par if par == 0: par = numpy.max([dwarf, paru*0.001]) temp = numpy.sqrt(par) wa1 = temp * diag [r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag) wa2 = diag*x dxnorm = self.enorm(wa2) temp = fp fp = dxnorm - delta if (numpy.abs(fp) <= 0.1*delta) or \ ((parl == 0) and (fp <= temp) and (temp < 0)) or \ (iter == 10): break; # Compute the newton correction wa1 = diag[ipvt] * wa2[ipvt] / dxnorm for j in range(n-1): wa1[j] = wa1[j]/sdiag[j] wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j] wa1[n-1] = wa1[n-1]/sdiag[n-1] # Degenerate case temp = self.enorm(wa1) parc = ((fp/delta)/temp)/temp # Depending on the sign of the function, update parl or paru if fp > 0: parl = numpy.max([parl,par]) if fp < 0: paru = numpy.min([paru,par]) # Compute an improved estimate for par par = numpy.max([parl, par+parc]) # End of an iteration # Termination return [r, par, x, sdiag] # Procedure to tie one parameter to another. def tie(self, p, ptied=None): if self.debug: print 'Entering tie...' if ptied is None: return for i in range(len(ptied)): if ptied[i] == '': continue cmd = 'p[' + str(i) + '] = ' + ptied[i] exec(cmd) return p # Original FORTRAN documentation # ********** # # subroutine covar # # given an m by n matrix a, the problem is to determine # the covariance matrix corresponding to a, defined as # # t # inverse(a *a) . # # this subroutine completes the solution of the problem # if it is provided with the necessary information from the # qr factorization, with column pivoting, of a. that is, if # a*p = q*r, where p is a permutation matrix, q has orthogonal # columns, and r is an upper triangular matrix with diagonal # elements of nonincreasing magnitude, then covar expects # the full upper triangle of r and the permutation matrix p. # the covariance matrix is then computed as # # t t # p*inverse(r *r)*p . # # if a is nearly rank deficient, it may be desirable to compute # the covariance matrix corresponding to the linearly independent # columns of a. to define the numerical rank of a, covar uses # the tolerance tol. if l is the largest integer such that # # abs(r(l,l)) .gt. tol*abs(r(1,1)) , # # then covar computes the covariance matrix corresponding to # the first l columns of r. for k greater than l, column # and row ipvt(k) of the covariance matrix are set to zero. # # the subroutine statement is # # subroutine covar(n,r,ldr,ipvt,tol,wa) # # where # # n is a positive integer input variable set to the order of r. # # r is an n by n array. on input the full upper triangle must # contain the full upper triangle of the matrix r. on output # r contains the square symmetric covariance matrix. # # ldr is a positive integer input variable not less than n # which specifies the leading dimension of the array r. # # ipvt is an integer input array of length n which defines the # permutation matrix p such that a*p = q*r. column j of p # is column ipvt(j) of the identity matrix. # # tol is a nonnegative input variable used to define the # numerical rank of a in the manner described above. # # wa is a work array of length n. # # subprograms called # # fortran-supplied ... dabs # # argonne national laboratory. minpack project. august 1980. # burton s. garbow, kenneth e. hillstrom, jorge j. more # # ********** def calc_covar(self, rr, ipvt=None, tol=1.e-14): if self.debug: print 'Entering calc_covar...' if numpy.rank(rr) != 2: print 'ERROR: r must be a two-dimensional matrix' return -1 s = rr.shape n = s[0] if s[0] != s[1]: print 'ERROR: r must be a square matrix' return -1 if ipvt is None: ipvt = numpy.arange(n) r = rr.copy() r.shape = [n,n] # For the inverse of r in the full upper triangle of r l = -1 tolr = tol * numpy.abs(r[0,0]) for k in range(n): if numpy.abs(r[k,k]) <= tolr: break r[k,k] = 1./r[k,k] for j in range(k): temp = r[k,k] * r[j,k] r[j,k] = 0. r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j] l = k # Form the full upper triangle of the inverse of (r transpose)*r # in the full upper triangle of r if l >= 0: for k in range(l+1): for j in range(k): temp = r[j,k] r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k] temp = r[k,k] r[0:k+1,k] = temp * r[0:k+1,k] # For the full lower triangle of the covariance matrix # in the strict lower triangle or and in wa wa = numpy.repeat([r[0,0]], n) for j in range(n): jj = ipvt[j] sing = j > l for i in range(j+1): if sing: r[i,j] = 0. ii = ipvt[i] if ii > jj: r[ii,jj] = r[i,j] if ii < jj: r[jj,ii] = r[i,j] wa[jj] = r[j,j] # Symmetrize the covariance matrix in r for j in range(n): r[0:j+1,j] = r[j,0:j+1] r[j,j] = wa[j] return r class machar: def __init__(self, double=1): if double == 0: info = numpy.finfo(numpy.float32) else: info = numpy.finfo(numpy.float64) self.machep = info.eps self.maxnum = info.max self.minnum = info.tiny self.maxlog = numpy.log(self.maxnum) self.minlog = numpy.log(self.minnum) self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10 self.rgiant = numpy.sqrt(self.maxnum) * 0.1
{ "content_hash": "bb2ea70924ef59d924b99f17f8522949", "timestamp": "", "source": "github", "line_count": 2341, "max_line_length": 96, "avg_line_length": 33.497650576676634, "alnum_prop": 0.6661735825958326, "repo_name": "richardseifert/Hydra_pipeline", "id": "7943e581c7b4cb8342d4adc829fca8ec929a1b99", "size": "78418", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "libs/mpfit.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "266115" } ], "symlink_target": "" }
"""Support for (EMEA/EU-based) Honeywell TCC climate systems. Such systems include evohome, Round Thermostat, and others. """ from __future__ import annotations from datetime import datetime as dt, timedelta from http import HTTPStatus import logging import re from typing import Any import aiohttp.client_exceptions import evohomeasync import evohomeasync2 import voluptuous as vol from homeassistant.const import ( ATTR_ENTITY_ID, CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME, Platform, ) from homeassistant.core import HomeAssistant, ServiceCall, callback from homeassistant.helpers import entity_registry as er from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.discovery import async_load_platform from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_call_later, async_track_time_interval from homeassistant.helpers.service import verify_domain_control from homeassistant.helpers.storage import Store from homeassistant.helpers.typing import ConfigType import homeassistant.util.dt as dt_util from .const import DOMAIN, GWS, STORAGE_KEY, STORAGE_VER, TCS, UTC_OFFSET _LOGGER = logging.getLogger(__name__) ACCESS_TOKEN = "access_token" ACCESS_TOKEN_EXPIRES = "access_token_expires" REFRESH_TOKEN = "refresh_token" USER_DATA = "user_data" CONF_LOCATION_IDX = "location_idx" SCAN_INTERVAL_DEFAULT = timedelta(seconds=300) SCAN_INTERVAL_MINIMUM = timedelta(seconds=60) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_LOCATION_IDX, default=0): cv.positive_int, vol.Optional( CONF_SCAN_INTERVAL, default=SCAN_INTERVAL_DEFAULT ): vol.All(cv.time_period, vol.Range(min=SCAN_INTERVAL_MINIMUM)), } ) }, extra=vol.ALLOW_EXTRA, ) ATTR_SYSTEM_MODE = "mode" ATTR_DURATION_DAYS = "period" ATTR_DURATION_HOURS = "duration" ATTR_ZONE_TEMP = "setpoint" ATTR_DURATION_UNTIL = "duration" SVC_REFRESH_SYSTEM = "refresh_system" SVC_SET_SYSTEM_MODE = "set_system_mode" SVC_RESET_SYSTEM = "reset_system" SVC_SET_ZONE_OVERRIDE = "set_zone_override" SVC_RESET_ZONE_OVERRIDE = "clear_zone_override" RESET_ZONE_OVERRIDE_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_id}) SET_ZONE_OVERRIDE_SCHEMA = vol.Schema( { vol.Required(ATTR_ENTITY_ID): cv.entity_id, vol.Required(ATTR_ZONE_TEMP): vol.All( vol.Coerce(float), vol.Range(min=4.0, max=35.0) ), vol.Optional(ATTR_DURATION_UNTIL): vol.All( cv.time_period, vol.Range(min=timedelta(days=0), max=timedelta(days=1)) ), } ) # system mode schemas are built dynamically, below def _dt_local_to_aware(dt_naive: dt) -> dt: dt_aware = dt_util.now() + (dt_naive - dt.now()) if dt_aware.microsecond >= 500000: dt_aware += timedelta(seconds=1) return dt_aware.replace(microsecond=0) def _dt_aware_to_naive(dt_aware: dt) -> dt: dt_naive = dt.now() + (dt_aware - dt_util.now()) if dt_naive.microsecond >= 500000: dt_naive += timedelta(seconds=1) return dt_naive.replace(microsecond=0) def convert_until(status_dict: dict, until_key: str) -> None: """Reformat a dt str from "%Y-%m-%dT%H:%M:%SZ" as local/aware/isoformat.""" if until_key in status_dict and ( # only present for certain modes dt_utc_naive := dt_util.parse_datetime(status_dict[until_key]) ): status_dict[until_key] = dt_util.as_local(dt_utc_naive).isoformat() def convert_dict(dictionary: dict[str, Any]) -> dict[str, Any]: """Recursively convert a dict's keys to snake_case.""" def convert_key(key: str) -> str: """Convert a string to snake_case.""" string = re.sub(r"[\-\.\s]", "_", str(key)) return (string[0]).lower() + re.sub( r"[A-Z]", lambda matched: f"_{matched.group(0).lower()}", # type:ignore[str-bytes-safe] string[1:], ) return { (convert_key(k) if isinstance(k, str) else k): ( convert_dict(v) if isinstance(v, dict) else v ) for k, v in dictionary.items() } def _handle_exception(err) -> None: """Return False if the exception can't be ignored.""" try: raise err except evohomeasync2.AuthenticationError: _LOGGER.error( "Failed to authenticate with the vendor's server. " "Check your username and password. NB: Some special password characters " "that work correctly via the website will not work via the web API. " "Message is: %s", err, ) except aiohttp.ClientConnectionError: # this appears to be a common occurrence with the vendor's servers _LOGGER.warning( "Unable to connect with the vendor's server. " "Check your network and the vendor's service status page. " "Message is: %s", err, ) except aiohttp.ClientResponseError: if err.status == HTTPStatus.SERVICE_UNAVAILABLE: _LOGGER.warning( "The vendor says their server is currently unavailable. " "Check the vendor's service status page" ) elif err.status == HTTPStatus.TOO_MANY_REQUESTS: _LOGGER.warning( "The vendor's API rate limit has been exceeded. " "If this message persists, consider increasing the %s", CONF_SCAN_INTERVAL, ) else: raise # we don't expect/handle any other Exceptions async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: """Create a (EMEA/EU-based) Honeywell TCC system.""" async def load_auth_tokens(store) -> tuple[dict, dict | None]: app_storage = await store.async_load() tokens = dict(app_storage or {}) if tokens.pop(CONF_USERNAME, None) != config[DOMAIN][CONF_USERNAME]: # any tokens won't be valid, and store might be be corrupt await store.async_save({}) return ({}, None) # evohomeasync2 requires naive/local datetimes as strings if tokens.get(ACCESS_TOKEN_EXPIRES) is not None and ( expires := dt_util.parse_datetime(tokens[ACCESS_TOKEN_EXPIRES]) ): tokens[ACCESS_TOKEN_EXPIRES] = _dt_aware_to_naive(expires) user_data = tokens.pop(USER_DATA, None) return (tokens, user_data) store = Store[dict[str, Any]](hass, STORAGE_VER, STORAGE_KEY) tokens, user_data = await load_auth_tokens(store) client_v2 = evohomeasync2.EvohomeClient( config[DOMAIN][CONF_USERNAME], config[DOMAIN][CONF_PASSWORD], **tokens, session=async_get_clientsession(hass), ) try: await client_v2.login() except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err: _handle_exception(err) return False finally: config[DOMAIN][CONF_PASSWORD] = "REDACTED" loc_idx = config[DOMAIN][CONF_LOCATION_IDX] try: loc_config = client_v2.installation_info[loc_idx] except IndexError: _LOGGER.error( "Config error: '%s' = %s, but the valid range is 0-%s. " "Unable to continue. Fix any configuration errors and restart HA", CONF_LOCATION_IDX, loc_idx, len(client_v2.installation_info) - 1, ) return False if _LOGGER.isEnabledFor(logging.DEBUG): _config: dict[str, Any] = { "locationInfo": {"timeZone": None}, GWS: [{TCS: None}], } _config["locationInfo"]["timeZone"] = loc_config["locationInfo"]["timeZone"] _config[GWS][0][TCS] = loc_config[GWS][0][TCS] _LOGGER.debug("Config = %s", _config) client_v1 = evohomeasync.EvohomeClient( client_v2.username, client_v2.password, user_data=user_data, session=async_get_clientsession(hass), ) hass.data[DOMAIN] = {} hass.data[DOMAIN]["broker"] = broker = EvoBroker( hass, client_v2, client_v1, store, config[DOMAIN] ) await broker.save_auth_tokens() await broker.async_update() # get initial state hass.async_create_task( async_load_platform(hass, Platform.CLIMATE, DOMAIN, {}, config) ) if broker.tcs.hotwater: hass.async_create_task( async_load_platform(hass, Platform.WATER_HEATER, DOMAIN, {}, config) ) async_track_time_interval( hass, broker.async_update, config[DOMAIN][CONF_SCAN_INTERVAL] ) setup_service_functions(hass, broker) return True @callback def setup_service_functions(hass: HomeAssistant, broker): """Set up the service handlers for the system/zone operating modes. Not all Honeywell TCC-compatible systems support all operating modes. In addition, each mode will require any of four distinct service schemas. This has to be enumerated before registering the appropriate handlers. It appears that all TCC-compatible systems support the same three zones modes. """ @verify_domain_control(hass, DOMAIN) async def force_refresh(call: ServiceCall) -> None: """Obtain the latest state data via the vendor's RESTful API.""" await broker.async_update() @verify_domain_control(hass, DOMAIN) async def set_system_mode(call: ServiceCall) -> None: """Set the system mode.""" payload = { "unique_id": broker.tcs.systemId, "service": call.service, "data": call.data, } async_dispatcher_send(hass, DOMAIN, payload) @verify_domain_control(hass, DOMAIN) async def set_zone_override(call: ServiceCall) -> None: """Set the zone override (setpoint).""" entity_id = call.data[ATTR_ENTITY_ID] registry = er.async_get(hass) registry_entry = registry.async_get(entity_id) if registry_entry is None or registry_entry.platform != DOMAIN: raise ValueError(f"'{entity_id}' is not a known {DOMAIN} entity") if registry_entry.domain != "climate": raise ValueError(f"'{entity_id}' is not an {DOMAIN} controller/zone") payload = { "unique_id": registry_entry.unique_id, "service": call.service, "data": call.data, } async_dispatcher_send(hass, DOMAIN, payload) hass.services.async_register(DOMAIN, SVC_REFRESH_SYSTEM, force_refresh) # Enumerate which operating modes are supported by this system modes = broker.config["allowedSystemModes"] # Not all systems support "AutoWithReset": register this handler only if required if [m["systemMode"] for m in modes if m["systemMode"] == "AutoWithReset"]: hass.services.async_register(DOMAIN, SVC_RESET_SYSTEM, set_system_mode) system_mode_schemas = [] modes = [m for m in modes if m["systemMode"] != "AutoWithReset"] # Permanent-only modes will use this schema perm_modes = [m["systemMode"] for m in modes if not m["canBeTemporary"]] if perm_modes: # any of: "Auto", "HeatingOff": permanent only schema = vol.Schema({vol.Required(ATTR_SYSTEM_MODE): vol.In(perm_modes)}) system_mode_schemas.append(schema) modes = [m for m in modes if m["canBeTemporary"]] # These modes are set for a number of hours (or indefinitely): use this schema temp_modes = [m["systemMode"] for m in modes if m["timingMode"] == "Duration"] if temp_modes: # any of: "AutoWithEco", permanent or for 0-24 hours schema = vol.Schema( { vol.Required(ATTR_SYSTEM_MODE): vol.In(temp_modes), vol.Optional(ATTR_DURATION_HOURS): vol.All( cv.time_period, vol.Range(min=timedelta(hours=0), max=timedelta(hours=24)), ), } ) system_mode_schemas.append(schema) # These modes are set for a number of days (or indefinitely): use this schema temp_modes = [m["systemMode"] for m in modes if m["timingMode"] == "Period"] if temp_modes: # any of: "Away", "Custom", "DayOff", permanent or for 1-99 days schema = vol.Schema( { vol.Required(ATTR_SYSTEM_MODE): vol.In(temp_modes), vol.Optional(ATTR_DURATION_DAYS): vol.All( cv.time_period, vol.Range(min=timedelta(days=1), max=timedelta(days=99)), ), } ) system_mode_schemas.append(schema) if system_mode_schemas: hass.services.async_register( DOMAIN, SVC_SET_SYSTEM_MODE, set_system_mode, schema=vol.Any(*system_mode_schemas), ) # The zone modes are consistent across all systems and use the same schema hass.services.async_register( DOMAIN, SVC_RESET_ZONE_OVERRIDE, set_zone_override, schema=RESET_ZONE_OVERRIDE_SCHEMA, ) hass.services.async_register( DOMAIN, SVC_SET_ZONE_OVERRIDE, set_zone_override, schema=SET_ZONE_OVERRIDE_SCHEMA, ) class EvoBroker: """Container for evohome client and data.""" def __init__( self, hass, client: evohomeasync2.EvohomeClient, client_v1: evohomeasync.EvohomeClient | None, store: Store[dict[str, Any]], params, ) -> None: """Initialize the evohome client and its data structure.""" self.hass = hass self.client = client self.client_v1 = client_v1 self._store = store self.params = params loc_idx = params[CONF_LOCATION_IDX] self.config = client.installation_info[loc_idx][GWS][0][TCS][0] self.tcs = client.locations[loc_idx]._gateways[0]._control_systems[0] self.tcs_utc_offset = timedelta( minutes=client.locations[loc_idx].timeZone[UTC_OFFSET] ) self.temps: dict[str, Any] | None = {} async def save_auth_tokens(self) -> None: """Save access tokens and session IDs to the store for later use.""" # evohomeasync2 uses naive/local datetimes access_token_expires = _dt_local_to_aware(self.client.access_token_expires) app_storage = { CONF_USERNAME: self.client.username, REFRESH_TOKEN: self.client.refresh_token, ACCESS_TOKEN: self.client.access_token, ACCESS_TOKEN_EXPIRES: access_token_expires.isoformat(), } if self.client_v1 and self.client_v1.user_data: app_storage[USER_DATA] = { "userInfo": {"userID": self.client_v1.user_data["userInfo"]["userID"]}, "sessionId": self.client_v1.user_data["sessionId"], } else: app_storage[USER_DATA] = None await self._store.async_save(app_storage) async def call_client_api(self, api_function, update_state=True) -> Any: """Call a client API and update the broker state if required.""" try: result = await api_function except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err: _handle_exception(err) return if update_state: # wait a moment for system to quiesce before updating state async_call_later(self.hass, 1, self._update_v2_api_state) return result async def _update_v1_api_temps(self, *args, **kwargs) -> None: """Get the latest high-precision temperatures of the default Location.""" assert self.client_v1 def get_session_id(client_v1) -> str | None: user_data = client_v1.user_data if client_v1 else None return user_data.get("sessionId") if user_data else None session_id = get_session_id(self.client_v1) try: temps = list(await self.client_v1.temperatures(force_refresh=True)) except aiohttp.ClientError as err: _LOGGER.warning( "Unable to obtain the latest high-precision temperatures. " "Check your network and the vendor's service status page. " "Proceeding with low-precision temperatures. " "Message is: %s", err, ) self.temps = None # these are now stale, will fall back to v2 temps else: if ( str(self.client_v1.location_id) != self.client.locations[self.params[CONF_LOCATION_IDX]].locationId ): _LOGGER.warning( "The v2 API's configured location doesn't match " "the v1 API's default location (there is more than one location), " "so the high-precision feature will be disabled" ) self.client_v1 = self.temps = None else: self.temps = {str(i["id"]): i["temp"] for i in temps} _LOGGER.debug("Temperatures = %s", self.temps) if session_id != get_session_id(self.client_v1): await self.save_auth_tokens() async def _update_v2_api_state(self, *args, **kwargs) -> None: """Get the latest modes, temperatures, setpoints of a Location.""" access_token = self.client.access_token loc_idx = self.params[CONF_LOCATION_IDX] try: status = await self.client.locations[loc_idx].status() except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err: _handle_exception(err) else: async_dispatcher_send(self.hass, DOMAIN) _LOGGER.debug("Status = %s", status) if access_token != self.client.access_token: await self.save_auth_tokens() async def async_update(self, *args, **kwargs) -> None: """Get the latest state data of an entire Honeywell TCC Location. This includes state data for a Controller and all its child devices, such as the operating mode of the Controller and the current temp of its children (e.g. Zones, DHW controller). """ if self.client_v1: await self._update_v1_api_temps() await self._update_v2_api_state() class EvoDevice(Entity): """Base for any evohome device. This includes the Controller, (up to 12) Heating Zones and (optionally) a DHW controller. """ _attr_should_poll = False def __init__(self, evo_broker, evo_device) -> None: """Initialize the evohome entity.""" self._evo_device = evo_device self._evo_broker = evo_broker self._evo_tcs = evo_broker.tcs self._device_state_attrs: dict[str, Any] = {} async def async_refresh(self, payload: dict | None = None) -> None: """Process any signals.""" if payload is None: self.async_schedule_update_ha_state(force_refresh=True) return if payload["unique_id"] != self._attr_unique_id: return if payload["service"] in (SVC_SET_ZONE_OVERRIDE, SVC_RESET_ZONE_OVERRIDE): await self.async_zone_svc_request(payload["service"], payload["data"]) return await self.async_tcs_svc_request(payload["service"], payload["data"]) async def async_tcs_svc_request(self, service: str, data: dict[str, Any]) -> None: """Process a service request (system mode) for a controller.""" raise NotImplementedError async def async_zone_svc_request(self, service: str, data: dict[str, Any]) -> None: """Process a service request (setpoint override) for a zone.""" raise NotImplementedError @property def extra_state_attributes(self) -> dict[str, Any]: """Return the evohome-specific state attributes.""" status = self._device_state_attrs if "systemModeStatus" in status: convert_until(status["systemModeStatus"], "timeUntil") if "setpointStatus" in status: convert_until(status["setpointStatus"], "until") if "stateStatus" in status: convert_until(status["stateStatus"], "until") return {"status": convert_dict(status)} async def async_added_to_hass(self) -> None: """Run when entity about to be added to hass.""" async_dispatcher_connect(self.hass, DOMAIN, self.async_refresh) class EvoChild(EvoDevice): """Base for any evohome child. This includes (up to 12) Heating Zones and (optionally) a DHW controller. """ def __init__(self, evo_broker, evo_device) -> None: """Initialize a evohome Controller (hub).""" super().__init__(evo_broker, evo_device) self._schedule: dict[str, Any] = {} self._setpoints: dict[str, Any] = {} @property def current_temperature(self) -> float | None: """Return the current temperature of a Zone.""" if ( self._evo_broker.temps and self._evo_broker.temps[self._evo_device.zoneId] != 128 ): return self._evo_broker.temps[self._evo_device.zoneId] if self._evo_device.temperatureStatus["isAvailable"]: return self._evo_device.temperatureStatus["temperature"] return None @property def setpoints(self) -> dict[str, Any]: """Return the current/next setpoints from the schedule. Only Zones & DHW controllers (but not the TCS) can have schedules. """ def _dt_evo_to_aware(dt_naive: dt, utc_offset: timedelta) -> dt: dt_aware = dt_naive.replace(tzinfo=dt_util.UTC) - utc_offset return dt_util.as_local(dt_aware) if not self._schedule or not self._schedule.get("DailySchedules"): return {} # no scheduled setpoints when {'DailySchedules': []} day_time = dt_util.now() day_of_week = day_time.weekday() # for evohome, 0 is Monday time_of_day = day_time.strftime("%H:%M:%S") try: # Iterate today's switchpoints until past the current time of day... day = self._schedule["DailySchedules"][day_of_week] sp_idx = -1 # last switchpoint of the day before for i, tmp in enumerate(day["Switchpoints"]): if time_of_day > tmp["TimeOfDay"]: sp_idx = i # current setpoint else: break # Did the current SP start yesterday? Does the next start SP tomorrow? this_sp_day = -1 if sp_idx == -1 else 0 next_sp_day = 1 if sp_idx + 1 == len(day["Switchpoints"]) else 0 for key, offset, idx in ( ("this", this_sp_day, sp_idx), ("next", next_sp_day, (sp_idx + 1) * (1 - next_sp_day)), ): sp_date = (day_time + timedelta(days=offset)).strftime("%Y-%m-%d") day = self._schedule["DailySchedules"][(day_of_week + offset) % 7] switchpoint = day["Switchpoints"][idx] switchpoint_time_of_day = dt_util.parse_datetime( f"{sp_date}T{switchpoint['TimeOfDay']}" ) assert switchpoint_time_of_day dt_aware = _dt_evo_to_aware( switchpoint_time_of_day, self._evo_broker.tcs_utc_offset ) self._setpoints[f"{key}_sp_from"] = dt_aware.isoformat() try: self._setpoints[f"{key}_sp_temp"] = switchpoint["heatSetpoint"] except KeyError: self._setpoints[f"{key}_sp_state"] = switchpoint["DhwState"] except IndexError: self._setpoints = {} _LOGGER.warning( "Failed to get setpoints, report as an issue if this error persists", exc_info=True, ) return self._setpoints async def _update_schedule(self) -> None: """Get the latest schedule, if any.""" self._schedule = await self._evo_broker.call_client_api( self._evo_device.schedule(), update_state=False ) _LOGGER.debug("Schedule['%s'] = %s", self.name, self._schedule) async def async_update(self) -> None: """Get the latest state data.""" next_sp_from = self._setpoints.get("next_sp_from", "2000-01-01T00:00:00+00:00") next_sp_from_dt = dt_util.parse_datetime(next_sp_from) if next_sp_from_dt is None or dt_util.now() >= next_sp_from_dt: await self._update_schedule() # no schedule, or it's out-of-date self._device_state_attrs = {"setpoints": self.setpoints}
{ "content_hash": "c09a6ac625484f56c07011a6d93d5eed", "timestamp": "", "source": "github", "line_count": 688, "max_line_length": 90, "avg_line_length": 36.303779069767444, "alnum_prop": 0.6061977018857349, "repo_name": "w1ll1am23/home-assistant", "id": "9c46ff430323593dc3575a82f7866c57ff8645d4", "size": "24977", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "homeassistant/components/evohome/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2963" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "52277012" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
import pytest import os from test_index import CORPUSES import tempfile import shutil import pipes import ujson as json import subprocess def _validate_txt_graph(webgraph_dir): # We collect(1) so there should be only one partition webgraph_files = [f for f in os.listdir(os.path.join(webgraph_dir, "out")) if f.endswith(".txt")] assert len(webgraph_files) == 1 with open(os.path.join(webgraph_dir, "out", webgraph_files[0]), "r") as f: graph = [x.split(" ") for x in f.read().strip().split("\n")] print graph assert len(graph) == 3 assert ["example-a.com", "example-b.com"] in graph assert ["example-b.com", "example-c.com"] in graph assert ["example-c.com", "example-b.com"] in graph def _read_parquet(parquet_path): out = subprocess.check_output("hadoop jar /usr/lib/parquet-tools-1.8.1.jar cat --json %s 2>/dev/null" % parquet_path, shell=True) return [json.loads(line) for line in out.strip().split("\n")] def test_spark_link_graph_txt(sparksubmit): webgraph_dir = tempfile.mkdtemp() try: sparksubmit("spark/jobs/pipeline.py --source wikidata --source corpus:%s --plugin plugins.webgraph.DomainToDomain:coalesce=1,output=%s/out/" % ( pipes.quote(json.dumps(CORPUSES["simple_link_graph_domain"])), webgraph_dir )) _validate_txt_graph(webgraph_dir) finally: shutil.rmtree(webgraph_dir) def test_spark_link_graph_txt_with_intermediate_dump(sparksubmit): """ Test intermediate dump generation & parquet source, + having no dependency on elasticsearch when not actually indexing """ webgraph_dir = tempfile.mkdtemp() try: # Generate temporary dump sparksubmit("spark/jobs/pipeline.py --source corpus:%s --plugin plugins.dump.DocumentMetadata:format=parquet,output=%s/intermediate/,abort=1 --plugin plugins.webgraph.DomainToDomain:coalesce=1,output=%s/out/" % ( pipes.quote(json.dumps(CORPUSES["simple_link_graph_domain"])), webgraph_dir, webgraph_dir )) assert not os.path.isdir("%s/out/" % webgraph_dir) print "Intermediate file dump:" print _read_parquet("%s/intermediate/" % webgraph_dir) # Resume pipeline from that dump sparksubmit("spark/jobs/pipeline.py --source metadata:path=%s/intermediate/ --plugin plugins.webgraph.DomainToDomain:coalesce=1,output=%s/out/" % ( webgraph_dir, webgraph_dir )) _validate_txt_graph(webgraph_dir) finally: shutil.rmtree(webgraph_dir) def test_spark_link_graph_parquet(urlclient, sparksubmit): webgraph_dir = tempfile.mkdtemp() try: domain_a_id = urlclient.client.get_domain_id("http://example-a.com/") domain_b_id = urlclient.client.get_domain_id("http://example-b.com/") domain_c_id = urlclient.client.get_domain_id("http://example-c.com/") domain_d_id = urlclient.client.get_domain_id("http://example-d.com/") sparksubmit("spark/jobs/pipeline.py --source corpus:%s --plugin plugins.webgraph.DomainToDomainParquet:coalesce=1,output=%s/out/" % ( pipes.quote(json.dumps(CORPUSES["simple_link_graph_domain"])), webgraph_dir )) # Then read the generated Parquet files with another library to ensure compatibility # TODO: replace this with a JSON dump from a Python binding when available lines = _read_parquet("%s/out/edges/" % webgraph_dir) for src, dst in [ (domain_a_id, domain_b_id), (domain_b_id, domain_c_id), (domain_c_id, domain_b_id) ]: assert {"src": src, "dst": dst, "weight": 1.0} in lines assert len(lines) == 3 lines = _read_parquet("%s/out/vertices/" % webgraph_dir) assert {"id": domain_a_id, "domain": "example-a.com"} in lines assert {"id": domain_b_id, "domain": "example-b.com"} in lines assert {"id": domain_c_id, "domain": "example-c.com"} in lines assert {"id": domain_d_id, "domain": "example-d.com"} in lines assert len(lines) == 4 finally: shutil.rmtree(webgraph_dir)
{ "content_hash": "f2dc3a09aeebc1ac703b82f27842132c", "timestamp": "", "source": "github", "line_count": 122, "max_line_length": 220, "avg_line_length": 34.450819672131146, "alnum_prop": 0.6354984534856055, "repo_name": "commonsearch/cosr-back", "id": "956978bfc33aef31b81df7fc665bd8950caf25fe", "size": "4203", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/sparktests/test_plugin_webgraph.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "244" }, { "name": "HTML", "bytes": "18009232" }, { "name": "JavaScript", "bytes": "1883" }, { "name": "Makefile", "bytes": "8635" }, { "name": "Perl", "bytes": "1374" }, { "name": "Protocol Buffer", "bytes": "393" }, { "name": "Python", "bytes": "235923" }, { "name": "Shell", "bytes": "1195" } ], "symlink_target": "" }
"""General purpose metrics writer interface.""" from absl import logging try: from tensorboardX import SummaryWriter # type: ignore finally: pass class Writer: """General purpose metrics writer.""" def __init__(self, logdir=''): self._writer = SummaryWriter(logdir=logdir) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self._writer.close() def write_hparams(self, hparams): """Writes global hparams.""" logging.info('Hyperparameters: %s', hparams) self._writer.add_hparams(hparams, {}) def write_scalars(self, step, scalars): """Writers scalar metrics.""" values = [ f'{k}={v:.6f}' if isinstance(v, float) else f'{k}={v}' for k, v in sorted(scalars.items()) ] logging.info('[%d] %s', step, ', '.join(values)) for k, v in scalars.items(): self._writer.add_scalars(k, {k: v}, step)
{ "content_hash": "0301c871d68da96fc31e7d33e20412f1", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 62, "avg_line_length": 24.513513513513512, "alnum_prop": 0.619625137816979, "repo_name": "google/brax", "id": "23b0da74e3f440540dbb55acf57ac5314156e017", "size": "1491", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "brax/io/metrics.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "27572" }, { "name": "Jupyter Notebook", "bytes": "8554172" }, { "name": "Python", "bytes": "1189091" } ], "symlink_target": "" }
""" Author: Thomas G. Close ([email protected]) Copyright: 2012-2014 Thomas G. Close. License: This file is part of the "NineLine" package, which is released under the MIT Licence, see LICENSE for details. """ # from pype9.utils.mpi import mpi_comm import os.path import nineml import ninemlcatalog from argparse import ArgumentTypeError import pype9.utils.logging.handlers.sysout # @UnusedImport CATALOG_PREFIX = 'catalog://' def existing_file(fname): if not os.path.isfile(fname): raise ArgumentTypeError( "'{}' does not refer to an existing file".format(fname)) return fname def nineml_document(doc_path): if doc_path.startswith(CATALOG_PREFIX): model = ninemlcatalog.load(doc_path[len(CATALOG_PREFIX):]) else: if (not doc_path.startswith('/') and not doc_path.startswith('./') and not doc_path.startswith('../')): doc_path = './' + doc_path model = nineml.read(doc_path, relative_to=os.getcwd()) return model def nineml_model(model_path): model = nineml_document(model_path) if isinstance(model, nineml.Document): model = model.as_network( os.path.splitext(os.path.basename(model_path))[0]) return model # Might be useful so have kept it here # # class randomseed(int): # # """ # Automatically generates unique random seeds if none are provided, as well # as ensuring that unique seeds are passed to each MPI process # # Parameters # ---------- # arg : int # An existing seed to use # mirror_mpi: bool # Flags whether the seeds should be the same on different # MPI nodes or not # """ # counter = 0 # # def __new__(cls, arg=None, mirror_mpi=False): # if arg is None or arg == 'None' or int(arg) == 0: # seed = int(time.time() * 256) + cls.counter # cls.counter += 1 # else: # seed = int(arg) # # Ensure a different seed gets used on each MPI node # if not mirror_mpi: # seed = seed * mpi_comm.size + mpi_comm.rank # return cls(seed)
{ "content_hash": "38a78b41d975a57795dc9fdc1039eea2", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 79, "avg_line_length": 30.685714285714287, "alnum_prop": 0.6126629422718808, "repo_name": "tclose/PyPe9", "id": "bb58a4185990257feb95127ec72b887524c732fb", "size": "2148", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "pype9/utils/arguments.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "1575" }, { "name": "Python", "bytes": "383807" }, { "name": "Shell", "bytes": "4546" } ], "symlink_target": "" }
__author__ = 'JMwill' __contact__ = '[email protected]'
{ "content_hash": "b2363efefc849ad4a80bf8c80067ebb6", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 35, "avg_line_length": 29, "alnum_prop": 0.6206896551724138, "repo_name": "JMwill/wiki", "id": "bfaa92873c9df12a292c08ae25929016c229bd66", "size": "58", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "notebook/tool/spider/meizi_spider/python_spider/grab-proxy/version-2/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "161247" }, { "name": "HTML", "bytes": "249039" }, { "name": "JavaScript", "bytes": "3940057" }, { "name": "Python", "bytes": "105911" }, { "name": "QML", "bytes": "2985" }, { "name": "Racket", "bytes": "1296" }, { "name": "Ruby", "bytes": "4126" }, { "name": "Scheme", "bytes": "8261" }, { "name": "Shell", "bytes": "4462" }, { "name": "Vim script", "bytes": "12735" }, { "name": "Vue", "bytes": "64287" }, { "name": "mupad", "bytes": "3496" } ], "symlink_target": "" }
"""Configuration parameters for the server side subsystems.""" import grr from grr.lib import config_lib from grr.lib import rdfvalue from grr.lib.rdfvalues import crypto as rdf_crypto VERSION = grr.version() config_lib.DEFINE_integer("Source.version_major", VERSION["major"], "Major version number of client binary.") config_lib.DEFINE_integer("Source.version_minor", VERSION["minor"], "Minor version number of client binary.") config_lib.DEFINE_integer("Source.version_revision", VERSION["revision"], "Revision number of client binary.") config_lib.DEFINE_integer("Source.version_release", VERSION["release"], "Release number of client binary.") config_lib.DEFINE_string("Source.version_string", "%(version_major).%(version_minor)." "%(version_revision).%(version_release)", "Version string of the client.") config_lib.DEFINE_integer("Source.version_numeric", "%(version_major)%(version_minor)" "%(version_revision)%(version_release)", "Version string of the client as an integer.") # Note: Each thread adds about 8mb for stack space. config_lib.DEFINE_integer("Threadpool.size", 50, "Number of threads in the shared thread pool.") config_lib.DEFINE_integer("Worker.flow_lease_time", 7200, "Duration of a flow lease time in seconds.") config_lib.DEFINE_integer("Worker.well_known_flow_lease_time", 600, "Duration of a well known flow lease time in " "seconds.") config_lib.DEFINE_integer("Worker.compaction_lease_time", 3600, "Duration of collections lease time for compaction " "in seconds.") config_lib.DEFINE_bool("Worker.enable_packed_versioned_collection_journaling", False, "If True, all Add*() operations and all " "compactions of PackedVersionedCollections will be " "journaled so that these collections can be later " "checked for integrity.") config_lib.DEFINE_integer("Worker.queue_shards", 5, "Queue notifications will be sharded across " "this number of datastore subjects.") config_lib.DEFINE_integer("Worker.notification_expiry_time", 600, "The queue manager expires stale notifications " "after this many seconds.") config_lib.DEFINE_integer("Worker.notification_retry_interval", 30, "The queue manager retries to work on requests it " "could not complete after this many seconds.") # We write a journal entry for the flow when it's about to be processed. # If the journal entry is there after this time, the flow will get terminated. config_lib.DEFINE_integer( "Worker.stuck_flows_timeout", 60 * 60 * 6, "Flows who got stuck in the worker for more than this time (in seconds) " "are forcibly terminated") config_lib.DEFINE_list("Frontend.well_known_flows", ["TransferStore", "Stats"], "Allow these well known flows to run directly on the " "frontend. Other flows are scheduled as normal.") config_lib.DEFINE_list("Frontend.DEBUG_well_known_flows_blacklist", [], "Drop these well known flows requests without " "processing. Useful as an emergency tool to reduce " "the load on the system.") config_lib.DEFINE_string( "Frontend.static_aff4_prefix", "aff4:/web/static/", "The AFF4 URN prefix for all streams served publicly from the frontend.") config_lib.DEFINE_string( "Frontend.static_url_path_prefix", "/static/", "The URL prefix for all streams served publicly from the frontend.") # Smtp settings. config_lib.DEFINE_string("Worker.smtp_server", "localhost", "The smtp server for sending email alerts.") config_lib.DEFINE_integer("Worker.smtp_port", 25, "The smtp server port.") config_lib.DEFINE_bool("Worker.smtp_starttls", False, "Enable TLS for the smtp connection.") config_lib.DEFINE_string("Worker.smtp_user", None, "Username for the smtp connection.") config_lib.DEFINE_string("Worker.smtp_password", None, "Password for the smtp connection.") # Server Cryptographic settings. config_lib.DEFINE_semantic( rdf_crypto.RSAPrivateKey, "PrivateKeys.ca_key", description="CA private key. Used to sign for client enrollment.",) config_lib.DEFINE_semantic( rdf_crypto.RSAPrivateKey, "PrivateKeys.server_key", description="Private key for the front end server.") config_lib.DEFINE_integer("Server.rsa_key_length", 2048, "The length of the server rsa key in bits.") config_lib.DEFINE_semantic( rdf_crypto.RDFX509Cert, "Frontend.certificate", description="An X509 certificate for the frontend server.") config_lib.DEFINE_bool("Cron.active", False, "Set to true to run a cron thread on this binary.") config_lib.DEFINE_list("Cron.enabled_system_jobs", [], "DEPRECATED: Use Cron.disabled_system_jobs instead. " "If Cron.enabled_system_jobs is set, only the listed " "cron flows will be run as system cron jobs. Cannot " "be used together with Cron.disabled_system_jobs.") config_lib.DEFINE_list("Cron.disabled_system_jobs", [], "Normally, all subclasses of SystemCronFlow are " "considered system jobs and run automatically. System " "jobs listed here will not be run. Many system jobs are " "important. Leave empty unless you are sure that you " "know what you are doing.") config_lib.DEFINE_string("Frontend.bind_address", "::", "The ip address to bind.") config_lib.DEFINE_integer("Frontend.bind_port", 8080, "The port to bind.") config_lib.DEFINE_integer("Frontend.port_max", None, "If set and Frontend.bind_port is in use, attempt to " "use ports between Frontend.bind_port and " "Frontend.port_max.") config_lib.DEFINE_integer("Frontend.max_queue_size", 500, "Maximum number of messages to queue for the client.") config_lib.DEFINE_integer("Frontend.max_retransmission_time", 10, "Maximum number of times we are allowed to " "retransmit a request until it fails.") config_lib.DEFINE_integer("Frontend.message_expiry_time", 600, "Maximum time messages remain valid within the " "system.") config_lib.DEFINE_string("Frontend.upload_store", "FileUploadFileStore", "The implementation of the upload file store.") config_lib.DEFINE_string("FileUploadFileStore.root_dir", "/tmp/", "Where to store files uploaded.") config_lib.DEFINE_bool("Server.initialized", False, "True once config_updater initialize has been " "run at least once.") config_lib.DEFINE_string("Server.master_watcher_class", "DefaultMasterWatcher", "The master watcher class to use.") config_lib.DEFINE_string("Server.ip_resolver_class", "IPResolver", "The ip resolver class to use.") config_lib.DEFINE_string("Server.email_alerter_class", "SMTPEmailAlerter", "The email alerter class to use.") config_lib.DEFINE_string( "Rekall.profile_repository", "https://github.com/google/rekall-profiles/raw/master", "The repository to use when downloading Rekall profiles.") config_lib.DEFINE_string( "Rekall.profile_cache_urn", "aff4:/rekall_profiles", "A cache in the aff4 space to store downloaded Rekall profiles.") config_lib.DEFINE_string("Rekall.profile_server", "GRRRekallProfileServer", "Which Rekall profile server to use.") config_lib.DEFINE_string( "Server.username", None, "System account for services to run as after initialization. Note that " "GRR must be running as root first before being able to switch to another " "username. You would normally only need this if you want to bind to a low " "port for some reason.") # Email Template Values config_lib.DEFINE_string("Email.signature", "The GRR Team", "The default signature block for template emails") config_lib.DEFINE_string( "Email.approval_cc_address", None, "A single email address or comma separated list of addresses to CC on all " "approval emails. Will be added" " to all emails and can't be changed or removed by the user.") config_lib.DEFINE_boolean( "Email.send_approval_emails", True, "Approval emails are sent for approvals in addition to notifications " "in the web UI.") config_lib.DEFINE_string( "Email.approval_optional_cc_address", None, "A single email address or comma separated list of addresses to CC on all " "approval emails. The user has the option to" " remove this CC address .") config_lib.DEFINE_string( "Email.approval_signature", None, "If you feel like it, you can add a funny cat picture to approval mails. " "Needs full html: <img src=\"https://imgur.com/path/to/cat.jpg\">.") config_lib.DEFINE_list( "Email.link_regex_list", [], "Strings matching these regexes in approval reasons will be turned into " " HTML links in approval emails. Note you have to use single quoted strings" " when setting this variable to prevent escaping.") config_lib.DEFINE_string( "StatsStore.process_id", default="", help="Id used to identify stats data of the current " "process. This should be different for different GRR " "processes. I.e. if you have 4 workers, for every " "worker the subject should be different. For example: " "worker_1, worker_2, worker_3, worker_4.") config_lib.DEFINE_integer( "StatsStore.write_interval", default=60, help="Time in seconds between the dumps of stats " "data into the stats store.") config_lib.DEFINE_integer( "StatsStore.ttl", default=60 * 60 * 24 * 3, help="Maximum lifetime (in seconds) of data in the " "stats store. Default is three days.") config_lib.DEFINE_bool( "AdminUI.allow_hunt_results_delete", default=False, help="If True, hunts with results can be deleted " "when the delete hunt button is used. Enable with " "caution as this allows erasure of historical usage for" "accountability purposes.") config_lib.DEFINE_integer( "Server.max_unbound_read_size", 10000000, help="The number of bytes allowed for unbounded " "reads from a file object") # Data retention policies. config_lib.DEFINE_semantic( rdfvalue.Duration, "DataRetention.cron_jobs_flows_ttl", default=None, description="Cron job flows TTL specified as the duration string. " "Examples: 90d, 180d, 1y. If not set, cron jobs flows will be retained " "forever.") config_lib.DEFINE_semantic( rdfvalue.Duration, "DataRetention.hunts_ttl", default=None, description="Hunts TTL specified as the duration string. Examples: 90d, " "180d, 1y. If not set, hunts will be retained forever.") config_lib.DEFINE_string( "DataRetention.hunts_ttl_exception_label", default="retain", help="Hunts marked with this label " "will be retained forever.") config_lib.DEFINE_semantic( rdfvalue.Duration, "DataRetention.tmp_ttl", default=None, description="Temp TTL specified as the duration string. Examples: 90d, " "180d, 1y. If not set, temp objects will be retained forever.") config_lib.DEFINE_string( "DataRetention.tmp_ttl_exception_label", default="retain", help="Temp objects marked with this " "label will be retained forever.") config_lib.DEFINE_semantic( rdfvalue.Duration, "DataRetention.inactive_client_ttl", default=None, description="Temp TTL specified as the duration string. Examples: 90d, " "180d, 1y. If not set, inactive clients will be retained forever.") config_lib.DEFINE_string( "DataRetention.inactive_client_ttl_exception_label", default="retain", help="Inactive clients marked with " "this label will be retained forever.")
{ "content_hash": "9103ce57fd463c11e9d924cefdc01d9d", "timestamp": "", "source": "github", "line_count": 311, "max_line_length": 80, "avg_line_length": 41.02893890675241, "alnum_prop": 0.6409090909090909, "repo_name": "pidydx/grr", "id": "be509d673f88d5e085c9be7bc16f2b1f1d1ef9af", "size": "12782", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "grr/config/server.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "227" }, { "name": "Batchfile", "bytes": "3409" }, { "name": "C", "bytes": "10658" }, { "name": "C++", "bytes": "304935" }, { "name": "CMake", "bytes": "3228" }, { "name": "CSS", "bytes": "26531" }, { "name": "HTML", "bytes": "175613" }, { "name": "JavaScript", "bytes": "25418" }, { "name": "Makefile", "bytes": "1711" }, { "name": "Protocol Buffer", "bytes": "308592" }, { "name": "Python", "bytes": "6428769" }, { "name": "Roff", "bytes": "444" }, { "name": "Ruby", "bytes": "5604" }, { "name": "Shell", "bytes": "40128" }, { "name": "Standard ML", "bytes": "8172" } ], "symlink_target": "" }
"""Test i18n module.""" # # (C) Pywikibot team, 2007-2014 # # Distributed under the terms of the MIT license. # from __future__ import absolute_import, unicode_literals __version__ = '$Id$' import sys import pywikibot from pywikibot import i18n, bot, plural from tests.aspects import unittest, TestCase, DefaultSiteTestCase, PwbTestCase if sys.version_info[0] == 3: basestring = (str, ) class TestTranslate(TestCase): """Test translate method.""" net = False def setUp(self): self.msg_localized = {'en': u'test-localized EN', 'nl': u'test-localized NL', 'fy': u'test-localized FY'} self.msg_semi_localized = {'en': u'test-semi-localized EN', 'nl': u'test-semi-localized NL'} self.msg_non_localized = {'en': u'test-non-localized EN'} self.msg_no_english = {'ja': u'test-no-english JA'} super(TestTranslate, self).setUp() def testLocalized(self): """Test fully localized translations.""" self.assertEqual(i18n.translate('en', self.msg_localized, fallback=True), u'test-localized EN') self.assertEqual(i18n.translate('nl', self.msg_localized, fallback=True), u'test-localized NL') self.assertEqual(i18n.translate('fy', self.msg_localized, fallback=True), u'test-localized FY') def testSemiLocalized(self): """Test translate by fallback to an alternative language.""" self.assertEqual(i18n.translate('en', self.msg_semi_localized, fallback=True), u'test-semi-localized EN') self.assertEqual(i18n.translate('nl', self.msg_semi_localized, fallback=True), u'test-semi-localized NL') self.assertEqual(i18n.translate('fy', self.msg_semi_localized, fallback=True), u'test-semi-localized NL') def testNonLocalized(self): """Test translate with missing localisation.""" self.assertEqual(i18n.translate('en', self.msg_non_localized, fallback=True), u'test-non-localized EN') self.assertEqual(i18n.translate('fy', self.msg_non_localized, fallback=True), u'test-non-localized EN') self.assertEqual(i18n.translate('nl', self.msg_non_localized, fallback=True), u'test-non-localized EN') self.assertEqual(i18n.translate('ru', self.msg_non_localized, fallback=True), u'test-non-localized EN') def testNoEnglish(self): """Test translate with missing English text.""" self.assertEqual(i18n.translate('en', self.msg_no_english, fallback=True), u'test-no-english JA') self.assertEqual(i18n.translate('fy', self.msg_no_english, fallback=True), u'test-no-english JA') self.assertEqual(i18n.translate('nl', self.msg_no_english, fallback=True), u'test-no-english JA') class UserInterfaceLangTestCase(TestCase): """Base class for tests using config.userinterface_lang.""" def setUp(self): """Change the userinterface language to the site's code.""" super(UserInterfaceLangTestCase, self).setUp() self.orig_userinterface_lang = pywikibot.config.userinterface_lang pywikibot.config.userinterface_lang = self.get_site().code def tearDown(self): """Reset the userinterface language.""" pywikibot.config.userinterface_lang = self.orig_userinterface_lang super(UserInterfaceLangTestCase, self).tearDown() class TWNSetMessagePackageBase(TestCase): """Partial base class for TranslateWiki tests.""" message_package = None def setUp(self): """Load the test translations.""" self.orig_messages_package_name = i18n._messages_package_name i18n.set_messages_package(self.message_package) super(TWNSetMessagePackageBase, self).setUp() def tearDown(self): """Load the original translations back.""" super(TWNSetMessagePackageBase, self).tearDown() i18n.set_messages_package(self.orig_messages_package_name) class TWNTestCaseBase(TWNSetMessagePackageBase): """Base class for TranslateWiki tests.""" @classmethod def setUpClass(cls): """Verify that the test translations are not empty.""" if not isinstance(cls.message_package, basestring): raise TypeError('%s.message_package must be a package name' % cls.__name__) # The call to set_messages_package below exists only to confirm # that the package exists and messages are available, so # that tests can be skipped if the i18n data doesnt exist. cls.orig_messages_package_name = i18n._messages_package_name i18n.set_messages_package(cls.message_package) has_messages = i18n.messages_available() i18n._messages_package_name = cls.orig_messages_package_name if not has_messages: raise unittest.SkipTest("i18n messages package '%s' not available." % cls.message_package) super(TWNTestCaseBase, cls).setUpClass() class TestTWTranslate(TWNTestCaseBase): """Test twtranslate method.""" net = False message_package = 'tests.i18n' def testLocalized(self): """Test fully localized entry.""" self.assertEqual(i18n.twtranslate('en', 'test-localized'), u'test-localized EN') self.assertEqual(i18n.twtranslate('nl', 'test-localized'), u'test-localized NL') self.assertEqual(i18n.twtranslate('fy', 'test-localized'), u'test-localized FY') def testSemiLocalized(self): """Test translating with fallback to alternative language.""" self.assertEqual(i18n.twtranslate('en', 'test-semi-localized'), u'test-semi-localized EN') self.assertEqual(i18n.twtranslate('nl', 'test-semi-localized'), u'test-semi-localized NL') self.assertEqual(i18n.twtranslate('fy', 'test-semi-localized'), u'test-semi-localized NL') def testNonLocalized(self): """Test translating non localized entries.""" self.assertEqual(i18n.twtranslate('en', 'test-non-localized'), u'test-non-localized EN') self.assertEqual(i18n.twtranslate('fy', 'test-non-localized'), u'test-non-localized EN') self.assertEqual(i18n.twtranslate('nl', 'test-non-localized'), u'test-non-localized EN') self.assertEqual(i18n.twtranslate('ru', 'test-non-localized'), u'test-non-localized EN') def testNoEnglish(self): """Test translating into English with missing entry.""" self.assertRaises(i18n.TranslationError, i18n.twtranslate, 'en', 'test-no-english') class TestTWNTranslate(TWNTestCaseBase): """Test {{PLURAL:}} support.""" net = False message_package = 'tests.i18n' def testNumber(self): """Use a number.""" self.assertEqual( i18n.twntranslate('de', 'test-plural', 0) % {'num': 0}, u'Bot: Ändere 0 Seiten.') self.assertEqual( i18n.twntranslate('de', 'test-plural', 1) % {'num': 1}, u'Bot: Ändere 1 Seite.') self.assertEqual( i18n.twntranslate('de', 'test-plural', 2) % {'num': 2}, u'Bot: Ändere 2 Seiten.') self.assertEqual( i18n.twntranslate('de', 'test-plural', 3) % {'num': 3}, u'Bot: Ändere 3 Seiten.') self.assertEqual( i18n.twntranslate('en', 'test-plural', 0) % {'num': 'no'}, u'Bot: Changing no pages.') self.assertEqual( i18n.twntranslate('en', 'test-plural', 1) % {'num': 'one'}, u'Bot: Changing one page.') self.assertEqual( i18n.twntranslate('en', 'test-plural', 2) % {'num': 'two'}, u'Bot: Changing two pages.') self.assertEqual( i18n.twntranslate('en', 'test-plural', 3) % {'num': 'three'}, u'Bot: Changing three pages.') def testString(self): """Use a string.""" self.assertEqual( i18n.twntranslate('en', 'test-plural', '1') % {'num': 'one'}, u'Bot: Changing one page.') def testDict(self): """Use a dictionary.""" self.assertEqual( i18n.twntranslate('en', 'test-plural', {'num': 2}), u'Bot: Changing 2 pages.') def testExtended(self): """Use additional format strings.""" self.assertEqual( i18n.twntranslate('fr', 'test-plural', {'num': 1, 'descr': 'seulement'}), u'Robot: Changer seulement une page.') def testExtendedOutside(self): """Use additional format strings also outside.""" self.assertEqual( i18n.twntranslate('fr', 'test-plural', 1) % {'descr': 'seulement'}, u'Robot: Changer seulement une page.') def testMultiple(self): """Test using multiple plural entries.""" self.assertEqual( i18n.twntranslate('de', 'test-multiple-plurals', 1) % {'action': u'Ändere', 'line': u'eine'}, u'Bot: Ändere eine Zeile von einer Seite.') self.assertEqual( i18n.twntranslate('de', 'test-multiple-plurals', 2) % {'action': u'Ändere', 'line': u'zwei'}, u'Bot: Ändere zwei Zeilen von mehreren Seiten.') self.assertEqual( i18n.twntranslate('de', 'test-multiple-plurals', 3) % {'action': u'Ändere', 'line': u'drei'}, u'Bot: Ändere drei Zeilen von mehreren Seiten.') self.assertEqual( i18n.twntranslate('de', 'test-multiple-plurals', (1, 2, 2)) % {'action': u'Ändere', 'line': u'eine'}, u'Bot: Ändere eine Zeile von mehreren Seiten.') self.assertEqual( i18n.twntranslate('de', 'test-multiple-plurals', [3, 1, 1]) % {'action': u'Ändere', 'line': u'drei'}, u'Bot: Ändere drei Zeilen von einer Seite.') self.assertEqual( i18n.twntranslate('de', 'test-multiple-plurals', ["3", 1, 1]) % {'action': u'Ändere', 'line': u'drei'}, u'Bot: Ändere drei Zeilen von einer Seite.') self.assertEqual( i18n.twntranslate('de', 'test-multiple-plurals', "321") % {'action': u'Ändere', 'line': u'dreihunderteinundzwanzig'}, u'Bot: Ändere dreihunderteinundzwanzig Zeilen von mehreren Seiten.') self.assertEqual( i18n.twntranslate('de', 'test-multiple-plurals', {'action': u'Ändere', 'line': 1, 'page': 1}), u'Bot: Ändere 1 Zeile von einer Seite.') self.assertEqual( i18n.twntranslate('de', 'test-multiple-plurals', {'action': u'Ändere', 'line': 1, 'page': 2}), u'Bot: Ändere 1 Zeile von mehreren Seiten.') self.assertEqual( i18n.twntranslate('de', 'test-multiple-plurals', {'action': u'Ändere', 'line': "11", 'page': 2}), u'Bot: Ändere 11 Zeilen von mehreren Seiten.') def testMultipleWrongParameterLength(self): """Test wrong parameter length.""" err_msg = 'Length of parameter does not match PLURAL occurrences' with self.assertRaisesRegex(ValueError, err_msg): i18n.twntranslate('de', 'test-multiple-plurals', (1, 2)) with self.assertRaisesRegex(ValueError, err_msg): i18n.twntranslate('de', 'test-multiple-plurals', ["321"]) def testMultipleNonNumbers(self): """Test error handling for multiple non-numbers.""" with self.assertRaisesRegex(ValueError, "invalid literal for int\(\) with base 10: 'drei'"): i18n.twntranslate('de', 'test-multiple-plurals', ["drei", "1", 1]) with self.assertRaisesRegex(ValueError, "invalid literal for int\(\) with base 10: 'elf'"): i18n.twntranslate('de', 'test-multiple-plurals', {'action': u'Ändere', 'line': "elf", 'page': 2}) def testAllParametersExist(self): """Test that all parameters are required when using a dict.""" with self.assertRaisesRegex(KeyError, repr(u'line')): # all parameters must be inside twntranslate i18n.twntranslate('de', 'test-multiple-plurals', {'line': 1, 'page': 1}) % {'action': u'Ändere'} def test_fallback_lang(self): """ Test that twntranslate uses the translation's language. twntranslate calls _twtranslate which might return the translation for a different language and then the plural rules from that language need to be applied. """ # co has fr as altlang but has no plural rules defined (otherwise this # test might not catch problems) so it's using the plural variant for 0 # although French uses the plural variant for numbers > 1 (so not 0) assert 'co' not in plural.plural_rules assert plural.plural_rules['fr']['plural'](0) is False self.assertEqual( i18n.twntranslate('co', 'test-plural', {'num': 0, 'descr': 'seulement'}), u'Robot: Changer seulement une page.') self.assertEqual( i18n.twntranslate('co', 'test-plural', {'num': 1, 'descr': 'seulement'}), u'Robot: Changer seulement une page.') class ScriptMessagesTestCase(TWNTestCaseBase): """Real messages test.""" net = False message_package = 'scripts.i18n' def test_basic(self): """Verify that real messages are able to be loaded.""" self.assertEqual(i18n.twntranslate('en', 'pywikibot-enter-new-text'), 'Please enter the new text:') def test_missing(self): """Test a missing message from a real message bundle.""" self.assertRaises(i18n.TranslationError, i18n.twntranslate, 'en', 'pywikibot-missing-key') class InputTestCase(TWNTestCaseBase, UserInterfaceLangTestCase, PwbTestCase): """Test i18n.input.""" family = 'wikipedia' code = 'arz' message_package = 'scripts.i18n' @classmethod def setUpClass(cls): """Verify that a translation does not yet exist.""" super(InputTestCase, cls).setUpClass() if cls.code in i18n.twget_keys('pywikibot-enter-category-name'): raise unittest.SkipTest( '%s has a translation for %s' % (cls.code, 'pywikibot-enter-category-name')) def test_pagegen_i18n_input(self): """Test i18n.input via .""" result = self._execute(args=['listpages', '-cat'], data_in='non-existant-category\n', timeout=5) self.assertIn('Please enter the category name:', result['stderr']) class MissingPackageTestCase(TWNSetMessagePackageBase, UserInterfaceLangTestCase, DefaultSiteTestCase): """Test misssing messages package.""" message_package = 'scripts.foobar.i18n' def _capture_output(self, text, *args, **kwargs): self.output_text = text def setUp(self): """Patch the output and input methods.""" super(MissingPackageTestCase, self).setUp() self.output_text = '' self.orig_raw_input = bot.ui._raw_input self.orig_output = bot.ui.output bot.ui._raw_input = lambda *args, **kwargs: 'dummy input' bot.ui.output = self._capture_output def tearDown(self): """Restore the output and input methods.""" bot.ui._raw_input = self.orig_raw_input bot.ui.output = self.orig_output super(MissingPackageTestCase, self).tearDown() def test_pagegen_i18n_input(self): """Test i18n.input falls back with missing message package.""" rv = i18n.input('pywikibot-enter-category-name', fallback_prompt='dummy output') self.assertEqual(rv, 'dummy input') self.assertIn('dummy output: ', self.output_text) if __name__ == '__main__': try: unittest.main() except SystemExit: pass
{ "content_hash": "67d4b7d0a5805044a5d1f066cc59960b", "timestamp": "", "source": "github", "line_count": 420, "max_line_length": 100, "avg_line_length": 40.65952380952381, "alnum_prop": 0.5705334660654682, "repo_name": "icyflame/batman", "id": "9ff591355bf217b0d3607b4ff167cbfdcb1c99cf", "size": "17128", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/i18n_tests.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "97" }, { "name": "Python", "bytes": "3922041" } ], "symlink_target": "" }
""" Monitors an obiwan production run using qdo """ import os import numpy as np from glob import glob import re from collections import defaultdict from obiwan.common import dobash,writelist,get_rsdir import qdo QDO_RESULT= ['running', 'succeeded', 'failed'] def get_interm_dir(outdir,brick,rowstart, do_skipids='no',do_more='no'): """Returns paths like outdir/bri/brick/rs0""" rsdir= get_rsdir(rowstart, do_skipids,do_more) return os.path.join(outdir,brick[:3],brick,rsdir) def get_final_dir(outdir,brick,rowstart, do_skipids='no',do_more='no'): """Returns paths like outdir/replaceme/bri/brick/rs0""" rsdir= get_rsdir(rowstart, do_skipids,do_more) return os.path.join(outdir,'replaceme',brick[:3],brick, rsdir) def get_deldirs(outdir,brick,rowstart, do_skipids='no',do_more='no'): """If slurm timeout or failed, logfile will exist in final dir but other outputs will be in interm dir. Return list of dirst to all of these """ dirs= [get_final_dir(outdir,brick,rowstart, do_skipids,do_more).replace('replaceme','logs')] dirs+= [get_interm_dir(outdir,brick,rowstart, do_skipids,do_more)] return dirs def get_checkpoint_fn(outdir,brick,rowstart): return os.path.join(outdir,'checkpoint', brick[:3],brick, 'checkpoint_rs%d.pickle' % int(rowstart)) def get_logdir(outdir,brick,rowstart, do_skipids='no',do_more='no'): return (get_final_dir(outdir,brick,rowstart, do_skipids,do_more) .replace('replaceme','logs')) def get_logfile(outdir,brick,rowstart, do_skipids='no',do_more='no'): logdir= get_logdir(outdir,brick,rowstart, do_skipids,do_more) return os.path.join(logdir,'log.'+brick) def get_slurm_files(outdir): return glob( outdir + '/slurm-*.out') class QdoList(object): """Queries the qdo db and maps log files to tasks and task status Args: outdir: obiwan outdir, the slurm*.out files are there que_name: ie. qdo create que_name skip_suceeded: number succeeded tasks can be very large for production runs, this slows down code so skip those tasks """ def __init__(self,outdir,que_name='obiwan', skip_succeed=False, rand_num=None, firstN=None): print('que_name= ',que_name.upper()) self.outdir= outdir self.que_name= que_name self.skip_succeed= skip_succeed self.rand_num= rand_num self.firstN= firstN def isCosmos(self): return "cosmos" in self.que_name def get_tasks_logs(self): """get tasks and logs for the three types of qdo status""" # Logs for all Failed tasks tasks={} ids={} logs= defaultdict(list) #err= defaultdict(lambda: []) q = qdo.connect(self.que_name) for res in QDO_RESULT: if self.skip_succeed and res == 'succeeded': continue # List of "brick rs" for each QDO_RESULT qdo_tasks= np.array(q.tasks(state= getattr(qdo.Task, res.upper()))) if self.rand_num: qdo_tasks= qdo_tasks[np.random.randint(0,len(qdo_tasks),size=self.rand_num)] elif not self.firstN is None: qdo_tasks= qdo_tasks[:self.firstN] if len(qdo_tasks) > 0: ids[res],tasks[res] = zip(*[(a.id,a.task) for a in qdo_tasks]) else: ids[res],tasks[res]= [],[] # Corresponding log, slurm files for task in tasks[res]: # Logs if self.isCosmos(): brick,rs,do_skipids,do_more,subset = task.split(' ') outdir= os.path.join(self.outdir,'subset%s' % subset) else: brick,rs,do_skipids,do_more = task.split(' ') outdir= self.outdir logfn= get_logfile(outdir,brick,rs, do_skipids=do_skipids,do_more=do_more) logs[res].append( logfn ) return tasks,ids,logs def change_task_state(self,task_ids,to=None, modify=False,rm_files=False): """change qdo tasks state, for tasks with task_ids, to pending,failed, etc Args: to: change qdo state to this, pending,failed rm_files: delete the output files for that task modify: actually do the modifications (fail safe option) """ assert(to in ['pending','failed']) q = qdo.connect(self.que_name) for task_id in task_ids: try: task_obj= q.tasks(id= int(task_id)) if self.isCosmos(): brick,rs,do_skipids,do_more,subset = task_obj.task.split(' ') outdir= os.path.join(self.outdir,'subset%s' % subset) else: brick,rs,do_skipids,do_more = task_obj.task.split(' ') outdir= self.outdir del_dirs= get_deldirs(outdir,brick,rs, do_skipids=do_skipids, do_more=do_more) del_fns= [get_checkpoint_fn(outdir,brick,rs)] if modify: if to == 'pending': # Stuck in pending b/c slurm job timed out task_obj.set_state(qdo.Task.PENDING) #print('id %s --> PENDING' % task_id) elif to == 'failed': # Manually force to failed, keep whatever outputs have task_obj.set_state(qdo.Task.FAILED) print('id %s --> FAILED' % task_id) if rm_files: for dr in del_dirs: dobash('rm -r %s/*' % dr) for fn in del_fns: dobash('rm %s' % fn) else: print('set --modify to affect id=%d, which corresponds to taks_obj=' % task_id,task_obj) print('set --rm_files to remove',del_dirs,del_fns) except ValueError: print('cant find task_id=%d' % task_id) class RunStatus(object): """Tallys which QDO_RESULTS actually finished, what errors occured, etc. Args: tasks: dict, each key is list of qdo tasks logs: dict, each key is list of log files for each task Defaults: regex_errs: list of regular expressions matching possible log file errors """ def __init__(self,tasks,logs): self.tasks= tasks self.logs= logs self.regex_errs= [ r'ValueError:\ starting\ row=[0-9]*?\ exceeds.*?sources', r'\No\ randoms\ in\ brick', r'pool\.py",\sline\s644,\sin\sget\n\s+raise\sself\._value\nAssertionError', r'assert\(len\(R\)\s==\slen\(blobsrcs\)\)\nAssertionError', r"ModuleNotFoundError:\sNo\smodule\snamed\s'fitsio'", r'psycopg2\.OperationalError:', r'MemoryError', r'astropy\.extern\.configobj\.configobj\.ParseError', r'RuntimeError:\ Command\ failed:\ sex\ -c', r'multiprocessing\/pool\.py\",\sline\s567', r"ImportError:\scannot\simport\sname\s'_fitsio_wrap'", r"OSError:\sFile\s not\sfound:", r"NothingToDoError:\sNo\sCCDs\stouching\sbrick", r'SystemError:\ \<built-in\ method\ flush'] self.regex_errs_extra= ['Other','log does not exist'] def get_tally(self): tally= defaultdict(list) for res in ['succeeded','failed','running']: print('res=%s' % res) if res == 'succeeded': for log in self.logs[res]: with open(log,'r') as foo: text= foo.read() if "decals_sim:All done!" in text: tally[res].append( 1 ) else: tally[res].append( 0 ) elif res == 'running': for log in self.logs[res]: tally[res].append(1) elif res == 'failed': for cnt,log in enumerate(self.logs[res]): if (cnt+1) % 25 == 0: print('%d/%d' % (cnt+1,len(self.logs[res]))) if not os.path.exists(log): tally[res].append('log does not exist') continue with open(log,'r') as foo: text= foo.read() found_err= False for regex in self.regex_errs: foundIt= re.search(regex, text) if foundIt: tally[res].append(regex) found_err=True break if not found_err: tally[res].append('Other') # numpy array, not list, works with np.where() for res in tally.keys(): tally[res]= np.array(tally[res]) return tally def print_tally(self,tally): for res in self.tasks.keys(): print('--- Tally %s ---' % res) if res == 'succeeded': print('%d/%d = done' % (len(tally[res]), np.sum(tally[res]))) elif res == 'failed': for regex in self.regex_errs + self.regex_errs_extra: print('%d/%d = %s' % ( np.where(tally[res] == regex)[0].size, len(tally[res]), regex)) elif res == 'running': print('%d/%d : need rerun' % (len(tally[res]),len(tally[res]))) def get_logs_for_failed(self,regex='Other'): """Returns log and slurm filenames for failed tasks labeled as regex""" return self.logs[ tally['failed'] == regex ] if __name__ == '__main__': from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument('--qdo_quename',default='obiwan_9deg',help='',required=False) parser.add_argument('--skip_succeed',action='store_true',default=False,help='speed up, number succeeded tasks can be very large for production runs and slows down status code',required=False) parser.add_argument('--rand_num',type=int,default=None,help='only process this many succeed,failed,running chosen at random from each',required=False) parser.add_argument('--firstN',type=int,default=None,help='speed up, instead of random 1000 do the first N (user specified) qdo tasks',required=False) parser.add_argument('--running_to_pending',action="store_true",default=False,help='set to reset all "running" jobs to "pending"') parser.add_argument('--running_to_failed',action="store_true",default=False,help='set to reset all "running" jobs to "failed"') parser.add_argument('--failed_message_to_pending',action='store',default=None,help='set to message of failed tak and reset all failed tasks with that message to pending') parser.add_argument('--failed_to_pending',action="store_true",default=False,help='set to reset all "failed" jobs to "pending"') parser.add_argument('--modify',action='store_true',default=False,help='set to actually reset the qdo tasks state AND to delete IFF running_to_pending or failed_message_to_pending are set') parser.add_argument('--outdir',default='.',help='',required=False) parser.add_argument('--no_write',action="store_true",default=False,help='modify the qdo DB but dont write out any text files',required=False) args = parser.parse_args() print(args) Q= QdoList(args.outdir,que_name=args.qdo_quename, skip_succeed=args.skip_succeed, rand_num=args.rand_num, firstN=args.firstN) print('Getting tasks,logs') tasks,ids,logs= Q.get_tasks_logs() # Logfile lists grouped by succeeded,running,failed if not args.no_write: for res in logs.keys(): writelist(logs[res],"%s_%s_logfns.txt" % (args.qdo_quename,res)) # Rerun tasks and delete those tasks' outputs if len(ids['running']) > 0: if args.running_to_pending: Q.change_task_state(ids['running'], to='pending',modify=args.modify, rm_files=False) elif args.running_to_failed: Q.change_task_state(ids['running'], to='failed',modify=args.modify, rm_files=False) if len(ids['failed']) > 0: if args.failed_to_pending: Q.change_task_state(ids['failed'], to='pending',modify=args.modify, rm_files=False) # Failed logfile lists, group by error message R= RunStatus(tasks,logs) print('Counting number of failed,suceed,running tasks for ech failure mode') tally= R.get_tally() R.print_tally(tally) # Subset of failed to pending if args.failed_message_to_pending: hasMessage= np.where(tally['failed'] == args.failed_message_to_pending)[0] if hasMessage.size > 0: theIds= np.array(ids['failed'])[hasMessage] Q.change_task_state(theIds, to='pending', modify=args.modify, rm_files=False) # logs,tasks for each type of failure if not args.no_write: print('Writing logs,tasks for each failure mode for failed tasks') for err_key in R.regex_errs + R.regex_errs_extra: err_logs= np.array(logs['failed'])[ tally['failed'] == err_key ] err_tasks= np.array(tasks['failed'])[ tally['failed'] == err_key ] err_string= (err_key[:12] + err_key[-8:]) err_string= ((err_key[:10] + err_key[-10:]) .replace(" ","_") .replace("/","") .replace("*","") .replace("?","") .replace(":","")) writelist(err_logs,"logs_%s_%s.txt" % (args.qdo_quename,err_string)) writelist(err_tasks,"tasks_%s_%s.txt" % (args.qdo_quename,err_string)) print('done')
{ "content_hash": "fb3e37ca52749254c9456261ac034bae", "timestamp": "", "source": "github", "line_count": 332, "max_line_length": 195, "avg_line_length": 43.29216867469879, "alnum_prop": 0.546441243999165, "repo_name": "legacysurvey/obiwan", "id": "586e2b8b673a506cd5647a761f0e5c6a8e40943a", "size": "14373", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "py/obiwan/runmanager/status.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "359896" }, { "name": "Shell", "bytes": "48136" } ], "symlink_target": "" }
import argparse import logging import psycopg2 import fplusdbinsmatch import fumlocxmlmatches import fplusdbschedulechkgroup class Script(fplusdbschedulechkgroup.Script): SQL_LAST_GROUP = 'SELECT max(group_id) from main.tournament;' def __init__(self, dsn, **kwargs): super().__init__(dsn, None, **kwargs) def start(self): self.logger.debug('GETTING LAST GROUP...') with self: conn = self.conn with conn: with conn.cursor() as cur: cur.execute(self.SQL_LAST_GROUP) gid = cur.fetchone()[0] if gid is None: gid = 11000 # TODO self.logger.info('LAST GROUP: {}'.format(gid)) self.ids = iter(range(1, gid + 500)) super().start() def get_main_params(): parser = argparse.ArgumentParser(parents=( fumlocxmlmatches.log_parser, )) parser.add_argument('dsn', help=('PostgreSQL connection string of the ' 'FUMBBLPlus database')) args = parser.parse_args() return dict(args._get_kwargs()) def main(): params = get_main_params() hdlr = logging.StreamHandler(params['logto']) hdlr.setFormatter(fumlocxmlmatches.LOG_FORMATTER) Script.logger.setLevel(params['loglevel']) Script.logger.addHandler(hdlr) script = Script(**params) script.start() if __name__ == '__main__': main()
{ "content_hash": "2d052d04e96d3e521640a7d19f252c0e", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 63, "avg_line_length": 22.964912280701753, "alnum_prop": 0.6562261268143621, "repo_name": "FUMBBLPlus/fplusdb_main", "id": "05a6aae806bb85c87f4d45281d292200be8f9f69", "size": "1356", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/fplusdbchkallgroups.py", "mode": "33261", "license": "mit", "language": [ { "name": "HTML", "bytes": "43600" }, { "name": "PLpgSQL", "bytes": "2576" }, { "name": "Python", "bytes": "101566" } ], "symlink_target": "" }
import pytest from integration_tests import AgentlessTestCase from integration_tests.tests.utils import get_resource as resource pytestmark = pytest.mark.group_dsl @pytest.mark.usefixtures('mock_workflows_plugin') @pytest.mark.usefixtures('testmockoperations_plugin') class OperationMappingTest(AgentlessTestCase): def test_operation_mapping(self): dsl_path = resource("dsl/operation_mapping.yaml") deployment, _ = self.deploy_and_execute_workflow(dsl_path, 'workflow1') invocations = self.get_runtime_property(deployment.id, 'mock_operation_invocation')[0] self.assertEqual(3, len(invocations)) for invocation in invocations: self.assertEqual(1, len(invocation)) self.assertEqual(invocation['test_key'], 'test_value') def test_operation_mapping_override(self): dsl_path = resource("dsl/operation_mapping.yaml") deployment, _ = self.deploy_and_execute_workflow(dsl_path, 'workflow2') invocations = self.get_runtime_property(deployment.id, 'mock_operation_invocation')[0] self.assertEqual(3, len(invocations)) for invocation in invocations: self.assertEqual(1, len(invocation)) self.assertEqual(invocation['test_key'], 'overridden_test_value') def test_operation_mapping_undeclared_override(self): dsl_path = resource("dsl/operation_mapping.yaml") self.deploy_and_execute_workflow(dsl_path, 'workflow3')
{ "content_hash": "314ba35c0b30881cb8eb7f0c363e245c", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 79, "avg_line_length": 44.628571428571426, "alnum_prop": 0.6658130601792573, "repo_name": "cloudify-cosmo/cloudify-manager", "id": "940c415a88764813be6ba5b492314a77af042fa0", "size": "2206", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/integration_tests/tests/agentless_tests/test_operation_mapping.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Clojure", "bytes": "4067" }, { "name": "Dockerfile", "bytes": "3843" }, { "name": "HTML", "bytes": "320" }, { "name": "Mako", "bytes": "494" }, { "name": "PLpgSQL", "bytes": "119062" }, { "name": "Python", "bytes": "3825971" }, { "name": "Shell", "bytes": "49121" } ], "symlink_target": "" }
def read_config(): import os import ConfigParser config_file = os.path.join(os.environ["HOME"], ".genomicoderc") assert os.path.exists(config_file), "File not found: %s" % config_file # Read the configuration. config = ConfigParser.ConfigParser() config.optionxform = str # use case sensitive option names config.read(config_file) # Set a dictionary of name=value from the configuration file, # ignoring section headings. var_dict = {} for section in config.sections(): for (name, value) in config.items(section): var_dict[name] = value return var_dict var_dict = read_config() for name, value in var_dict.iteritems(): vars()[name] = value del var_dict
{ "content_hash": "160173e68782025ef7b195ad68cf6c09", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 74, "avg_line_length": 29.6, "alnum_prop": 0.6594594594594595, "repo_name": "jefftc/changlab", "id": "56acd5ebdad28a40641751110b6f46a9c33b7442", "size": "962", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "genomicode/config.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "116953" }, { "name": "CSS", "bytes": "75418" }, { "name": "Groff", "bytes": "10237" }, { "name": "HTML", "bytes": "200459" }, { "name": "JavaScript", "bytes": "159618" }, { "name": "Makefile", "bytes": "11719" }, { "name": "Python", "bytes": "9300228" }, { "name": "R", "bytes": "94670" }, { "name": "Shell", "bytes": "63514" }, { "name": "TeX", "bytes": "64" } ], "symlink_target": "" }
from predikto.client import logger # Logging logger = logger # Credential api_key_id = None # "AN_API_KEY_ID" base_url = 'http://localhost:5000' workspace_id = 'Nw6jxvZxdLy9' project_id = 'NQEaxOkxYR2l' workspace_name = 'Hamburg'
{ "content_hash": "2f060da008d50960b9683ba2d82b7e34", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 36, "avg_line_length": 15.733333333333333, "alnum_prop": 0.7161016949152542, "repo_name": "predikto/python-sdk", "id": "d9267b16ac7a7f3d76ca79cadd043b688063ae46", "size": "236", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_util.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "31346" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('accounts', '0016_auto_20150814_1727'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('store', '0002_auto_20150814_0124'), ] operations = [ migrations.CreateModel( name='Cart', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('quantity', models.PositiveIntegerField()), ('added_or_updated_datetime', models.DateTimeField(default=django.utils.timezone.now)), ], ), migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('category_name', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='ContactUs', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('email', models.EmailField(max_length=254)), ('subject', models.CharField(max_length=50)), ('description', models.CharField(max_length=1000)), ], ), migrations.CreateModel( name='Dispute', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('message', models.CharField(max_length=1000)), ('status', models.CharField(max_length=1, choices=[('PEN', 'Pending'), ('ACC', 'Accepted'), ('DEC', 'Declined')], default='PEN')), ], ), migrations.CreateModel( name='Inventory', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('price', models.DecimalField(max_digits=19, decimal_places=4)), ('currency', models.CharField(max_length=3)), ('total_available_stock', models.PositiveIntegerField()), ('total_sold', models.PositiveIntegerField()), ('available_countries', models.CharField(max_length=52)), ('domestic_shipping_company', models.CharField(max_length=100, blank=True, null=True)), ('domestic_shipping_cost', models.DecimalField(decimal_places=4, max_digits=19, blank=True, null=True)), ('free_domestic_shipping', models.BooleanField()), ('international_shipping_company', models.CharField(max_length=100, blank=True, null=True)), ('international_shipping_cost', models.DecimalField(decimal_places=4, max_digits=19, blank=True, null=True)), ('free_international_shipping', models.BooleanField()), ('local_pick_up_accepted', models.BooleanField()), ('dispatch_max_time', models.PositiveIntegerField()), ('return_accepted', models.BooleanField()), ], ), migrations.CreateModel( name='Invoice', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('order_datetime', models.DateTimeField(default=django.utils.timezone.now)), ], ), migrations.CreateModel( name='Item', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('title', models.CharField(max_length=100)), ('description', models.CharField(max_length=2000)), ('brand', models.CharField(max_length=50)), ('shipping_product_dimension_height', models.DecimalField(max_digits=10, decimal_places=2)), ('shipping_product_dimension_width', models.DecimalField(max_digits=10, decimal_places=2)), ('shipping_product_dimension_length', models.DecimalField(max_digits=10, decimal_places=2)), ('shipping_product_dimension_units', models.CharField(max_length=2, choices=[('IN', 'Inches'), ('FT', 'Feet'), ('MM', 'Millimeters'), ('CM', 'Centimeters')], default='IN')), ('shipping_product_weight', models.DecimalField(max_digits=10, decimal_places=2)), ('shipping_product_weight_units', models.CharField(max_length=2, choices=[('OZ', 'Inches'), ('LB', 'Pounds'), ('MG', 'Milligrams'), ('G', 'Grams'), ('KG', 'Kilograms')], default='OZ')), ('posting_datetime', models.DateTimeField(default=django.utils.timezone.now)), ('last_updated_datetime', models.DateTimeField(blank=True, null=True)), ], ), migrations.CreateModel( name='ItemFeedback', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('rating', models.PositiveSmallIntegerField()), ('description', models.CharField(max_length=500)), ('visibility', models.BooleanField(default=False)), ('posting_datetime', models.DateTimeField(default=django.utils.timezone.now)), ('last_updated_datetime', models.DateTimeField(blank=True, null=True)), ('item', models.ForeignKey(to='store.Item', related_name='item_in_feedback')), ('user_reviewed', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='user_item_review')), ], ), migrations.CreateModel( name='ItemMedia', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('url', models.CharField(max_length=250)), ('title', models.CharField(max_length=100, blank=True, null=True)), ('photo_or_video', models.CharField(max_length=1, choices=[('P', 'Photo'), ('V', 'Video')], default='P')), ('added_datetime', models.DateTimeField(default=django.utils.timezone.now)), ('item', models.ForeignKey(to='store.Item', related_name='item_media')), ], ), migrations.CreateModel( name='MainSubCategory', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('main_sub_category_name', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='Order', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('tracking_number', models.CharField(max_length=30)), ('shipping_company', models.CharField(max_length=50)), ('quantity', models.PositiveIntegerField()), ('shipping_datetime', models.DateTimeField(blank=True, null=True)), ('delivery_datetime', models.DateTimeField(blank=True, null=True)), ('item_returned', models.NullBooleanField()), ('completed', models.BooleanField(default=False)), ('inventory', models.ForeignKey(to='store.Inventory', related_name='inventory_order')), ('invoice', models.ForeignKey(to='store.Invoice')), ], ), migrations.CreateModel( name='Payment', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('tax', models.DecimalField(max_digits=19, decimal_places=4)), ('total_amount_payed', models.DecimalField(max_digits=19, decimal_places=4)), ('payment_method', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='SellingAddress', fields=[ ('address_ptr', models.OneToOneField(primary_key=True, auto_created=True, serialize=False, parent_link=True, to='accounts.Address')), ], bases=('accounts.address',), ), migrations.CreateModel( name='StoreName', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('store_name', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='SubCategory', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)), ('sub_category_name', models.CharField(max_length=50)), ('category_fk', models.ForeignKey(to='store.StoreName')), ], ), migrations.RemoveField( model_name='customercontactseller', name='contacted_by', ), migrations.RemoveField( model_name='customercontactseller', name='seller', ), migrations.RemoveField( model_name='discussionforcontactingseller', name='reply_for', ), migrations.RemoveField( model_name='discussionforcontactingseller', name='submitted_by', ), migrations.AddField( model_name='sellerfeedback', name='last_updated_datetime', field=models.DateTimeField(blank=True, null=True), ), migrations.DeleteModel( name='CustomerContactSeller', ), migrations.DeleteModel( name='DiscussionForContactingSeller', ), migrations.AddField( model_name='mainsubcategory', name='sub_category_fk', field=models.ForeignKey(to='store.StoreName'), ), migrations.AddField( model_name='item', name='main_sub_category', field=models.ForeignKey(to='store.MainSubCategory', related_name='main_sub_cat_item'), ), migrations.AddField( model_name='invoice', name='payment', field=models.OneToOneField(to='store.Payment'), ), migrations.AddField( model_name='inventory', name='item', field=models.ForeignKey(to='store.Item', related_name='item_inventory'), ), migrations.AddField( model_name='inventory', name='item_location', field=models.ForeignKey(to='store.SellingAddress', related_name='item_location_address'), ), migrations.AddField( model_name='inventory', name='seller', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='seller_inventory'), ), migrations.AddField( model_name='dispute', name='order', field=models.ForeignKey(to='store.Order', related_name='order_no_disputes'), ), migrations.AddField( model_name='category', name='store_fk', field=models.ForeignKey(to='store.StoreName'), ), migrations.AddField( model_name='cart', name='item', field=models.ForeignKey(to='store.Item', related_name='item_in_cart'), ), migrations.AddField( model_name='cart', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='cart_user'), ), ]
{ "content_hash": "1821fa4632bfd0086e07f7a46c78c648", "timestamp": "", "source": "github", "line_count": 247, "max_line_length": 201, "avg_line_length": 48.07692307692308, "alnum_prop": 0.5623578947368421, "repo_name": "bharathramh92/easy-ecom", "id": "0941e983371434ed17413d80f20dc64b832e95f4", "size": "11899", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "store/migrations/0003_auto_20150815_1608.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "10768" }, { "name": "Python", "bytes": "113947" } ], "symlink_target": "" }
import logging.config from pymongo.connection import Connection from tornado.options import define import tornado.httpserver import tornado.ioloop import tornado.options from pushservices.apns import * from pushservices.gcm import GCMClient from pushservices.wns import WNSClient from pushservices.mpns import MPNSClient from pushservices.clickatell import * from uimodules import * from util import * from constants import DEVICE_TYPE_IOS, DEVICE_TYPE_ANDROID, DEVICE_TYPE_WNS, \ DEVICE_TYPE_MPNS define("port", default=8801, help="Application server listen port", type=int) define("pemdir", default="pemdir", help="Directory to store pems") define("passwordsalt", default="d2o0n1g2s0h3e1n1g", help="Being used to make password hash") define("cookiesecret", default="airnotifiercookiesecret", help="Cookie secret") define("debug", default=False, help="Debug mode") define("https", default=False, help="Enable HTTPS") define("httpscertfile", default="", help="HTTPS cert file") define("httpskeyfile", default="", help="HTTPS key file") define("mongohost", default="localhost", help="MongoDB host name") define("mongoport", default=27017, help="MongoDB port") define("masterdb", default="airnotifier", help="MongoDB DB to store information") define("collectionprefix", default="obj_", help="Collection name prefix") define("dbprefix", default="app_", help="DB name prefix") define("appprefix", default="", help="DB name prefix") loggingconfigfile='logging.ini' if os.path.isfile(loggingconfigfile): logging.config.fileConfig(loggingconfigfile) _logger = logging.getLogger('AirNotifierApp') class AirNotifierApp(tornado.web.Application): def init_routes(self, dir): from routes import RouteLoader return RouteLoader.load(dir) def get_broadcast_status(self, appname): status = "Notification sent!" error = False try: apns = self.services['apns'][appname][0] except (IndexError, KeyError): apns = None if apns is not None and apns.hasError(): status = apns.getError() error = True return {'msg':status, 'error':error} def send_broadcast(self, appname, appdb, **kwargs): channel = kwargs.get('channel', 'default') alert = kwargs.get('alert', None) sound = kwargs.get('sound', None) badge = kwargs.get('badge', None) device = kwargs.get('device', None) extra = kwargs.get('extra', {}) try: apns = self.services['apns'][appname][0] except (IndexError, KeyError): apns = None try: wns = self.services['wns'][appname][0] except (IndexError, KeyError): wns = None try: mpns = self.services['mpns'][appname][0] except (IndexError, KeyError): mpns = None try: gcm = self.services['gcm'][appname][0] except (IndexError, KeyError): gcm = None conditions = [] if channel == 'default': # channel is not set or channel is default conditions.append({'channel': {"$exists": False}}) conditions.append({'channel': 'default'}) else: conditions.append({'channel': channel}) if device: conditions.append({'device': device}) tokens = appdb.tokens.find({"$or": conditions}) regids = [] try: for token in tokens: t = token.get('token') if token['device'] == DEVICE_TYPE_IOS: if apns is not None: apns.process(token=t, alert=alert, extra=extra, apns=kwargs.get('apns', {})) elif token['device'] == DEVICE_TYPE_ANDROID: regids.append(t) elif token['device'] == DEVICE_TYPE_WNS: if wns is not None: wns.process(token=t, alert=alert, extra=extra, wns=kwargs.get('wns', {})) elif token['device'] == DEVICE_TYPE_MPNS: if mpns is not None: mpns.process(token=t, alert=alert, extra=extra, mpns=kwargs.get('mpns', {})) except Exception as ex: _logger.error(ex) # Now sending android notifications try: if (gcm is not None) and regids: response = gcm.process(token=regids, alert=alert, extra=extra, gcm=kwargs.get('gcm', {})) responsedata = response.json() except Exception as ex: _logger.error('GCM problem: ' + str(ex)) def __init__(self, services): app_settings = dict( debug=True, # debug=options.debug, app_title=u'AirNotifier', ui_modules={"AppSideBar": AppSideBar, "NavBar": NavBar, "TabBar": TabBar}, template_path=os.path.join(os.path.dirname(__file__), 'templates'), static_path=os.path.join(os.path.dirname(__file__), 'static'), cookie_secret=options.cookiesecret, login_url=r"/auth/login", autoescape=None, ) self.services = services sitehandlers = self.init_routes('controllers') apihandlers = self.init_routes('api') tornado.web.Application.__init__(self, sitehandlers + apihandlers, **app_settings) mongodb = None while not mongodb: try: mongodb = Connection(options.mongohost, options.mongoport) except: error_log("Cannot not connect to MongoDB") self.mongodb = mongodb self.masterdb = mongodb[options.masterdb] assert self.masterdb.connection == self.mongodb def main(self): _logger.info("Starting AirNotifier server") if options.https: import ssl try: ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx.load_cert_chain(options.httpscertfile, options.httpskeyfile) except IOError: print("Invalid path to SSL certificate and private key") raise http_server = tornado.httpserver.HTTPServer(self, ssl_options=ssl_ctx) else: http_server = tornado.httpserver.HTTPServer(self) http_server.listen(options.port) _logger.info("AirNotifier is ready") try: tornado.ioloop.IOLoop.instance().start() except KeyboardInterrupt: _logger.info("AirNotifier is quiting") tornado.ioloop.IOLoop.instance().stop() def init_messaging_agents(): services = { 'gcm': {}, 'wns': {}, 'apns': {}, 'mpns': {}, 'sms': {}, } mongodb = None while not mongodb: try: mongodb = Connection(options.mongohost, options.mongoport) except Exception as ex: _logger.error(ex) masterdb = mongodb[options.masterdb] apps = masterdb.applications.find() for app in apps: ''' APNs setup ''' services['apns'][app['shortname']] = [] conns = int(app['connections']) if conns < 1: conns = 1 if 'environment' not in app: app['environment'] = 'sandbox' if file_exists(app.get('certfile', False)) and file_exists(app.get('keyfile', False)) and 'shortname' in app: if app.get('enableapns', False): for instanceid in range(0, conns): try: apn = APNClient(app['environment'], app['certfile'], app['keyfile'], app['shortname'], instanceid) except Exception as ex: _logger.error(ex) continue services['apns'][app['shortname']].append(apn) ''' GCMClient setup ''' services['gcm'][app['shortname']] = [] if 'gcmprojectnumber' in app and 'gcmapikey' in app and 'shortname' in app: try: http = GCMClient(app['gcmprojectnumber'], app['gcmapikey'], app['shortname'], 0) except Exception as ex: _logger.error(ex) continue services['gcm'][app['shortname']].append(http) ''' WNS setup ''' services['wns'][app['shortname']] = [] if 'wnsclientid' in app and 'wnsclientsecret' in app and 'shortname' in app: try: wns = WNSClient(masterdb, app, 0) except Exception as ex: _logger.error(ex) continue services['wns'][app['shortname']].append(wns) ''' MPNS setup ''' services['mpns'][app['shortname']] = [] try: mpns = MPNSClient(masterdb, app, 0) except Exception as ex: _logger.error(ex) continue services['mpns'][app['shortname']].append(mpns) ''' clickatell ''' services['sms'][app['shortname']] = [] try: sms = ClickatellClient(masterdb, app, 0) except Exception as ex: _logger.error(ex) continue services['sms'][app['shortname']].append(sms) mongodb.close() return services if __name__ == "__main__": tornado.options.parse_config_file("airnotifier.conf") tornado.options.parse_command_line() services = init_messaging_agents() AirNotifierApp(services=services).main()
{ "content_hash": "a34b5dfdb8cdad5906b17cd1f02af673", "timestamp": "", "source": "github", "line_count": 257, "max_line_length": 122, "avg_line_length": 36.88715953307393, "alnum_prop": 0.5740506329113924, "repo_name": "imolainformatica/airnotifier", "id": "f036dc33d87511d94ec4e409dd0b884ed41c6a12", "size": "11039", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "airnotifier.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "179" }, { "name": "HTML", "bytes": "39261" }, { "name": "Makefile", "bytes": "58" }, { "name": "Python", "bytes": "167810" }, { "name": "Shell", "bytes": "850" } ], "symlink_target": "" }
import itertools from collections import defaultdict import numpy as np import networkx as nx from pgmpy.factors import TabularCPD from pgmpy.base import DirectedGraph, UndirectedGraph class DynamicBayesianNetwork(DirectedGraph): def __init__(self, ebunch=None): """ Base class for Dynamic Bayesian Network This model is a time variant of the static Bayesian model, where each time-slice has some static nodes and is then replicated over a certain time-slice. The nodes can be any hashable python objects. Parameters: ---------- ebunch: Data to initialize graph. If data=None (default) an empty graph is created. The data can be an edge list, or any NetworkX graph object Examples: -------- Create an empty Dynamic Bayesian Network with no nodes and no edges >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() Adding nodes and edges inside the dynamic bayesian network. A single node can be added using the method below. For adding edges we need to specify the time slice since edges can be across different time slices. >>> dbn.add_nodes_from(['D','G','I','S','L']) >>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('G',0),('L',0))]) >>> dbn.nodes() ['L', 'G', 'S', 'I', 'D'] >>> dbn.edges() [(('D', 0), ('G', 0)), (('G', 0), ('L', 0)), (('I', 0), ('G', 0))] If any variable is not present in the network while adding an edge, pgmpy will automatically add that variable to the network. Public Methods: --------------- add_cpds add_edge add_edges_from add_node add_nodes_from initialize_initial_state inter_slice intra_slice """ super().__init__() if ebunch: self.add_edges_from(ebunch) self.cpds = [] self.cardinalities = defaultdict(int) def add_node(self, node, **attr): """ Adds a single node to the Network Parameters ---------- node: node A node can be any hashable Python object. Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_node('A') """ super().add_node((node, 0), **attr) def add_nodes_from(self, nodes, **attr): """ Add multiple nodes to the Network. Parameters ---------- nodes: iterable container A container of nodes (list, dict, set, etc.). Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['A', 'B', 'C']) """ for node in nodes: self.add_node(node) def nodes(self): """ Returns the list of nodes present in the network Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['A', 'B', 'C']) >>> dbn.nodes() ['B', 'A', 'C'] """ return list(set([node for node, timeslice in super().nodes()])) def add_edge(self, start, end, **kwargs): """ Add an edge between two nodes. The nodes will be automatically added if they are not present in the network. Parameters ---------- start, end: The start, end nodes should contain the (node_name, time_slice) Here, node_name can be a hashable python object while the time_slice is an integer value, which denotes the index of the time_slice that the node belongs to. Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> model = DBN() >>> model.add_nodes_from(['D', 'I']) >>> model.add_edge(('D',0), ('I',0)) >>> model.edges() [(('D', 1), ('G', 1)), (('D', 0), ('G', 0))] """ try: if len(start) != 2 or len(end) !=2: raise ValueError('Nodes must be of type (node, time_slice).') elif not isinstance(start[1], int) or not isinstance(end[1], int): raise ValueError('Nodes must be of type (node, time_slice).') elif start[1] == end[1]: start = (start[0], 0) end = (end[0], 0) elif start[1] == end[1] - 1: start = (start[0], 0) end = (end[0], 1) elif start[1] == end[1] + 1: raise ValueError('Edges in backward direction are not allowed.') elif start[1] != end[1]: raise ValueError("Edges over multiple time slices is not currently supported") except TypeError: raise ValueError('Nodes must be of type (node, time_slice).') if start == end: raise ValueError('Self Loops are not allowed') elif start in super().nodes() and end in super().nodes() and nx.has_path(self, end, start): raise ValueError( 'Loops are not allowed. Adding the edge from (%s->%s) forms a loop.' % (str(end), str(start))) super().add_edge(start, end, **kwargs) if start[1] == end[1]: super().add_edge((start[0], 1 - start[1]), (end[0], 1 - end[1])) def add_edges_from(self, ebunch, **kwargs): """ Add all the edges in ebunch. If nodes referred in the ebunch are not already present, they will be automatically added. Node names should be strings. Parameters ---------- ebunch : container of edges Each edge given in the container will be added to the graph. The edges must be given as 2-tuples (u, v). Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_edges_from([(('D',0), ('G',0)), (('I',0), ('G',0))]) """ for edge in ebunch: self.add_edge(edge[0], edge[1]) def get_intra_edges(self, time_slice=0): """ returns the intra slice edges present in the 2-TBN. Parameter --------- time_slice:int The timeslice should be a positive value greater than or equal to zero Examples: ------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['D','G','I','S','L']) >>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('G',0),('L',0)),(('D',0),('D',1))]) >>> dbn.get_intra_edges() [(('D', 0), ('G', 0)), (('G', 0), ('L', 0)), (('I', 0), ('G', 0)) """ if not isinstance(time_slice, int) or time_slice < 0: raise ValueError("The timeslice should be a positive value greater than or equal to zero") return [tuple((x[0], time_slice) for x in edge) for edge in self.edges() if edge[0][1] == edge[1][1] == 0] def get_inter_edges(self): """ returns the inter-slice edges present in the 2-TBN Examples: ------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['D','G','I','S','L']) >>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1)))]) >>> dbn.get_inter_edges() [(('D', 0), ('D', 1)), (('I', 0), ('I', 1))] """ return [edge for edge in self.edges() if edge[0][1] != edge[1][1]] def get_interface_nodes(self, time_slice=0): """ returns the nodes in the first timeslice whose children are present in the first timeslice. Parameter --------- time_slice:int The timeslice should be a positive value greater than or equal to zero Examples: ------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L']) >>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('G',0),('L',0)),(('D',0),('D',1))]) >>> dbn.get_interface_nodes() [('D', 0)] """ if not isinstance(time_slice, int) or time_slice < 0: raise ValueError("The timeslice should be a positive value greater than or equal to zero") return [(edge[0][0], time_slice) for edge in self.get_inter_edges()] def get_slice_nodes(self, time_slice=0): """ returns the nodes present in a particular timeslice Parameter --------- time_slice:int The timeslice should be a positive value greater than or equal to zero Examples: ------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L']) >>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('G',0),('L',0)),(('D',0),('D',1))]) >>> dbn.get_slice_nodes() """ if not isinstance(time_slice, int) or time_slice < 0: raise ValueError("The timeslice should be a positive value greater than or equal to zero") return [(node, time_slice) for node in self.nodes()] def add_cpds(self, *cpds): """ This method adds the cpds to the dynamic bayesian network. Note that while adding variables and the evidence in cpd, they have to be of the following form (node_name, time_slice) Here, node_name is the node that is inserted while the time_slice is an integer value, which denotes the index of the time_slice that the node belongs to. Parameter --------- cpds : list, set, tuple (array-like) List of cpds (TabularCPD, TreeCPD, RuleCPD, Factor) which will be associated with the model Examples: ------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> from pgmpy.factors import TabularCPD >>> dbn = DBN() >>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))]) >>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5], ... [0.4,0.25,0.8,0.03], ... [0.3,0.7,0.02,0.2]], [('I', 0),('D', 0)],[2,2]) >>> d_i_cpd = TabularCPD(('D',1),2,[[0.6,0.3],[0.4,0.7]],[('D',0)],2) >>> diff_cpd = TabularCPD(('D',0),2,[[0.6,0.4]]) >>> intel_cpd = TabularCPD(('I',0),2,[[0.7,0.3]]) >>> i_i_cpd = TabularCPD(('I',1),2,[[0.5,0.4],[0.5,0.6]],[('I',0)],2) >>> dbn.add_cpds(grade_cpd, d_i_cpd, diff_cpd, intel_cpd, i_i_cpd) >>> dbn.cpds """ for cpd in cpds: if not isinstance(cpd, TabularCPD): raise ValueError('cpds should be an instances of TabularCPD, TreeCPD or RuleCPD') if set(cpd.variables) - set(cpd.variables).intersection(set(super().nodes())): raise ValueError('CPD defined on variable not in the model', cpd) self.cpds.append(cpd) def get_cpds(self, node=None, time_slice=0): """ Returns the cpds that have been added till now to the graph Parameter --------- node: The node should be be of the following form (node_name, time_slice) Here, node_name is the node that is inserted while the time_slice is an integer value, which denotes the index of the time_slice that the node belongs to. time_slice:int The timeslice should be a positive value greater than or equal to zero Examples: ------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> from pgmpy.factors import TabularCPD >>> dbn = DBN() >>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))]) >>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5], ... [0.4,0.25,0.8,0.03], ... [0.3,0.7,0.02,0.2]], [('I', 0),('D', 0)],[2,2]) >>> dbn.add_cpds(grade_cpd) >>> dbn.get_cpds() """ if node: if node not in super().nodes(): raise ValueError('Node not present in the model.') else: for cpd in self.cpds: if cpd.variable == node: return cpd else: return [cpd for cpd in self.cpds if set(list(cpd.variables)).issubset(self.get_slice_nodes(time_slice))] def check_model(self): """ Check the model for various errors. This method checks for the following errors. * Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01). * Checks if the CPDs associated with nodes are consistent with their parents. Returns ------- check: boolean True if all the checks are passed """ for node in super().nodes(): cpd = self.get_cpds(node=node) if isinstance(cpd, TabularCPD): evidence = cpd.evidence parents = self.get_parents(node) if set(evidence if evidence else []) != set(parents if parents else []): raise ValueError("CPD associated with %s doesn't have " "proper parents associated with it." % node) if not np.allclose(cpd.marginalize([node], inplace=False).values, np.ones(np.product(cpd.evidence_card)), atol=0.01): raise ValueError('Sum of probabilities of states for node {node}' ' is not equal to 1'.format(node=node)) return True def initialize_initial_state(self): """ This method will automatically re-adjust the cpds and the edges added to the bayesian network. If an edge that is added as an intra time slice edge in the 0th timeslice, this method will automatically add it in the 1st timeslice. It will also add the cpds. However, to call this method, one needs to add cpds as well as the edges in the bayesian network of the whole skeleton including the 0th and the 1st timeslice,. Examples: ------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> from pgmpy.factors import TabularCPD >>> student = DBN() >>> student.add_nodes_from(['D','G','I','S','L']) >>> student.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))]) >>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5], ... [0.4,0.25,0.8,0.03], ... [0.3,0.7,0.02,0.2]], [('I', 0),('D', 0)],[2,2]) >>> d_i_cpd = TabularCPD(('D',1),2,[[0.6,0.3],[0.4,0.7]],[('D',0)],2) >>> diff_cpd = TabularCPD(('D',0),2,[[0.6,0.4]]) >>> intel_cpd = TabularCPD(('I',0),2,[[0.7,0.3]]) >>> i_i_cpd = TabularCPD(('I',1),2,[[0.5,0.4],[0.5,0.6]],[('I',0)],2) >>> student.add_cpds(grade_cpd, d_i_cpd, diff_cpd, intel_cpd, i_i_cpd) >>> student.initialize_initial_state() """ for cpd in self.cpds: temp_var = (cpd.variable[0], 1 - cpd.variable[1]) parents = self.get_parents(temp_var) if not any(x.variable == temp_var for x in self.cpds): if all(x[1] == parents[0][1] for x in parents): if parents: new_cpd = TabularCPD(temp_var, cpd.variable_card, cpd.values.reshape(cpd.variable_card, np.prod(cpd.evidence_card)), parents, cpd.evidence_card) else: new_cpd = TabularCPD(temp_var, cpd.variable_card, np.split(cpd.values, cpd.variable_card)) self.add_cpds(new_cpd) self.check_model() def moralize(self): """ Removes all the immoralities in the Network and creates a moral graph (UndirectedGraph). A v-structure X->Z<-Y is an immorality if there is no directed edge between X and Y. Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN([(('D',0), ('G',0)), (('I',0), ('G',0))]) >>> moral_graph = dbn.moralize() >>> moral_graph.edges() [(('G', 0), ('I', 0)), (('G', 0), ('D', 0)), (('D', 1), ('I', 1)), (('D', 1), ('G', 1)), (('I', 0), ('D', 0)), (('G', 1), ('I', 1))] """ moral_graph = UndirectedGraph(self.to_undirected().edges()) for node in super().nodes(): moral_graph.add_edges_from(itertools.combinations( self.get_parents(node), 2)) return moral_graph
{ "content_hash": "a2a6681e78ab47de7c50becee8c8c413", "timestamp": "", "source": "github", "line_count": 437, "max_line_length": 116, "avg_line_length": 39.77345537757437, "alnum_prop": 0.507910937230309, "repo_name": "pratyakshs/pgmpy", "id": "dbcbf9a8d840aa79fe1363b88fe7bc3828a8b73b", "size": "17381", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "pgmpy/models/DynamicBayesianNetwork.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "740816" }, { "name": "Shell", "bytes": "1022" } ], "symlink_target": "" }
from .base_settings import * # Network settings ALLOWED_HOSTS = ['api.axiologue.org', '104.236.76.8', 'api.mindful.click'] # DEBUG Tools DEBUG = False # Email settings SITE_ID = 2 EMAIL_BACKEND = 'postmark.django_backend.EmailBackend' POSTMARK_API_KEY = SECRETS['POSTMARK_KEY'] POSTMARK_SENDER = '[email protected]' POSTMARK_TRACK_OPENS = True # Static Files Config STATIC_ROOT = os.path.join(BASE_DIR, 'static') MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # Logging LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'handlers': { 'watched_file': { 'level': 'INFO', 'class': 'logging.handlers.WatchedFileHandler', 'filename': '/var/log/django/mindfulclick.log', }, }, 'loggers': { 'django': { 'handlers': ['watched_file',], 'level': 'INFO', 'propagate': True, }, }, }
{ "content_hash": "05e434b6d0035597d3c94176c75a0768", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 74, "avg_line_length": 24.07894736842105, "alnum_prop": 0.5934426229508196, "repo_name": "Axiologue/AxiologueAPI", "id": "b51b566be60af8bb6b47b4bd146e67ebde1173a4", "size": "915", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mindfulclick/production_settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "8480" }, { "name": "JavaScript", "bytes": "2510" }, { "name": "Python", "bytes": "150246" } ], "symlink_target": "" }
"""This module is deprecated. Please use `airflow.gcp.operators.translate_speech`.""" import warnings # pylint: disable=unused-import from airflow.gcp.operators.translate_speech import GcpTranslateSpeechOperator # noqa warnings.warn( "This module is deprecated. Please use `airflow.gcp.operators.translate_speech`.", DeprecationWarning, stacklevel=2 )
{ "content_hash": "d6730dd5a9ed0d25459d5f73a4a75ee0", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 86, "avg_line_length": 33.09090909090909, "alnum_prop": 0.7802197802197802, "repo_name": "Fokko/incubator-airflow", "id": "e84d25faf9b9e0e88007b81fdf1bb59fea135c21", "size": "1175", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "airflow/contrib/operators/gcp_translate_speech_operator.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "13715" }, { "name": "Dockerfile", "bytes": "14170" }, { "name": "HTML", "bytes": "145596" }, { "name": "JavaScript", "bytes": "25233" }, { "name": "Mako", "bytes": "1339" }, { "name": "Python", "bytes": "8787104" }, { "name": "Shell", "bytes": "187296" }, { "name": "TSQL", "bytes": "879" } ], "symlink_target": "" }
import datetime from django.db import models from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.models import User from news import settings as news_settings from djangobb_forum.util import convert_text_to_html from specialfields import FramedImageField class NewsItem(models.Model): """One Newsitem""" title = models.CharField(max_length=250) intro = models.TextField(blank=True) text = models.TextField() rendered_text = models.TextField(blank=True, null=True) author = models.ForeignKey(User) published = models.BooleanField(default=True) visible_from = models.DateTimeField(blank=True, null=True) visible_until = models.DateTimeField(blank=True, null=True) bigimage = FramedImageField(_('Big News Display Image'), blank=True, default='', upload_to=news_settings.UPLOAD_TO, width=news_settings.BIGIMG_WIDTH, height=news_settings.BIGIMG_HEIGHT) image = FramedImageField(_('Normal News Display Image'), blank=True, default='', upload_to=news_settings.UPLOAD_TO, width=news_settings.IMG_WIDTH, height=news_settings.IMG_HEIGHT) created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) updated_author = models.ForeignKey(User, related_name='+') class Meta: ordering = ['-updated'] def is_visible(self): """will the current item be visible?""" if not self.published: return False now = datetime.datetime.now() if self.visible_from and self.visible_until: return self.visible_from <= now and now <= self.visible_until if self.visible_from: return self.visible_from <= now if self.visible_until: return now <= self.visible_until return True is_visible.short_description = 'Item visible?' is_visible.boolean = True @models.permalink def get_absolute_url(self): return ('news.detail', [str(self.id)]) def save(self): self.render_text() super(NewsItem, self).save() def render_text(self): self.rendered_text = convert_text_to_html(self.text, 'bbcode')
{ "content_hash": "9dd2988b3f3f00ec6cafe311350702df", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 189, "avg_line_length": 35.40983606557377, "alnum_prop": 0.687037037037037, "repo_name": "jokey2k/ShockGsite", "id": "17d2dd1f36b189b80e288e479775f5fd50e91cbd", "size": "2160", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "news/models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "84880" }, { "name": "Python", "bytes": "262576" } ], "symlink_target": "" }
"""Tests for recommendation task.""" import os from absl.testing import parameterized import tensorflow.compat.v2 as tf from tensorflow_examples.lite.model_maker.core.data_util import recommendation_dataloader as _dl from tensorflow_examples.lite.model_maker.core.data_util import recommendation_testutil as _testutil from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat from tensorflow_examples.lite.model_maker.core.task import model_spec as ms from tensorflow_examples.lite.model_maker.core.task import recommendation class RecommendationTest(parameterized.TestCase, tf.test.TestCase): def setUp(self): super().setUp() _testutil.setup_fake_testdata(self) self.input_spec = _testutil.get_input_spec() self.model_hparams = _testutil.get_model_hparams() self.train_loader = _dl.RecommendationDataLoader.from_movielens( self.dataset_dir, 'train', self.input_spec) self.test_loader = _dl.RecommendationDataLoader.from_movielens( self.dataset_dir, 'test', self.input_spec) @parameterized.parameters( ('bow'), ('cnn'), ('lstm'), ) def test_create(self, encoder_type): model_dir = os.path.join(self.test_tempdir, 'recommendation_create') input_spec = _testutil.get_input_spec(encoder_type) model_spec = ms.get( 'recommendation', input_spec=input_spec, model_hparams=self.model_hparams) model = recommendation.create( self.train_loader, model_spec=model_spec, model_dir=model_dir, steps_per_epoch=1) self.assertIsNotNone(model.model) def test_evaluate(self): model_dir = os.path.join(self.test_tempdir, 'recommendation_evaluate') model_spec = ms.get( 'recommendation', input_spec=self.input_spec, model_hparams=self.model_hparams) model = recommendation.create( self.train_loader, model_spec=model_spec, model_dir=model_dir, steps_per_epoch=1) history = model.evaluate(self.test_loader) self.assertIsInstance(history, list) self.assertTrue(history) # Non-empty list. def test_export_and_evaluation(self): model_dir = os.path.join(self.test_tempdir, 'recommendation_export') model_spec = ms.get( 'recommendation', input_spec=self.input_spec, model_hparams=self.model_hparams) model = recommendation.create( self.train_loader, model_spec=model_spec, model_dir=model_dir, steps_per_epoch=1) export_format = [ ExportFormat.TFLITE, ExportFormat.SAVED_MODEL, ] model.export(model_dir, export_format=export_format) # Expect tflite file. expected_tflite = os.path.join(model_dir, 'model.tflite') self.assertTrue(os.path.exists(expected_tflite)) self.assertGreater(os.path.getsize(expected_tflite), 0) # Expect saved model. expected_saved_model = os.path.join(model_dir, 'saved_model', 'saved_model.pb') self.assertTrue(os.path.exists(expected_saved_model)) self.assertGreater(os.path.getsize(expected_saved_model), 0) # Evaluate tflite model. self._test_evaluate_tflite(model, expected_tflite) def _test_evaluate_tflite(self, model, tflite_filepath): result = model.evaluate_tflite(tflite_filepath, self.test_loader) self.assertIsInstance(result, dict) self.assertTrue(result) # Not empty. if __name__ == '__main__': tf.test.main()
{ "content_hash": "dd40c55eaeb43e12b5d8079915fa141a", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 100, "avg_line_length": 35.51020408163265, "alnum_prop": 0.6853448275862069, "repo_name": "tensorflow/examples", "id": "2ec249ddccabc1f00128fb09f99b923a4871944e", "size": "4088", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensorflow_examples/lite/model_maker/core/task/recommendation_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "106227" }, { "name": "CMake", "bytes": "1553" }, { "name": "CSS", "bytes": "4746" }, { "name": "Dockerfile", "bytes": "467" }, { "name": "HTML", "bytes": "12491" }, { "name": "Java", "bytes": "305092" }, { "name": "JavaScript", "bytes": "24461" }, { "name": "Jupyter Notebook", "bytes": "1733035" }, { "name": "Kotlin", "bytes": "631463" }, { "name": "Objective-C", "bytes": "14639" }, { "name": "Objective-C++", "bytes": "14293" }, { "name": "Python", "bytes": "1232357" }, { "name": "Ruby", "bytes": "3744" }, { "name": "Shell", "bytes": "41573" }, { "name": "Starlark", "bytes": "17498" }, { "name": "Swift", "bytes": "553535" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Account', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')), ('email', models.EmailField(unique=True, max_length=75)), ('username', models.CharField(unique=True, max_length=40)), ('first_name', models.CharField(max_length=40, blank=True)), ('last_name', models.CharField(max_length=40, blank=True)), ('tagline', models.CharField(max_length=140, blank=True)), ('is_admin', models.BooleanField(default=False)), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ], options={ 'abstract': False, }, bases=(models.Model,), ), ]
{ "content_hash": "c7faac3ca1407121332c4e709a4b30ec", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 115, "avg_line_length": 40.18181818181818, "alnum_prop": 0.5708898944193062, "repo_name": "tacitia/ThoughtFlow", "id": "38b19fbbefd1b50293dda3b514b97d8a9e9b0ead", "size": "1350", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "project/authentication/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "191013" }, { "name": "HTML", "bytes": "47230" }, { "name": "JavaScript", "bytes": "461204" }, { "name": "Perl", "bytes": "57958" }, { "name": "Python", "bytes": "205587" }, { "name": "Ruby", "bytes": "813" }, { "name": "TeX", "bytes": "1241" } ], "symlink_target": "" }
from threading import Thread from time import time, sleep from pymodbus.constants import Defaults from thingsboard_gateway.connectors.modbus.constants import * from thingsboard_gateway.connectors.connector import log from thingsboard_gateway.connectors.modbus.bytes_modbus_uplink_converter import BytesModbusUplinkConverter from thingsboard_gateway.connectors.modbus.bytes_modbus_downlink_converter import BytesModbusDownlinkConverter from thingsboard_gateway.tb_utility.tb_loader import TBModuleLoader class Slave(Thread): def __init__(self, **kwargs): super().__init__() self.timeout = kwargs.get('timeout') self.name = kwargs['deviceName'] self.poll_period = kwargs['pollPeriod'] / 1000 self.byte_order = kwargs['byteOrder'] self.word_order = kwargs.get('wordOrder') self.config = { 'unitId': kwargs['unitId'], 'deviceType': kwargs.get('deviceType', 'default'), 'type': kwargs['type'], 'host': kwargs.get('host'), 'port': kwargs['port'], 'byteOrder': kwargs['byteOrder'], 'wordOrder': kwargs['wordOrder'], 'timeout': kwargs.get('timeout', 35), 'stopbits': kwargs.get('stopbits', Defaults.Stopbits), 'bytesize': kwargs.get('bytesize', Defaults.Bytesize), 'parity': kwargs.get('parity', Defaults.Parity), 'strict': kwargs.get('strict', True), 'retries': kwargs.get('retries', 3), 'connection_attempt': 0, 'last_connection_attempt_time': 0, 'sendDataOnlyOnChange': kwargs.get('sendDataOnlyOnChange', False), 'waitAfterFailedAttemptsMs': kwargs.get('waitAfterFailedAttemptsMs', 0), 'connectAttemptTimeMs': kwargs.get('connectAttemptTimeMs', 0), 'retry_on_empty': kwargs.get('retryOnEmpty', False), 'retry_on_invalid': kwargs.get('retryOnInvalid', False), 'method': kwargs.get('method', 'rtu'), 'baudrate': kwargs.get('baudrate', 19200), 'attributes': kwargs.get('attributes', []), 'timeseries': kwargs.get('timeseries', []), 'attributeUpdates': kwargs.get('attributeUpdates', []), 'rpc': kwargs.get('rpc', []), 'last_attributes': {}, 'last_telemetry': {} } self.__load_converters(kwargs['connector'], kwargs['gateway']) self.callback = kwargs['callback'] self.last_polled_time = None self.daemon = True self.start() def timer(self): self.callback(self) self.last_polled_time = time() while True: if time() - self.last_polled_time >= self.poll_period: self.callback(self) self.last_polled_time = time() sleep(0.001) def run(self): self.timer() def get_name(self): return self.name def __load_converters(self, connector, gateway): try: if self.config.get(UPLINK_PREFIX + CONVERTER_PARAMETER) is not None: converter = TBModuleLoader.import_module(connector.connector_type, self.config[UPLINK_PREFIX + CONVERTER_PARAMETER])(self) else: converter = BytesModbusUplinkConverter({**self.config, 'deviceName': self.name}) if self.config.get(DOWNLINK_PREFIX + CONVERTER_PARAMETER) is not None: downlink_converter = TBModuleLoader.import_module(connector.connector_type, self.config[ DOWNLINK_PREFIX + CONVERTER_PARAMETER])(self) else: downlink_converter = BytesModbusDownlinkConverter(self.config) if self.name not in gateway.get_devices(): gateway.add_device(self.name, {CONNECTOR_PARAMETER: connector}, device_type=self.config.get(DEVICE_TYPE_PARAMETER)) self.config[UPLINK_PREFIX + CONVERTER_PARAMETER] = converter self.config[DOWNLINK_PREFIX + CONVERTER_PARAMETER] = downlink_converter except Exception as e: log.exception(e) def __str__(self): return f'{self.name}'
{ "content_hash": "996e34f4ec1bfd345a530c18490d6cd5", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 112, "avg_line_length": 41.271844660194176, "alnum_prop": 0.5982121853681487, "repo_name": "thingsboard/thingsboard-gateway", "id": "9c48018a71f7889a6ca9135eaad54989fbff85b9", "size": "4868", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "thingsboard_gateway/connectors/modbus/slave.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1659" }, { "name": "PLpgSQL", "bytes": "5034" }, { "name": "Python", "bytes": "1076772" }, { "name": "Shell", "bytes": "10610" } ], "symlink_target": "" }
""" :maintainer: Alberto Planas <[email protected]> :platform: Linux """ import salt.modules.freezer as freezer from salt.exceptions import CommandExecutionError from tests.support.mixins import LoaderModuleMockMixin from tests.support.mock import MagicMock, patch from tests.support.unit import TestCase class FreezerTestCase(TestCase, LoaderModuleMockMixin): """ Test cases for salt.modules.freezer """ def setup_loader_modules(self): return {freezer: {"__salt__": {}, "__opts__": {"cachedir": ""}}} @patch("os.path.isfile") def test_status(self, isfile): """ Test if a frozen state exist. """ isfile.side_effect = (True, True) self.assertTrue(freezer.status()) isfile.side_effect = (True, False) self.assertFalse(freezer.status()) @patch("os.listdir") @patch("os.path.isdir") def test_list(self, isdir, listdir): """ Test the listing of all frozen states. """ # There is no freezer directory isdir.return_value = False self.assertEqual(freezer.list_(), []) # There is freezer directory, but is empty isdir.return_value = True listdir.return_value = [] self.assertEqual(freezer.list_(), []) # There is freezer directory with states isdir.return_value = True listdir.return_value = [ "freezer-pkgs.yml", "freezer-reps.yml", "state-pkgs.yml", "state-reps.yml", "random-file", ] self.assertEqual(freezer.list_(), ["freezer", "state"]) @patch("os.makedirs") def test_freeze_fails_cache(self, makedirs): """ Test to freeze a current installation """ # Fails when creating the freeze cache directory makedirs.side_effect = OSError() self.assertRaises(CommandExecutionError, freezer.freeze) @patch("salt.modules.freezer.status") @patch("os.makedirs") def test_freeze_fails_already_frozen(self, makedirs, status): """ Test to freeze a current installation """ # Fails when there is already a frozen state status.return_value = True self.assertRaises(CommandExecutionError, freezer.freeze) makedirs.assert_called_once() @patch("salt.utils.json.dump") @patch("salt.modules.freezer.fopen") @patch("salt.modules.freezer.status") @patch("os.makedirs") def test_freeze_success_two_freeze(self, makedirs, status, fopen, dump): """ Test to freeze a current installation """ # Freeze the current new state status.return_value = False salt_mock = { "pkg.list_pkgs": MagicMock(return_value={}), "pkg.list_repos": MagicMock(return_value={}), } with patch.dict(freezer.__salt__, salt_mock): self.assertTrue(freezer.freeze("one")) self.assertTrue(freezer.freeze("two")) self.assertEqual(makedirs.call_count, 2) self.assertEqual(salt_mock["pkg.list_pkgs"].call_count, 2) self.assertEqual(salt_mock["pkg.list_repos"].call_count, 2) fopen.assert_called() dump.assert_called() @patch("salt.utils.json.dump") @patch("salt.modules.freezer.fopen") @patch("salt.modules.freezer.status") @patch("os.makedirs") def test_freeze_success_new_state(self, makedirs, status, fopen, dump): """ Test to freeze a current installation """ # Freeze the current new state status.return_value = False salt_mock = { "pkg.list_pkgs": MagicMock(return_value={}), "pkg.list_repos": MagicMock(return_value={}), } with patch.dict(freezer.__salt__, salt_mock): self.assertTrue(freezer.freeze()) makedirs.assert_called_once() salt_mock["pkg.list_pkgs"].assert_called_once() salt_mock["pkg.list_repos"].assert_called_once() fopen.assert_called() dump.assert_called() @patch("salt.utils.json.dump") @patch("salt.modules.freezer.fopen") @patch("salt.modules.freezer.status") @patch("os.makedirs") def test_freeze_success_force(self, makedirs, status, fopen, dump): """ Test to freeze a current installation """ # Freeze the current old state status.return_value = True salt_mock = { "pkg.list_pkgs": MagicMock(return_value={}), "pkg.list_repos": MagicMock(return_value={}), } with patch.dict(freezer.__salt__, salt_mock): self.assertTrue(freezer.freeze(force=True)) makedirs.assert_called_once() salt_mock["pkg.list_pkgs"].assert_called_once() salt_mock["pkg.list_repos"].assert_called_once() fopen.assert_called() dump.assert_called() @patch("salt.modules.freezer.status") def test_restore_fails_missing_state(self, status): """ Test to restore an old state """ # Fails if the state is not found status.return_value = False self.assertRaises(CommandExecutionError, freezer.restore) @patch("salt.utils.json.load") @patch("salt.modules.freezer.fopen") @patch("salt.modules.freezer.status") def test_restore_add_missing_repo(self, status, fopen, load): """ Test to restore an old state """ # Only a missing repo is installed status.return_value = True load.side_effect = ({}, {"missing-repo": {}}) salt_mock = { "pkg.list_pkgs": MagicMock(return_value={}), "pkg.list_repos": MagicMock(return_value={}), "pkg.mod_repo": MagicMock(), } with patch.dict(freezer.__salt__, salt_mock): self.assertEqual( freezer.restore(), { "pkgs": {"add": [], "remove": []}, "repos": {"add": ["missing-repo"], "remove": []}, "comment": [], }, ) salt_mock["pkg.list_pkgs"].assert_called() salt_mock["pkg.list_repos"].assert_called() salt_mock["pkg.mod_repo"].assert_called_once() fopen.assert_called() load.assert_called() @patch("salt.utils.json.load") @patch("salt.modules.freezer.fopen") @patch("salt.modules.freezer.status") def test_restore_add_missing_package(self, status, fopen, load): """ Test to restore an old state """ # Only a missing package is installed status.return_value = True load.side_effect = ({"missing-package": {}}, {}) salt_mock = { "pkg.list_pkgs": MagicMock(return_value={}), "pkg.list_repos": MagicMock(return_value={}), "pkg.install": MagicMock(), } with patch.dict(freezer.__salt__, salt_mock): self.assertEqual( freezer.restore(), { "pkgs": {"add": ["missing-package"], "remove": []}, "repos": {"add": [], "remove": []}, "comment": [], }, ) salt_mock["pkg.list_pkgs"].assert_called() salt_mock["pkg.list_repos"].assert_called() salt_mock["pkg.install"].assert_called_once() fopen.assert_called() load.assert_called() @patch("salt.utils.json.load") @patch("salt.modules.freezer.fopen") @patch("salt.modules.freezer.status") def test_restore_remove_extra_package(self, status, fopen, load): """ Test to restore an old state """ # Only an extra package is removed status.return_value = True load.side_effect = ({}, {}) salt_mock = { "pkg.list_pkgs": MagicMock(return_value={"extra-package": {}}), "pkg.list_repos": MagicMock(return_value={}), "pkg.remove": MagicMock(), } with patch.dict(freezer.__salt__, salt_mock): self.assertEqual( freezer.restore(), { "pkgs": {"add": [], "remove": ["extra-package"]}, "repos": {"add": [], "remove": []}, "comment": [], }, ) salt_mock["pkg.list_pkgs"].assert_called() salt_mock["pkg.list_repos"].assert_called() salt_mock["pkg.remove"].assert_called_once() fopen.assert_called() load.assert_called() @patch("salt.utils.json.load") @patch("salt.modules.freezer.fopen") @patch("salt.modules.freezer.status") def test_restore_remove_extra_repo(self, status, fopen, load): """ Test to restore an old state """ # Only an extra repository is removed status.return_value = True load.side_effect = ({}, {}) salt_mock = { "pkg.list_pkgs": MagicMock(return_value={}), "pkg.list_repos": MagicMock(return_value={"extra-repo": {}}), "pkg.del_repo": MagicMock(), } with patch.dict(freezer.__salt__, salt_mock): self.assertEqual( freezer.restore(), { "pkgs": {"add": [], "remove": []}, "repos": {"add": [], "remove": ["extra-repo"]}, "comment": [], }, ) salt_mock["pkg.list_pkgs"].assert_called() salt_mock["pkg.list_repos"].assert_called() salt_mock["pkg.del_repo"].assert_called_once() fopen.assert_called() load.assert_called() @patch("os.remove") @patch("salt.utils.json.load") @patch("salt.modules.freezer.fopen") @patch("salt.modules.freezer.status") def test_restore_clean_yml(self, status, fopen, load, remove): """ Test to restore an old state """ status.return_value = True salt_mock = { "pkg.list_pkgs": MagicMock(return_value={}), "pkg.list_repos": MagicMock(return_value={}), "pkg.install": MagicMock(), } with patch.dict(freezer.__salt__, salt_mock): self.assertEqual( freezer.restore(clean=True), { "pkgs": {"add": [], "remove": []}, "repos": {"add": [], "remove": []}, "comment": [], }, ) salt_mock["pkg.list_pkgs"].assert_called() salt_mock["pkg.list_repos"].assert_called() fopen.assert_called() load.assert_called() self.assertEqual(remove.call_count, 2)
{ "content_hash": "2f1f560b55ca071b82bd28544bf9639a", "timestamp": "", "source": "github", "line_count": 303, "max_line_length": 76, "avg_line_length": 35.83168316831683, "alnum_prop": 0.5392834116238372, "repo_name": "saltstack/salt", "id": "436ec4b7446f6369d1a4d09389248ec59c96358a", "size": "10857", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/unit/modules/test_freezer.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "14911" }, { "name": "C", "bytes": "1571" }, { "name": "Cython", "bytes": "1458" }, { "name": "Dockerfile", "bytes": "184" }, { "name": "Groovy", "bytes": "12318" }, { "name": "HCL", "bytes": "257" }, { "name": "HTML", "bytes": "8031" }, { "name": "Jinja", "bytes": "45598" }, { "name": "Makefile", "bytes": "713" }, { "name": "NSIS", "bytes": "76572" }, { "name": "PowerShell", "bytes": "75891" }, { "name": "Python", "bytes": "41444811" }, { "name": "Rich Text Format", "bytes": "6242" }, { "name": "Roff", "bytes": "191" }, { "name": "Ruby", "bytes": "961" }, { "name": "SaltStack", "bytes": "35856" }, { "name": "Scheme", "bytes": "895" }, { "name": "Scilab", "bytes": "1147" }, { "name": "Shell", "bytes": "524917" } ], "symlink_target": "" }
import json import time import urlparse from datetime import datetime, timedelta from django.conf import settings from django.core import mail from django.core.files import temp from django.core.files.base import File as DjangoFile from django.utils.datastructures import SortedDict from django.test.utils import override_settings import mock from mock import Mock, patch from nose.tools import eq_ from pyquery import PyQuery as pq import amo import amo.tests import reviews from abuse.models import AbuseReport from access.models import Group, GroupUser from addons.models import Addon, AddonDependency, AddonUser from amo.tests import check_links, formset, initial from amo.urlresolvers import reverse from devhub.models import ActivityLog from editors.models import EditorSubscription, ReviewerScore from files.models import File, FileValidation from reviews.models import Review, ReviewFlag from users.models import UserProfile from versions.models import ApplicationsVersions, AppVersion, Version from zadmin.models import get_config, set_config from .test_models import create_addon_file class EditorTest(amo.tests.TestCase): fixtures = ['base/users', 'base/approvals', 'editors/pending-queue'] def login_as_admin(self): assert self.client.login(username='[email protected]', password='password') def login_as_editor(self): assert self.client.login(username='[email protected]', password='password') def login_as_senior_editor(self): assert self.client.login(username='[email protected]', password='password') def make_review(self, username='a'): u = UserProfile.objects.create(username=username) a = Addon.objects.create(name='yermom', type=amo.ADDON_EXTENSION) return Review.objects.create(user=u, addon=a) def _test_breadcrumbs(self, expected=[]): r = self.client.get(self.url) expected.insert(0, ('Editor Tools', reverse('editors.home'))) check_links(expected, pq(r.content)('#breadcrumbs li'), verify=False) class TestEventLog(EditorTest): def setUp(self): super(TestEventLog, self).setUp() self.login_as_editor() self.url = reverse('editors.eventlog') amo.set_user(UserProfile.objects.get(username='editor')) def test_log(self): r = self.client.get(self.url) eq_(r.status_code, 200) def test_start_filter(self): r = self.client.get(self.url, dict(start='2011-01-01')) eq_(r.status_code, 200) def test_enddate_filter(self): """ Make sure that if our end date is 1/1/2011, that we include items from 1/1/2011. To not do as such would be dishonorable. """ review = self.make_review(username='b') amo.log(amo.LOG.APPROVE_REVIEW, review, review.addon, created=datetime(2011, 1, 1)) r = self.client.get(self.url, dict(end='2011-01-01')) eq_(r.status_code, 200) eq_(pq(r.content)('tbody td').eq(0).text(), 'Jan 1, 2011 12:00:00 AM') def test_action_filter(self): """ Based on setup we should see only two items if we filter for deleted reviews. """ review = self.make_review() for i in xrange(2): amo.log(amo.LOG.APPROVE_REVIEW, review, review.addon) amo.log(amo.LOG.DELETE_REVIEW, review.id, review.addon) r = self.client.get(self.url, dict(filter='deleted')) eq_(pq(r.content)('tbody tr').length, 2) def test_no_results(self): r = self.client.get(self.url, dict(end='2004-01-01')) assert '"no-results"' in r.content, 'Expected no results to be found.' def test_breadcrumbs(self): self._test_breadcrumbs([('Moderated Review Log', None)]) class TestEventLogDetail(TestEventLog): def test_me(self): review = self.make_review() amo.log(amo.LOG.APPROVE_REVIEW, review, review.addon) id = ActivityLog.objects.editor_events()[0].id r = self.client.get(reverse('editors.eventlog.detail', args=[id])) eq_(r.status_code, 200) class TestBetaSignedLog(EditorTest): def setUp(self): super(TestBetaSignedLog, self).setUp() self.login_as_editor() self.url = reverse('editors.beta_signed_log') amo.set_user(UserProfile.objects.get(username='editor')) addon = amo.tests.addon_factory() version = addon.versions.get() self.file1 = version.files.get() self.file2 = amo.tests.file_factory(version=version) self.file1_url = reverse('files.list', args=[self.file1.pk]) self.file2_url = reverse('files.list', args=[self.file2.pk]) self.log1 = amo.log(amo.LOG.BETA_SIGNED_VALIDATION_PASSED, self.file1) self.log2 = amo.log(amo.LOG.BETA_SIGNED_VALIDATION_FAILED, self.file2) def test_log(self): response = self.client.get(self.url) assert response.status_code == 200 def test_action_no_filter(self): response = self.client.get(self.url) results = pq(response.content)('tbody tr') assert results.length == 2 assert self.file1_url in unicode(results) assert self.file2_url in unicode(results) def test_action_filter_validation_passed(self): response = self.client.get( self.url, {'filter': amo.LOG.BETA_SIGNED_VALIDATION_PASSED.id}) results = pq(response.content)('tbody tr') assert results.length == 1 assert self.file1_url in unicode(results) assert self.file2_url not in unicode(results) def test_action_filter_validation_failed(self): response = self.client.get( self.url, {'filter': amo.LOG.BETA_SIGNED_VALIDATION_FAILED.id}) results = pq(response.content)('tbody tr') assert results.length == 1 assert self.file1_url not in unicode(results) assert self.file2_url in unicode(results) def test_no_results(self): ActivityLog.objects.all().delete() response = self.client.get(self.url) assert '"no-results"' in response.content def test_breadcrumbs(self): self._test_breadcrumbs([('Signed Beta Files Log', None)]) class TestReviewLog(EditorTest): fixtures = EditorTest.fixtures + ['base/addon_3615'] def setUp(self): super(TestReviewLog, self).setUp() self.login_as_editor() self.url = reverse('editors.reviewlog') def get_user(self): return UserProfile.objects.all()[0] def make_approvals(self): for addon in Addon.objects.all(): amo.log(amo.LOG.REJECT_VERSION, addon, addon.current_version, user=self.get_user(), details={'comments': 'youwin'}) def make_an_approval(self, action, comment='youwin', username=None, addon=None): if username: user = UserProfile.objects.get(username=username) else: user = self.get_user() if not addon: addon = Addon.objects.all()[0] amo.log(action, addon, addon.current_version, user=user, details={'comments': comment}) def test_basic(self): self.make_approvals() r = self.client.get(self.url) assert r.status_code == 200 doc = pq(r.content) assert doc('#log-filter button'), 'No filters.' # Should have 2 showing. rows = doc('tbody tr') assert rows.filter(':not(.hide)').length == 2 assert rows.filter('.hide').eq(0).text() == 'youwin' # Should have none showing if the addons are unlisted. Addon.objects.update(is_listed=False) r = self.client.get(self.url) assert r.status_code == 200 doc = pq(r.content) assert not doc('tbody tr :not(.hide)') # But they should have 2 showing for a senior editor. self.login_as_senior_editor() r = self.client.get(self.url) assert r.status_code == 200 doc = pq(r.content) rows = doc('tbody tr') assert rows.filter(':not(.hide)').length == 2 assert rows.filter('.hide').eq(0).text() == 'youwin' def test_xss(self): a = Addon.objects.all()[0] a.name = '<script>alert("xss")</script>' a.save() amo.log(amo.LOG.REJECT_VERSION, a, a.current_version, user=self.get_user(), details={'comments': 'xss!'}) r = self.client.get(self.url) eq_(r.status_code, 200) inner_html = pq(r.content)('#log-listing tbody td').eq(1).html() assert '&lt;script&gt;' in inner_html assert '<script>' not in inner_html def test_end_filter(self): """ Let's use today as an end-day filter and make sure we see stuff if we filter. """ self.make_approvals() # Make sure we show the stuff we just made. date = time.strftime('%Y-%m-%d') r = self.client.get(self.url, dict(end=date)) eq_(r.status_code, 200) doc = pq(r.content)('#log-listing tbody') eq_(doc('tr:not(.hide)').length, 2) eq_(doc('tr.hide').eq(0).text(), 'youwin') def test_end_filter_wrong(self): """ Let's use today as an end-day filter and make sure we see stuff if we filter. """ self.make_approvals() r = self.client.get(self.url, dict(end='wrong!')) # If this is broken, we'll get a traceback. eq_(r.status_code, 200) eq_(pq(r.content)('#log-listing tr:not(.hide)').length, 3) def test_search_comment_exists(self): """Search by comment.""" self.make_an_approval(amo.LOG.REQUEST_SUPER_REVIEW, comment='hello') r = self.client.get(self.url, dict(search='hello')) eq_(r.status_code, 200) eq_(pq(r.content)('#log-listing tbody tr.hide').eq(0).text(), 'hello') def test_search_comment_case_exists(self): """Search by comment, with case.""" self.make_an_approval(amo.LOG.REQUEST_SUPER_REVIEW, comment='hello') r = self.client.get(self.url, dict(search='HeLlO')) eq_(r.status_code, 200) eq_(pq(r.content)('#log-listing tbody tr.hide').eq(0).text(), 'hello') def test_search_comment_doesnt_exist(self): """Search by comment, with no results.""" self.make_an_approval(amo.LOG.REQUEST_SUPER_REVIEW, comment='hello') r = self.client.get(self.url, dict(search='bye')) eq_(r.status_code, 200) eq_(pq(r.content)('.no-results').length, 1) def test_search_author_exists(self): """Search by author.""" self.make_approvals() self.make_an_approval(amo.LOG.REQUEST_SUPER_REVIEW, username='editor', comment='hi') r = self.client.get(self.url, dict(search='editor')) eq_(r.status_code, 200) rows = pq(r.content)('#log-listing tbody tr') eq_(rows.filter(':not(.hide)').length, 1) eq_(rows.filter('.hide').eq(0).text(), 'hi') def test_search_author_case_exists(self): """Search by author, with case.""" self.make_approvals() self.make_an_approval(amo.LOG.REQUEST_SUPER_REVIEW, username='editor', comment='hi') r = self.client.get(self.url, dict(search='EdItOr')) eq_(r.status_code, 200) rows = pq(r.content)('#log-listing tbody tr') eq_(rows.filter(':not(.hide)').length, 1) eq_(rows.filter('.hide').eq(0).text(), 'hi') def test_search_author_doesnt_exist(self): """Search by author, with no results.""" self.make_approvals() self.make_an_approval(amo.LOG.REQUEST_SUPER_REVIEW, username='editor') r = self.client.get(self.url, dict(search='wrong')) eq_(r.status_code, 200) eq_(pq(r.content)('.no-results').length, 1) def test_search_addon_exists(self): """Search by add-on name.""" self.make_approvals() addon = Addon.objects.all()[0] r = self.client.get(self.url, dict(search=addon.name)) eq_(r.status_code, 200) tr = pq(r.content)('#log-listing tr[data-addonid="%s"]' % addon.id) eq_(tr.length, 1) eq_(tr.siblings('.comments').text(), 'youwin') def test_search_addon_case_exists(self): """Search by add-on name, with case.""" self.make_approvals() addon = Addon.objects.all()[0] r = self.client.get(self.url, dict(search=str(addon.name).swapcase())) eq_(r.status_code, 200) tr = pq(r.content)('#log-listing tr[data-addonid="%s"]' % addon.id) eq_(tr.length, 1) eq_(tr.siblings('.comments').text(), 'youwin') def test_search_addon_doesnt_exist(self): """Search by add-on name, with no results.""" self.make_approvals() r = self.client.get(self.url, dict(search='xxx')) eq_(r.status_code, 200) eq_(pq(r.content)('.no-results').length, 1) def test_breadcrumbs(self): self._test_breadcrumbs([('Add-on Review Log', None)]) @patch('devhub.models.ActivityLog.arguments', new=Mock) def test_addon_missing(self): self.make_approvals() r = self.client.get(self.url) eq_(pq(r.content)('#log-listing tr td').eq(1).text(), 'Add-on has been deleted.') def test_request_info_logs(self): self.make_an_approval(amo.LOG.REQUEST_INFORMATION) r = self.client.get(self.url) eq_(pq(r.content)('#log-listing tr td a').eq(1).text(), 'needs more information') def test_super_review_logs(self): self.make_an_approval(amo.LOG.REQUEST_SUPER_REVIEW) r = self.client.get(self.url) eq_(pq(r.content)('#log-listing tr td a').eq(1).text(), 'needs super review') def test_comment_logs(self): self.make_an_approval(amo.LOG.COMMENT_VERSION) r = self.client.get(self.url) eq_(pq(r.content)('#log-listing tr td a').eq(1).text(), 'commented') class TestHome(EditorTest): fixtures = EditorTest.fixtures + ['base/addon_3615'] def setUp(self): super(TestHome, self).setUp() self.login_as_editor() self.url = reverse('editors.home') self.user = UserProfile.objects.get(id=5497308) self.user.display_name = 'editor' self.user.save() amo.set_user(self.user) def approve_reviews(self): amo.set_user(self.user) for addon in Addon.objects.all(): amo.log(amo.LOG['APPROVE_VERSION'], addon, addon.current_version) def delete_review(self): review = self.make_review() review.delete() amo.log(amo.LOG.DELETE_REVIEW, review.addon, review, details=dict(addon_title='test', title='foo', body='bar', is_flagged=True)) return review def test_approved_review(self): review = self.make_review() amo.log(amo.LOG.APPROVE_REVIEW, review, review.addon, details=dict(addon_name='test', addon_id=review.addon.pk, is_flagged=True)) r = self.client.get(self.url) row = pq(r.content)('.row') assert 'approved' in row.text(), ( 'Expected review to be approved by editor') assert row('a[href*=yermom]'), 'Expected links to approved addon' def test_deleted_review(self): self.delete_review() doc = pq(self.client.get(self.url).content) eq_(doc('.row').eq(0).text().strip().split('.')[0], 'editor deleted Review for yermom ') al_id = ActivityLog.objects.all()[0].id url = reverse('editors.eventlog.detail', args=[al_id]) doc = pq(self.client.get(url).content) elems = zip(doc('dt'), doc('dd')) expected = [ ('Add-on Title', 'test'), ('Review Title', 'foo'), ('Review Text', 'bar'), ] for (dt, dd), texts in zip(elems, expected): eq_(dt.text, texts[0]) eq_(dd.text, texts[1]) def undelete_review(self, review, allowed): al = ActivityLog.objects.order_by('-id')[0] eq_(al.arguments[1], review) url = reverse('editors.eventlog.detail', args=[al.id]) doc = pq(self.client.get(url).content) eq_(doc('#submit-undelete-review').attr('value') == 'Undelete', allowed) r = self.client.post(url, {'action': 'undelete'}) assert r.status_code in (302, 403) post = r.status_code == 302 eq_(post, allowed) def test_undelete_review_own(self): review = self.delete_review() # Undeleting a review you deleted is always allowed. self.undelete_review(review, allowed=True) def test_undelete_review_other(self): amo.set_user(UserProfile.objects.get(email='[email protected]')) review = self.delete_review() # Normal editors undeleting reviews deleted by other editors is # not allowed. amo.set_user(self.user) self.undelete_review(review, allowed=False) def test_undelete_review_admin(self): review = self.delete_review() # Admins can always undelete reviews. self.login_as_admin() self.undelete_review(review, allowed=True) def test_stats_total(self): self.approve_reviews() doc = pq(self.client.get(self.url).content) cols = doc('#editors-stats .editor-stats-table:eq(1)').find('td') eq_(cols.eq(0).text(), self.user.display_name) eq_(int(cols.eq(1).text()), 2, 'Approval count should be 2') def test_stats_monthly(self): self.approve_reviews() doc = pq(self.client.get(self.url).content) cols = doc('#editors-stats .editor-stats-table:eq(1)').find('td') eq_(cols.eq(0).text(), self.user.display_name) eq_(int(cols.eq(1).text()), 2, 'Approval count should be 2') @override_settings(EDITOR_REVIEWS_MAX_DISPLAY=0) def test_stats_user_position_ranked(self): self.approve_reviews() doc = pq(self.client.get(self.url).content) el = doc('#editors-stats .editor-stats-table').eq(0)('div:last-child') eq_(el.text(), "You're #1 with 2 reviews", 'Total reviews should show position') el = doc('#editors-stats .editor-stats-table').eq(1)('div:last-child') eq_(el.text(), "You're #1 with 2 reviews", 'Monthly reviews should show position') def test_stats_user_position_unranked(self): self.approve_reviews() doc = pq(self.client.get(self.url).content) p = doc('#editors-stats .editor-stats-table p:eq(0)') eq_(p.text(), None) p = doc('#editors-stats .editor-stats-table p:eq(1)') eq_(p.text(), None, 'Monthly reviews should not be displayed') def test_new_editors(self): amo.log(amo.LOG.GROUP_USER_ADDED, Group.objects.get(name='Add-on Reviewers'), self.user) doc = pq(self.client.get(self.url).content) anchors = doc('#editors-stats .editor-stats-table:eq(2)').find('td a') eq_(anchors.eq(0).text(), self.user.display_name) def test_unlisted_queues_only_for_senior_reviewers(self): listed_queues_links = [ reverse('editors.queue_fast_track'), reverse('editors.queue_nominated'), reverse('editors.queue_pending'), reverse('editors.queue_prelim'), reverse('editors.queue_moderated')] unlisted_queues_links = [ reverse('editors.unlisted_queue_nominated'), reverse('editors.unlisted_queue_pending'), reverse('editors.unlisted_queue_prelim')] # Only listed queues for editors. doc = pq(self.client.get(self.url).content) queues = doc('#listed-queues ul li a') queues_links = [link.attrib['href'] for link in queues] assert queues_links == listed_queues_links assert not doc('#unlisted-queues') # Unlisted queues are not visible. # Both listed and unlisted queues for senior editors. self.login_as_senior_editor() doc = pq(self.client.get(self.url).content) queues = doc('#listed-queues ul li a') # Listed queues links. queues_links = [link.attrib['href'] for link in queues] assert queues_links == listed_queues_links queues = doc('#unlisted-queues ul li a') # Unlisted queues links. queues_links = [link.attrib['href'] for link in queues] assert queues_links == unlisted_queues_links def test_unlisted_stats_only_for_senior_reviewers(self): # Only listed queues stats for editors. doc = pq(self.client.get(self.url).content) assert doc('#editors-stats-charts') assert not doc('#editors-stats-charts-unlisted') # Both listed and unlisted queues for senior editors. self.login_as_senior_editor() doc = pq(self.client.get(self.url).content) assert doc('#editors-stats-charts') assert doc('#editors-stats-charts-unlisted') def test_stats_listed_unlisted(self): # Make sure the listed addons are displayed in the listed stats, and # that the unlisted addons are listed in the unlisted stats. # Create one listed, and two unlisted. create_addon_file('listed', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED) create_addon_file('unlisted 1', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED, listed=False) create_addon_file('unlisted 2', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED, listed=False) selector = '.editor-stats-title:eq(0)' # The new addons stats header. self.login_as_senior_editor() doc = pq(self.client.get(self.url).content) listed_stats = doc('#editors-stats-charts {0}'.format(selector)) assert 'Full Review (1)' in listed_stats.text() unlisted_stats = doc('#editors-stats-charts-unlisted {0}'.format( selector)) assert 'Unlisted Full Reviews (2)' in unlisted_stats.text() class QueueTest(EditorTest): fixtures = ['base/users'] listed = True def setUp(self): super(QueueTest, self).setUp() if self.listed: self.login_as_editor() else: # Testing unlisted views: needs Addons:ReviewUnlisted perm. self.login_as_senior_editor() self.url = reverse('editors.queue_pending') self.addons = SortedDict() self.expected_addons = [] def generate_files(self, subset=[]): files = SortedDict([ ('Pending One', { 'version_str': '0.1', 'addon_status': amo.STATUS_PUBLIC, 'file_status': amo.STATUS_UNREVIEWED, }), ('Pending Two', { 'version_str': '0.1', 'addon_status': amo.STATUS_PUBLIC, 'file_status': amo.STATUS_UNREVIEWED, }), ('Nominated One', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_UNREVIEWED, }), ('Nominated Two', { 'version_str': '0.1', 'addon_status': amo.STATUS_LITE_AND_NOMINATED, 'file_status': amo.STATUS_UNREVIEWED, }), ('Prelim One', { 'version_str': '0.1', 'addon_status': amo.STATUS_LITE, 'file_status': amo.STATUS_UNREVIEWED, }), ('Prelim Two', { 'version_str': '0.1', 'addon_status': amo.STATUS_UNREVIEWED, 'file_status': amo.STATUS_UNREVIEWED, }), ('Public', { 'version_str': '0.1', 'addon_status': amo.STATUS_PUBLIC, 'file_status': amo.STATUS_LITE, }), ]) results = {} for name, attrs in files.iteritems(): if not subset or name in subset: results[name] = self.addon_file(name, **attrs) return results def generate_file(self, name): return self.generate_files([name])[name] def get_review_data(self): # Format: (Created n days ago, # percentages of [< 5, 5-10, >10]) return ((1, (0, 0, 100)), (8, (0, 50, 50)), (11, (50, 0, 50))) def addon_file(self, *args, **kw): a = create_addon_file(*args, listed=self.listed, **kw) name = args[0] # Add-on name. self.addons[name] = a['addon'] # If this is an add-on we expect to be in the queue, then add it. if name in getattr(self, 'expected_names', []): self.expected_addons.append(a['addon']) return a['addon'] def get_queue(self, addon): version = addon.latest_version.reload() eq_(version.current_queue.objects.filter(id=addon.id).count(), 1) def _test_get_queue(self): self.generate_files() for addon in self.expected_addons: self.get_queue(addon) def _test_queue_count(self, eq, name, count): self.generate_files() r = self.client.get(self.url) eq_(r.status_code, 200) a = pq(r.content)('.tabnav li a:eq(%s)' % eq) eq_(a.text(), '%s (%s)' % (name, count)) eq_(a.attr('href'), self.url) def _test_results(self): r = self.client.get(self.url) eq_(r.status_code, 200) expected = [] for idx, addon in enumerate(self.expected_addons): name = '%s %s' % (unicode(addon.name), addon.current_version.version) url = reverse('editors.review', args=[addon.slug]) expected.append((name, url)) check_links( expected, pq(r.content)('#addon-queue tr.addon-row td a:not(.app-icon)'), verify=False) class TestQueueBasics(QueueTest): fixtures = QueueTest.fixtures + ['editors/user_persona_reviewer'] def test_only_viewable_by_editor(self): # Addon reviewer has access. r = self.client.get(self.url) eq_(r.status_code, 200) # Regular user doesn't have access. self.client.logout() assert self.client.login(username='[email protected]', password='password') r = self.client.get(self.url) eq_(r.status_code, 403) # Persona reviewer doesn't have access either. self.client.logout() assert self.client.login(username='[email protected]', password='password') r = self.client.get(self.url) eq_(r.status_code, 403) def test_invalid_page(self): r = self.client.get(self.url, {'page': 999}) eq_(r.status_code, 200) eq_(r.context['page'].number, 1) def test_invalid_per_page(self): r = self.client.get(self.url, {'per_page': '<garbage>'}) # No exceptions: eq_(r.status_code, 200) def test_grid_headers(self): r = self.client.get(self.url) eq_(r.status_code, 200) doc = pq(r.content) expected = [ 'Addon', 'Type', 'Waiting Time', 'Flags', 'Applications', 'Platforms', 'Additional', ] eq_([pq(th).text() for th in doc('#addon-queue tr th')[1:]], expected) def test_grid_headers_sort_after_search(self): params = dict(searching=['True'], text_query=['abc'], addon_type_ids=['2'], sort=['addon_type_id']) r = self.client.get(self.url, params) eq_(r.status_code, 200) tr = pq(r.content)('#addon-queue tr') sorts = { # Column index => sort. 1: 'addon_name', # Add-on. 2: '-addon_type_id', # Type. 3: 'waiting_time_min', # Waiting Time. } for idx, sort in sorts.iteritems(): # Get column link. a = tr('th:eq(%s)' % idx).find('a') # Update expected GET parameters with sort type. params.update(sort=[sort]) # Parse querystring of link to make sure `sort` type is correct. eq_(urlparse.parse_qs(a.attr('href').split('?')[1]), params) def test_no_results(self): r = self.client.get(self.url) eq_(r.status_code, 200) eq_(pq(r.content)('.queue-outer .no-results').length, 1) def test_no_paginator_when_on_single_page(self): r = self.client.get(self.url) eq_(r.status_code, 200) eq_(pq(r.content)('.pagination').length, 0) def test_paginator_when_many_pages(self): # 'Pending One' and 'Pending Two' should be the only add-ons in # the pending queue, but we'll generate them all for good measure. self.generate_files() r = self.client.get(self.url, {'per_page': 1}) eq_(r.status_code, 200) doc = pq(r.content) eq_(doc('.data-grid-top .num-results').text(), u'Results 1 \u2013 1 of 2') eq_(doc('.data-grid-bottom .num-results').text(), u'Results 1 \u2013 1 of 2') def test_navbar_queue_counts(self): self.generate_files() r = self.client.get(self.url) eq_(r.status_code, 200) doc = pq(r.content) eq_(doc('#navbar li.top ul').eq(0).text(), 'Fast Track (0) Full Reviews (2) Pending Updates (2) ' 'Preliminary Reviews (2) Moderated Reviews (0)') def test_legacy_queue_sort(self): sorts = ( ['age', 'Waiting Time'], ['name', 'Addon'], ['type', 'Type'], ) for key, text in sorts: r = self.client.get(self.url, {'sort': key}) eq_(r.status_code, 200) eq_(pq(r.content)('th.ordered a').text(), text) def test_full_reviews_bar(self): self.generate_files() addon = self.addons['Nominated Two'] for data in self.get_review_data(): self.check_bar(addon, eq=0, data=data, reset_status=False) def test_pending_bar(self): self.generate_files() addon = self.addons['Pending One'] for data in self.get_review_data(): self.check_bar(addon, eq=1, data=data, reset_status=True) def test_prelim_bar(self): self.generate_files() addon = self.addons['Prelim One'] for data in self.get_review_data(): self.check_bar(addon, eq=2, data=data) def check_bar(self, addon, eq, data, reset_status=False): # `eq` is the table number (0, 1 or 2). def style(w): return 'width:%s%%' % (float(w) if w > 0 else 0) days, widths = data f = addon.versions.all()[0].all_files[0] d = datetime.now() - timedelta(days=days) f.update(created=d) addon.versions.latest().update(nomination=d) # For pending, we must reset the add-on status after saving version. if reset_status: addon.update(status=amo.STATUS_PUBLIC) r = self.client.get(reverse('editors.home')) doc = pq(r.content) sel = '#editors-stats-charts{0}'.format('' if self.listed else '-unlisted') div = doc('{0} .editor-stats-table:eq({1})'.format(sel, eq)) eq_(div('.waiting_old').attr('style'), style(widths[0])) eq_(div('.waiting_med').attr('style'), style(widths[1])) eq_(div('.waiting_new').attr('style'), style(widths[2])) def test_flags_jetpack(self): ad = create_addon_file('Jetpack', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED) ad_file = ad['version'].files.all()[0] ad_file.update(jetpack_version=1.2) r = self.client.get(reverse('editors.queue_nominated')) rows = pq(r.content)('#addon-queue tr.addon-row') eq_(rows.length, 1) eq_(rows.attr('data-addon'), str(ad['addon'].id)) eq_(rows.find('td').eq(1).text(), 'Jetpack 0.1') eq_(rows.find('.ed-sprite-jetpack').length, 1) eq_(rows.find('.ed-sprite-restartless').length, 0) def test_flags_restartless(self): ad = create_addon_file('Restartless', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED) ad_file = ad['version'].files.all()[0] ad_file.update(no_restart=True) r = self.client.get(reverse('editors.queue_nominated')) rows = pq(r.content)('#addon-queue tr.addon-row') eq_(rows.length, 1) eq_(rows.attr('data-addon'), str(ad['addon'].id)) eq_(rows.find('td').eq(1).text(), 'Restartless 0.1') eq_(rows.find('.ed-sprite-jetpack').length, 0) eq_(rows.find('.ed-sprite-restartless').length, 1) def test_flags_restartless_and_jetpack(self): ad = create_addon_file('Restartless Jetpack', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED) ad_file = ad['version'].files.all()[0] ad_file.update(jetpack_version=1.2, no_restart=True) r = self.client.get(reverse('editors.queue_nominated')) rows = pq(r.content)('#addon-queue tr.addon-row') eq_(rows.length, 1) eq_(rows.attr('data-addon'), str(ad['addon'].id)) eq_(rows.find('td').eq(1).text(), 'Restartless Jetpack 0.1') # Show only jetpack if it's both. eq_(rows.find('.ed-sprite-jetpack').length, 1) eq_(rows.find('.ed-sprite-restartless').length, 0) def test_theme_redirect(self): users = [] for x in range(2): user = amo.tests.user_factory() user.set_password('password') user.save() users.append(user) self.grant_permission(users[0], 'Personas:Review') self.client.logout() self.login(users[0]) res = self.client.get(reverse('editors.home')) self.assertRedirects(res, reverse('editors.themes.home')) self.grant_permission(users[1], 'Addons:Review') self.client.logout() self.login(users[1]) res = self.client.get(reverse('editors.home')) eq_(res.status_code, 200) class TestUnlistedQueueBasics(TestQueueBasics): fixtures = QueueTest.fixtures + ['editors/user_persona_reviewer'] listed = False def setUp(self): super(TestUnlistedQueueBasics, self).setUp() self.login_as_senior_editor() self.url = reverse('editors.unlisted_queue_pending') def test_only_viewable_by_senior_editor(self): # Addon reviewer has access. r = self.client.get(self.url) eq_(r.status_code, 200) # Regular user doesn't have access. self.client.logout() assert self.client.login(username='[email protected]', password='password') r = self.client.get(self.url) eq_(r.status_code, 403) # Persona reviewer doesn't have access either. self.client.logout() assert self.client.login(username='[email protected]', password='password') r = self.client.get(self.url) eq_(r.status_code, 403) # Standard reviewer doesn't have access either. self.client.logout() assert self.client.login(username='[email protected]', password='password') r = self.client.get(self.url) eq_(r.status_code, 403) def test_navbar_queue_counts(self): self.generate_files() r = self.client.get(self.url) eq_(r.status_code, 200) doc = pq(r.content) eq_(doc('#navbar li.top ul').eq(1).text(), 'Full Reviews (2) Pending Updates (2) Preliminary Reviews (2)') def test_listed_unlisted_queues(self): # Make sure the listed addons are displayed in the listed queue, and # that the unlisted addons are listed in the unlisted queue. listed = create_addon_file('listed', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED)['addon'] unlisted = create_addon_file('unlisted', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED, listed=False)['addon'] # Listed addon is displayed in the listed queue. r = self.client.get(reverse('editors.queue_nominated')) eq_(r.status_code, 200) doc = pq(r.content) assert doc('#addon-queue #addon-{0}'.format(listed.pk)) assert not doc('#addon-queue #addon-{0}'.format(unlisted.pk)) # Unlisted addon is displayed in the unlisted queue. r = self.client.get(reverse('editors.unlisted_queue_nominated')) eq_(r.status_code, 200) doc = pq(r.content) assert not doc('#addon-queue #addon-{0}'.format(listed.pk)) assert doc('#addon-queue #addon-{0}'.format(unlisted.pk)) class TestPendingQueue(QueueTest): def setUp(self): super(TestPendingQueue, self).setUp() # These should be the only ones present in the queue. self.expected_names = ['Pending One', 'Pending Two'] self.url = reverse('editors.queue_pending') def test_results(self): # `generate_files` happens within this test. self._test_results() def test_breadcrumbs(self): self._test_breadcrumbs([('Pending Updates', None)]) def test_queue_count(self): # `generate_files` happens within this test. self._test_queue_count(2, 'Pending Updates', 2) def test_get_queue(self): # `generate_files` happens within this test. self._test_get_queue() class TestNominatedQueue(QueueTest): def setUp(self): super(TestNominatedQueue, self).setUp() # These should be the only ones present. self.expected_names = ['Nominated One', 'Nominated Two'] self.url = reverse('editors.queue_nominated') def test_results(self): self._test_results() def test_breadcrumbs(self): self._test_breadcrumbs([('Full Reviews', None)]) def test_results_two_versions(self): self.generate_files() version1 = self.addons['Nominated One'].versions.all()[0] version2 = self.addons['Nominated Two'].versions.all()[0] file_ = version2.files.get() # Versions are ordered by creation date, so make sure they're set. past = self.days_ago(1) version2.update(created=past, nomination=past) # Create another version, v0.2, by "cloning" v0.1. version2.pk = None version2.version = '0.2' future = datetime.now() - timedelta(seconds=1) version2.created = version2.nomination = future version2.save() # Associate v0.2 it with a file. file_.pk = None file_.version = version2 file_.save() r = self.client.get(self.url) eq_(r.status_code, 200) expected = [ ('Nominated One 0.1', reverse('editors.review', args=[version1.addon.slug])), ('Nominated Two 0.2', reverse('editors.review', args=[version2.addon.slug])), ] check_links( expected, pq(r.content)('#addon-queue tr.addon-row td a:not(.app-icon)'), verify=False) def test_queue_count(self): # `generate_files` happens within this test. self._test_queue_count(1, 'Full Reviews', 2) def test_get_queue(self): # `generate_files` happens within this test. self._test_get_queue() class TestPreliminaryQueue(QueueTest): def setUp(self): super(TestPreliminaryQueue, self).setUp() # These should be the only ones present. self.expected_names = ['Prelim One', 'Prelim Two'] self.url = reverse('editors.queue_prelim') def test_results(self): self._test_results() def test_breadcrumbs(self): self._test_breadcrumbs([('Preliminary Reviews', None)]) def test_queue_count(self): # `generate_files` happens within this test. self._test_queue_count(3, 'Preliminary Reviews', 2) def test_get_queue(self): # `generate_files` happens within this test. self._test_get_queue() class TestModeratedQueue(QueueTest): fixtures = ['base/users', 'reviews/dev-reply'] def setUp(self): super(TestModeratedQueue, self).setUp() self.url = reverse('editors.queue_moderated') url_flag = reverse('addons.reviews.flag', args=['a1865', 218468]) response = self.client.post(url_flag, {'flag': ReviewFlag.SPAM}) eq_(response.status_code, 200) eq_(ReviewFlag.objects.filter(flag=ReviewFlag.SPAM).count(), 1) eq_(Review.objects.filter(editorreview=True).count(), 1) def test_results(self): r = self.client.get(self.url) eq_(r.status_code, 200) doc = pq(r.content)('#reviews-flagged') rows = doc('.review-flagged:not(.review-saved)') eq_(rows.length, 1) eq_(rows.find('h3').text(), ": Don't use Firefox 2.0!") # Default is "Skip." eq_(doc('#id_form-0-action_1:checked').length, 1) flagged = doc('.reviews-flagged-reasons span.light').text() editor = ReviewFlag.objects.all()[0].user.name assert flagged.startswith('Flagged by %s' % editor), ( 'Unexpected text: %s' % flagged) def setup_actions(self, action): ctx = self.client.get(self.url).context fs = initial(ctx['reviews_formset'].forms[0]) eq_(Review.objects.filter(addon=1865).count(), 2) data_formset = formset(fs) data_formset['form-0-action'] = action r = self.client.post(self.url, data_formset) self.assertRedirects(r, self.url) def test_skip(self): self.setup_actions(reviews.REVIEW_MODERATE_SKIP) # Make sure it's still there. r = self.client.get(self.url) doc = pq(r.content) rows = doc('#reviews-flagged .review-flagged:not(.review-saved)') eq_(rows.length, 1) def test_skip_score(self): self.setup_actions(reviews.REVIEW_MODERATE_SKIP) eq_(ReviewerScore.objects.filter(note_key=amo.REVIEWED_ADDON_REVIEW) .count(), 0) def get_logs(self, action): return ActivityLog.objects.filter(action=action.id) def test_remove(self): """Make sure the editor tools can delete a review.""" self.setup_actions(reviews.REVIEW_MODERATE_DELETE) logs = self.get_logs(amo.LOG.DELETE_REVIEW) eq_(logs.count(), 1) # Make sure it's removed from the queue. r = self.client.get(self.url) eq_(pq(r.content)('#reviews-flagged .no-results').length, 1) r = self.client.get(reverse('editors.eventlog')) eq_(pq(r.content)('table .more-details').attr('href'), reverse('editors.eventlog.detail', args=[logs[0].id])) # Make sure it was actually deleted. eq_(Review.objects.filter(addon=1865).count(), 1) # But make sure it wasn't *actually* deleted. eq_(Review.unfiltered.filter(addon=1865).count(), 2) def test_remove_fails_for_own_addon(self): """ Make sure the editor tools can't delete a review for an add-on owned by the user. """ a = Addon.objects.get(pk=1865) u = UserProfile.objects.get(email='[email protected]') AddonUser(addon=a, user=u).save() # Make sure the initial count is as expected eq_(Review.objects.filter(addon=1865).count(), 2) self.setup_actions(reviews.REVIEW_MODERATE_DELETE) logs = self.get_logs(amo.LOG.DELETE_REVIEW) eq_(logs.count(), 0) # Make sure it's not removed from the queue. r = self.client.get(self.url) eq_(pq(r.content)('#reviews-flagged .no-results').length, 0) # Make sure it was not actually deleted. eq_(Review.objects.filter(addon=1865).count(), 2) def test_remove_score(self): self.setup_actions(reviews.REVIEW_MODERATE_DELETE) eq_(ReviewerScore.objects.filter(note_key=amo.REVIEWED_ADDON_REVIEW) .count(), 1) def test_keep(self): """Make sure the editor tools can remove flags and keep a review.""" self.setup_actions(reviews.REVIEW_MODERATE_KEEP) logs = self.get_logs(amo.LOG.APPROVE_REVIEW) eq_(logs.count(), 1) # Make sure it's removed from the queue. r = self.client.get(self.url) eq_(pq(r.content)('#reviews-flagged .no-results').length, 1) review = Review.objects.filter(addon=1865) # Make sure it's NOT deleted... eq_(review.count(), 2) # ...but it's no longer flagged. eq_(review.filter(editorreview=1).count(), 0) def test_keep_score(self): self.setup_actions(reviews.REVIEW_MODERATE_KEEP) eq_(ReviewerScore.objects.filter(note_key=amo.REVIEWED_ADDON_REVIEW) .count(), 1) def test_queue_count(self): # `generate_files` happens within this test. self._test_queue_count(4, 'Moderated Review', 1) def test_breadcrumbs(self): self._test_breadcrumbs([('Moderated Reviews', None)]) def test_no_reviews(self): Review.objects.all().delete() r = self.client.get(self.url) eq_(r.status_code, 200) doc = pq(r.content)('#reviews-flagged') eq_(doc('.no-results').length, 1) eq_(doc('.review-saved button').length, 1) # Show only one button. class TestUnlistedPendingQueue(TestPendingQueue): listed = False def setUp(self): super(TestUnlistedPendingQueue, self).setUp() # These should be the only ones present in the queue. self.expected_names = ['Pending One', 'Pending Two'] self.url = reverse('editors.unlisted_queue_pending') def test_breadcrumbs(self): self._test_breadcrumbs([('Unlisted Pending Updates', None)]) def test_queue_count(self): # `generate_files` happens within this test. self._test_queue_count(1, 'Unlisted Pending Updates', 2) class TestUnlistedNominatedQueue(TestNominatedQueue): listed = False def setUp(self): super(TestUnlistedNominatedQueue, self).setUp() # These should be the only ones present. self.expected_names = ['Nominated One', 'Nominated Two'] self.url = reverse('editors.unlisted_queue_nominated') def test_breadcrumbs(self): self._test_breadcrumbs([('Unlisted Full Reviews', None)]) def test_queue_count(self): # `generate_files` happens within this test. self._test_queue_count(0, 'Unlisted Full Reviews', 2) class TestUnlistedPreliminaryQueue(TestPreliminaryQueue): listed = False def setUp(self): super(TestUnlistedPreliminaryQueue, self).setUp() # These should be the only ones present. self.expected_names = ['Prelim One', 'Prelim Two'] self.url = reverse('editors.unlisted_queue_prelim') def test_breadcrumbs(self): self._test_breadcrumbs([('Unlisted Preliminary Reviews', None)]) def test_queue_count(self): # `generate_files` happens within this test. self._test_queue_count(2, 'Unlisted Preliminary Reviews', 2) class TestPerformance(QueueTest): fixtures = ['base/users', 'editors/pending-queue', 'base/addon_3615'] """Test the page at /editors/performance.""" def setUpEditor(self): self.login_as_editor() amo.set_user(UserProfile.objects.get(username='editor')) self.create_logs() def setUpSeniorEditor(self): self.login_as_senior_editor() amo.set_user(UserProfile.objects.get(username='senioreditor')) self.create_logs() def setUpAdmin(self): self.login_as_admin() amo.set_user(UserProfile.objects.get(username='admin')) self.create_logs() def get_url(self, args=[]): return reverse('editors.performance', args=args) def create_logs(self): addon = Addon.objects.all()[0] version = addon.versions.all()[0] for i in amo.LOG_REVIEW_QUEUE: amo.log(amo.LOG_BY_ID[i], addon, version) def _test_chart(self): r = self.client.get(self.get_url()) eq_(r.status_code, 200) doc = pq(r.content) # The ' - 1' is to account for REQUEST_VERSION not being displayed. num = len(amo.LOG_REVIEW_QUEUE) - 1 label = datetime.now().strftime('%Y-%m') data = {label: {u'teamcount': num, u'teamavg': u'%s.0' % num, u'usercount': num, u'teamamt': 1, u'label': datetime.now().strftime('%b %Y')}} eq_(json.loads(doc('#monthly').attr('data-chart')), data) def test_performance_chart_editor(self): self.setUpEditor() self._test_chart() def test_performance_chart_as_senior_editor(self): self.setUpSeniorEditor() self._test_chart() def test_performance_chart_as_admin(self): self.setUpAdmin() self._test_chart() def test_usercount_with_more_than_one_editor(self): self.client.login(username='[email protected]', password='password') amo.set_user(UserProfile.objects.get(username='clouserw')) self.create_logs() self.setUpEditor() r = self.client.get(self.get_url()) eq_(r.status_code, 200) doc = pq(r.content) data = json.loads(doc('#monthly').attr('data-chart')) label = datetime.now().strftime('%Y-%m') eq_(data[label]['usercount'], 18) def _test_performance_other_user_as_admin(self): userid = amo.get_user().pk r = self.client.get(self.get_url([10482])) doc = pq(r.content) eq_(doc('#select_user').length, 1) # Let them choose editors. options = doc('#select_user option') eq_(options.length, 3) eq_(options.eq(2).val(), str(userid)) assert 'clouserw' in doc('#reviews_user').text() def test_performance_other_user_as_admin(self): self.setUpAdmin() self._test_performance_other_user_as_admin() def test_performance_other_user_as_senior_editor(self): self.setUpSeniorEditor() self._test_performance_other_user_as_admin() def test_performance_other_user_not_admin(self): self.setUpEditor() r = self.client.get(self.get_url([10482])) doc = pq(r.content) eq_(doc('#select_user').length, 0) # Don't let them choose editors. eq_(doc('#reviews_user').text(), 'Your Reviews') class SearchTest(EditorTest): def setUp(self): super(SearchTest, self).setUp() self.login_as_editor() def named_addons(self, request): return [r.data.addon_name for r in request.context['page'].object_list] def search(self, *args, **kw): r = self.client.get(self.url, kw) eq_(r.status_code, 200) eq_(r.context['search_form'].errors.as_text(), '') return r class TestQueueSearch(SearchTest): fixtures = ['base/users', 'base/appversion'] def setUp(self): super(TestQueueSearch, self).setUp() self.url = reverse('editors.queue_nominated') def generate_files(self, subset=[]): files = SortedDict([ ('Not Admin Reviewed', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_UNREVIEWED, }), ('Another Not Admin Reviewed', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_UNREVIEWED, }), ('Admin Reviewed', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_UNREVIEWED, 'admin_review': True, }), ('Justin Bieber Theme', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_UNREVIEWED, 'addon_type': amo.ADDON_THEME, }), ('Justin Bieber Search Bar', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_UNREVIEWED, 'addon_type': amo.ADDON_SEARCH, }), ('Bieber For Mobile', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_UNREVIEWED, 'application': amo.MOBILE, }), ('Linux Widget', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_UNREVIEWED, 'platform': amo.PLATFORM_LINUX, }), ('Mac Widget', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_UNREVIEWED, 'platform': amo.PLATFORM_MAC, }), ]) results = {} for name, attrs in files.iteritems(): if not subset or name in subset: results[name] = create_addon_file(name, **attrs) return results def generate_file(self, name): return self.generate_files([name])[name] def test_search_by_admin_reviewed_admin(self): self.login_as_admin() self.generate_files(['Not Admin Reviewed', 'Admin Reviewed']) r = self.search(admin_review=1) eq_(self.named_addons(r), ['Admin Reviewed']) def test_queue_counts_admin(self): self.login_as_admin() self.generate_files(['Not Admin Reviewed', 'Admin Reviewed']) r = self.search(text_query='admin', per_page=1) doc = pq(r.content) eq_(doc('.data-grid-top .num-results').text(), u'Results 1 \u2013 1 of 2') def test_search_by_addon_name_admin(self): self.login_as_admin() self.generate_files(['Not Admin Reviewed', 'Admin Reviewed', 'Justin Bieber Theme']) r = self.search(text_query='admin') eq_(sorted(self.named_addons(r)), ['Admin Reviewed', 'Not Admin Reviewed']) def test_not_searching(self): self.generate_files(['Not Admin Reviewed', 'Admin Reviewed']) r = self.search() eq_(sorted(self.named_addons(r)), ['Not Admin Reviewed']) def test_search_by_nothing(self): self.generate_files(['Not Admin Reviewed', 'Admin Reviewed']) r = self.search(searching='True') eq_(sorted(self.named_addons(r)), ['Admin Reviewed', 'Not Admin Reviewed']) def test_search_by_admin_reviewed(self): self.generate_files(['Not Admin Reviewed', 'Admin Reviewed']) r = self.search(admin_review=1, searching='True') eq_(self.named_addons(r), ['Admin Reviewed']) def test_queue_counts(self): self.generate_files(['Not Admin Reviewed', 'Another Not Admin Reviewed', 'Admin Reviewed']) r = self.search(text_query='admin', per_page=1, searching='True') doc = pq(r.content) eq_(doc('.data-grid-top .num-results').text(), u'Results 1 \u2013 1 of 3') def test_search_by_addon_name(self): self.generate_files(['Not Admin Reviewed', 'Admin Reviewed', 'Justin Bieber Theme']) r = self.search(text_query='admin', searching='True') eq_(sorted(self.named_addons(r)), ['Admin Reviewed', 'Not Admin Reviewed']) def test_search_by_addon_in_locale(self): name = 'Not Admin Reviewed' d = self.generate_file(name) uni = 'フォクすけといっしょ'.decode('utf8') a = Addon.objects.get(pk=d['addon'].id) a.name = {'ja': uni} a.save() r = self.client.get('/ja/' + self.url, {'text_query': uni}, follow=True) eq_(r.status_code, 200) eq_(self.named_addons(r), [name]) def test_search_by_addon_author(self): name = 'Not Admin Reviewed' d = self.generate_file(name) u = UserProfile.objects.all()[0] email = u.email.swapcase() author = AddonUser.objects.create(user=u, addon=d['addon']) for role in [amo.AUTHOR_ROLE_OWNER, amo.AUTHOR_ROLE_DEV]: author.role = role author.save() r = self.search(text_query=email) eq_(self.named_addons(r), [name]) author.role = amo.AUTHOR_ROLE_VIEWER author.save() r = self.search(text_query=email) eq_(self.named_addons(r), []) def test_search_by_supported_email_in_locale(self): name = 'Not Admin Reviewed' d = self.generate_file(name) uni = 'フォクすけといっしょ@site.co.jp'.decode('utf8') a = Addon.objects.get(pk=d['addon'].id) a.support_email = {'ja': uni} a.save() r = self.client.get('/ja/' + self.url, {'text_query': uni}, follow=True) eq_(r.status_code, 200) eq_(self.named_addons(r), [name]) def test_search_by_addon_type(self): self.generate_files(['Not Admin Reviewed', 'Justin Bieber Theme', 'Justin Bieber Search Bar']) r = self.search(addon_type_ids=[amo.ADDON_THEME]) eq_(self.named_addons(r), ['Justin Bieber Theme']) def test_search_by_addon_type_any(self): self.generate_file('Not Admin Reviewed') r = self.search(addon_type_ids=[amo.ADDON_ANY]) assert self.named_addons(r), 'Expected some add-ons' def test_search_by_many_addon_types(self): self.generate_files(['Not Admin Reviewed', 'Justin Bieber Theme', 'Justin Bieber Search Bar']) r = self.search(addon_type_ids=[amo.ADDON_THEME, amo.ADDON_SEARCH]) eq_(sorted(self.named_addons(r)), ['Justin Bieber Search Bar', 'Justin Bieber Theme']) def test_search_by_platform_mac(self): self.generate_files(['Bieber For Mobile', 'Linux Widget', 'Mac Widget']) r = self.search(platform_ids=[amo.PLATFORM_MAC.id]) eq_(r.status_code, 200) eq_(self.named_addons(r), ['Mac Widget']) def test_search_by_platform_linux(self): self.generate_files(['Bieber For Mobile', 'Linux Widget', 'Mac Widget']) r = self.search(platform_ids=[amo.PLATFORM_LINUX.id]) eq_(r.status_code, 200) eq_(self.named_addons(r), ['Linux Widget']) def test_search_by_platform_mac_linux(self): self.generate_files(['Bieber For Mobile', 'Linux Widget', 'Mac Widget']) r = self.search(platform_ids=[amo.PLATFORM_MAC.id, amo.PLATFORM_LINUX.id]) eq_(r.status_code, 200) eq_(sorted(self.named_addons(r)), ['Linux Widget', 'Mac Widget']) def test_preserve_multi_platform_files(self): for plat in (amo.PLATFORM_WIN, amo.PLATFORM_MAC): create_addon_file('Multi Platform', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED, platform=plat) r = self.search(platform_ids=[amo.PLATFORM_WIN.id]) eq_(r.status_code, 200) # Should not say Windows only. td = pq(r.content)('#addon-queue tbody td').eq(5) eq_(td.find('div').attr('title'), 'Firefox') eq_(td.text(), '') def test_preserve_single_platform_files(self): create_addon_file('Windows', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED, platform=amo.PLATFORM_WIN) r = self.search(platform_ids=[amo.PLATFORM_WIN.id]) doc = pq(r.content) eq_(doc('#addon-queue tbody td').eq(6).find('div').attr('title'), 'Windows') def test_search_by_app(self): self.generate_files(['Bieber For Mobile', 'Linux Widget']) r = self.search(application_id=[amo.MOBILE.id]) eq_(r.status_code, 200) eq_(self.named_addons(r), ['Bieber For Mobile']) def test_preserve_multi_apps(self): self.generate_files(['Bieber For Mobile', 'Linux Widget']) for app in (amo.MOBILE, amo.FIREFOX): create_addon_file('Multi Application', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED, application=app) r = self.search(application_id=[amo.MOBILE.id]) doc = pq(r.content) td = doc('#addon-queue tr').eq(2).children('td').eq(5) eq_(td.children().length, 2) eq_(td.children('.ed-sprite-firefox').length, 1) eq_(td.children('.ed-sprite-mobile').length, 1) def test_search_by_version_requires_app(self): r = self.client.get(self.url, {'max_version': '3.6'}) eq_(r.status_code, 200) # This is not the most descriptive message but it's # the easiest to show. This missing app scenario is unlikely. eq_(r.context['search_form'].errors.as_text(), '* max_version\n * Select a valid choice. 3.6 is not ' 'one of the available choices.') def test_search_by_app_version(self): d = create_addon_file('Bieber For Mobile 4.0b2pre', '0.1', amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED, application=amo.MOBILE) max = AppVersion.objects.get(application=amo.MOBILE.id, version='4.0b2pre') (ApplicationsVersions.objects.filter( application=amo.MOBILE.id, version=d['version']).update(max=max)) r = self.search(application_id=amo.MOBILE.id, max_version='4.0b2pre') eq_(self.named_addons(r), [u'Bieber For Mobile 4.0b2pre']) def test_age_of_submission(self): self.generate_files(['Not Admin Reviewed', 'Admin Reviewed', 'Justin Bieber Theme']) Version.objects.update(nomination=datetime.now() - timedelta(days=1)) title = 'Justin Bieber Theme' bieber = Version.objects.filter(addon__name__localized_string=title) # Exclude anything out of range: bieber.update(nomination=datetime.now() - timedelta(days=5)) r = self.search(waiting_time_days=2) addons = self.named_addons(r) assert title not in addons, ('Unexpected results: %r' % addons) # Include anything submitted up to requested days: bieber.update(nomination=datetime.now() - timedelta(days=2)) r = self.search(waiting_time_days=5) addons = self.named_addons(r) assert title in addons, ('Unexpected results: %r' % addons) # Special case: exclude anything under 10 days: bieber.update(nomination=datetime.now() - timedelta(days=8)) r = self.search(waiting_time_days='10+') addons = self.named_addons(r) assert title not in addons, ('Unexpected results: %r' % addons) # Special case: include anything 10 days and over: bieber.update(nomination=datetime.now() - timedelta(days=12)) r = self.search(waiting_time_days='10+') addons = self.named_addons(r) assert title in addons, ('Unexpected results: %r' % addons) def test_form(self): self.generate_file('Bieber For Mobile') r = self.search() doc = pq(r.content) eq_(doc('#id_application_id').attr('data-url'), reverse('editors.application_versions_json')) eq_(doc('#id_max_version option').text(), 'Select an application first') r = self.search(application_id=amo.MOBILE.id) doc = pq(r.content) eq_(doc('#id_max_version option').text(), ' '.join([av.version for av in AppVersion.objects.filter(application=amo.MOBILE.id)])) def test_application_versions_json(self): self.generate_file('Bieber For Mobile') r = self.client.post(reverse('editors.application_versions_json'), {'application_id': amo.MOBILE.id}) eq_(r.status_code, 200) data = json.loads(r.content) eq_(data['choices'], [[av, av] for av in [u''] + [av.version for av in AppVersion.objects.filter(application=amo.MOBILE.id)]]) def test_clear_search_visible(self): r = self.search(text_query='admin', searching=True) eq_(r.status_code, 200) eq_(pq(r.content)('.clear-queue-search').text(), 'clear search') def test_clear_search_hidden(self): r = self.search(text_query='admin') eq_(r.status_code, 200) eq_(pq(r.content)('.clear-queue-search').text(), None) def test_clear_search_uses_correct_queue(self): # The "clear search" link points to the right listed or unlisted queue. # Listed queue. url = reverse('editors.queue_nominated') r = self.client.get(url, {'text_query': 'admin', 'searching': True}) assert pq(r.content)('.clear-queue-search').attr('href') == url # Unlisted queue. Needs the Addons:ReviewUnlisted perm. self.login_as_senior_editor() url = reverse('editors.unlisted_queue_nominated') r = self.client.get(url, {'text_query': 'admin', 'searching': True}) assert pq(r.content)('.clear-queue-search').attr('href') == url class TestQueueSearchVersionSpecific(SearchTest): def setUp(self): super(TestQueueSearchVersionSpecific, self).setUp() self.url = reverse('editors.queue_prelim') create_addon_file('Not Admin Reviewed', '0.1', amo.STATUS_LITE, amo.STATUS_UNREVIEWED) create_addon_file('Justin Bieber Theme', '0.1', amo.STATUS_LITE, amo.STATUS_UNREVIEWED, addon_type=amo.ADDON_THEME) self.bieber = Version.objects.filter( addon__name__localized_string='Justin Bieber Theme') def update_beiber(self, days): new_created = datetime.now() - timedelta(days=days) self.bieber.update(created=new_created, nomination=new_created) self.bieber[0].files.update(created=new_created) def test_age_of_submission(self): Version.objects.update(created=datetime.now() - timedelta(days=1)) # Exclude anything out of range: self.update_beiber(5) r = self.search(waiting_time_days=2) addons = self.named_addons(r) assert 'Justin Bieber Theme' not in addons, ( 'Unexpected results: %r' % addons) # Include anything submitted up to requested days: self.update_beiber(2) r = self.search(waiting_time_days=4) addons = self.named_addons(r) assert 'Justin Bieber Theme' in addons, ( 'Unexpected results: %r' % addons) # Special case: exclude anything under 10 days: self.update_beiber(8) r = self.search(waiting_time_days='10+') addons = self.named_addons(r) assert 'Justin Bieber Theme' not in addons, ( 'Unexpected results: %r' % addons) # Special case: include anything 10 days and over: self.update_beiber(12) r = self.search(waiting_time_days='10+') addons = self.named_addons(r) assert 'Justin Bieber Theme' in addons, ( 'Unexpected results: %r' % addons) class ReviewBase(QueueTest): def setUp(self): super(QueueTest, self).setUp() self.login_as_editor() self.addons = {} self.addon = self.generate_file('Public') self.version = self.addon.current_version self.file = self.version.files.get() self.editor = UserProfile.objects.get(username='editor') self.editor.update(display_name='An editor') self.url = reverse('editors.review', args=[self.addon.slug]) AddonUser.objects.create(addon=self.addon, user_id=999) def get_addon(self): return Addon.objects.get(pk=self.addon.pk) def get_dict(self, **kw): files = [self.version.files.all()[0].pk] d = {'operating_systems': 'win', 'applications': 'something', 'comments': 'something', 'addon_files': files} d.update(kw) return d class TestReview(ReviewBase): def test_reviewer_required(self): eq_(self.client.head(self.url).status_code, 200) def test_not_anonymous(self): self.client.logout() r = self.client.head(self.url) self.assertRedirects( r, '%s?to=%s' % (reverse('users.login'), self.url)) @patch.object(settings, 'ALLOW_SELF_REVIEWS', False) def test_not_author(self): AddonUser.objects.create(addon=self.addon, user=self.editor) eq_(self.client.head(self.url).status_code, 302) def test_needs_unlisted_reviewer_for_unlisted_addons(self): self.addon.update(is_listed=False) assert self.client.head(self.url).status_code == 404 self.login_as_senior_editor() assert self.client.head(self.url).status_code == 200 def test_not_flags(self): response = self.client.get(self.url) eq_(response.status_code, 200) eq_(len(response.context['flags']), 0) def test_flags(self): self.addon.update(admin_review=True) response = self.client.get(self.url) eq_(len(response.context['flags']), 1) def test_info_comments_requested(self): response = self.client.post(self.url, {'action': 'info'}) eq_(response.context['form'].errors['comments'][0], 'This field is required.') def test_comment(self): response = self.client.post(self.url, {'action': 'comment', 'comments': 'hello sailor'}) eq_(response.status_code, 302) eq_(len(mail.outbox), 0) comment_version = amo.LOG.COMMENT_VERSION eq_(ActivityLog.objects.filter(action=comment_version.id).count(), 1) def test_info_requested(self): response = self.client.post(self.url, {'action': 'info', 'comments': 'hello sailor'}) eq_(response.status_code, 302) eq_(len(mail.outbox), 1) self.assertTemplateUsed(response, 'editors/emails/info.ltxt') def test_super_review_requested(self): response = self.client.post(self.url, {'action': 'super', 'comments': 'hello sailor'}) eq_(response.status_code, 302) eq_(len(mail.outbox), 2) self.assertTemplateUsed(response, 'editors/emails/author_super_review.ltxt') self.assertTemplateUsed(response, 'editors/emails/super_review.ltxt') def test_info_requested_canned_response(self): response = self.client.post(self.url, {'action': 'info', 'comments': 'hello sailor', 'canned_response': 'foo'}) eq_(response.status_code, 302) eq_(len(mail.outbox), 1) self.assertTemplateUsed(response, 'editors/emails/info.ltxt') def test_notify(self): response = self.client.post(self.url, {'action': 'info', 'comments': 'hello sailor', 'notify': True}) eq_(response.status_code, 302) eq_(EditorSubscription.objects.count(), 1) def test_no_notify(self): response = self.client.post(self.url, {'action': 'info', 'comments': 'hello sailor'}) eq_(response.status_code, 302) eq_(EditorSubscription.objects.count(), 0) def test_page_title(self): response = self.client.get(self.url) eq_(response.status_code, 200) doc = pq(response.content) eq_(doc('title').text(), '%s :: Editor Tools :: Add-ons for Firefox' % self.addon.name) def test_breadcrumbs(self): self.generate_files() expected = [ ('Pending Updates', reverse('editors.queue_pending')), (unicode(self.addon.name), None), ] self._test_breadcrumbs(expected) def test_breadcrumbs_unlisted_addons(self): self.addon.update(is_listed=False) self.generate_files() self.login_as_admin() expected = [ ('Unlisted Pending Updates', reverse('editors.unlisted_queue_pending')), (unicode(self.addon.name), None), ] self._test_breadcrumbs(expected) def test_files_shown(self): r = self.client.get(self.url) eq_(r.status_code, 200) items = pq(r.content)('#review-files .files .file-info') eq_(items.length, 1) f = self.version.all_files[0] expected = [ ('All Platforms', f.get_url_path('editor')), ('Validation', reverse('devhub.file_validation', args=[self.addon.slug, f.id])), ('Contents', None), ] check_links(expected, items.find('a'), verify=False) def test_item_history(self): self.addon_file(u'something', u'0.2', amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED) eq_(self.addon.versions.count(), 1) self.review_version(self.version, self.url) v2 = self.addons['something'].versions.all()[0] v2.addon = self.addon v2.created = v2.created + timedelta(days=1) v2.save() self.review_version(v2, self.url) eq_(self.addon.versions.count(), 2) r = self.client.get(self.url) table = pq(r.content)('#review-files') # Check the history for both versions. ths = table.children('tr > th') eq_(ths.length, 2) assert '0.1' in ths.eq(0).text() assert '0.2' in ths.eq(1).text() rows = table('td.files') eq_(rows.length, 2) comments = rows.siblings('td') eq_(comments.length, 2) for idx in xrange(comments.length): td = comments.eq(idx) eq_(td.find('.history-comment').text(), 'something') eq_(td.find('th').text(), 'Preliminarily approved') eq_(td.find('td a').text(), self.editor.display_name) def generate_deleted_versions(self): self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION, name=u'something') self.url = reverse('editors.review', args=[self.addon.slug]) versions = ({'version': '0.1', 'action': 'comment', 'comments': 'millenium hand and shrimp'}, {'version': '0.1', 'action': 'prelim', 'comments': 'buggrit'}, {'version': '0.2', 'action': 'comment', 'comments': 'I told em'}, {'version': '0.3'}) for i, version in enumerate(versions): a = create_addon_file(self.addon.name, version['version'], amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED) v = a['version'] v.update(created=v.created + timedelta(days=i)) if 'action' in version: d = dict(action=version['action'], operating_systems='win', applications='something', comments=version['comments'], addon_files=[v.files.all()[0].pk]) self.client.post(self.url, d) v.delete() @patch('editors.helpers.sign_file') def test_item_history_deleted(self, mock_sign): self.generate_deleted_versions() r = self.client.get(self.url) table = pq(r.content)('#review-files') # Check the history for all versions. ths = table.children('tr > th') eq_(ths.length, 3) # The two with the same number will be coalesced eq_('0.1' in ths.eq(0).text(), True) eq_('0.2' in ths.eq(1).text(), True) eq_('0.3' in ths.eq(2).text(), True) for idx in xrange(2): eq_('Deleted' in ths.eq(idx).text(), True) bodies = table.children('.listing-body') eq_('millenium hand and shrimp' in bodies.eq(0).text(), True) eq_('buggrit' in bodies.eq(0).text(), True) eq_('I told em' in bodies.eq(1).text(), True) assert mock_sign.called def test_item_history_compat_ordered(self): """ Make sure that apps in compatibility are ordered. """ self.addon_file(u'something', u'0.2', amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED) av = AppVersion.objects.all()[0] v = self.addon.versions.all()[0] ApplicationsVersions.objects.create( version=v, application=amo.THUNDERBIRD.id, min=av, max=av) ApplicationsVersions.objects.create( version=v, application=amo.SEAMONKEY.id, min=av, max=av) eq_(self.addon.versions.count(), 1) url = reverse('editors.review', args=[self.addon.slug]) doc = pq(self.client.get(url).content) icons = doc('.listing-body .app-icon') eq_(icons.eq(0).attr('title'), "Firefox") eq_(icons.eq(1).attr('title'), "SeaMonkey") eq_(icons.eq(2).attr('title'), "Thunderbird") def test_item_history_notes(self): v = self.addon.versions.all()[0] v.releasenotes = 'hi' v.approvalnotes = 'secret hi' v.save() r = self.client.get(self.url) doc = pq(r.content)('#review-files') version = doc('.activity_version') eq_(version.length, 1) eq_(version.text(), 'hi') approval = doc('.activity_approval') eq_(approval.length, 1) eq_(approval.text(), 'secret hi') def test_item_history_header(self): doc = pq(self.client.get(self.url).content) assert ('Preliminarily Reviewed' in doc('#review-files .listing-header .light').text()) def test_item_history_comment(self): # Add Comment. self.addon_file(u'something', u'0.1', amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED) self.client.post(self.url, {'action': 'comment', 'comments': 'hello sailor'}) r = self.client.get(self.url) doc = pq(r.content)('#review-files') eq_(doc('th').eq(1).text(), 'Comment') eq_(doc('.history-comment').text(), 'hello sailor') @patch('editors.helpers.sign_file') def test_files_in_item_history(self, mock_sign): data = {'action': 'public', 'operating_systems': 'win', 'applications': 'something', 'comments': 'something', 'addon_files': [self.version.files.all()[0].pk]} self.client.post(self.url, data) r = self.client.get(self.url) items = pq(r.content)('#review-files .files .file-info') eq_(items.length, 1) eq_(items.find('a.editors-install').text(), 'All Platforms') assert mock_sign.called def test_no_items(self): r = self.client.get(self.url) eq_(pq(r.content)('#review-files .no-activity').length, 1) def test_hide_beta(self): version = self.addon.latest_version f = version.files.all()[0] version.pk = None version.version = '0.3beta' version.save() doc = pq(self.client.get(self.url).content) eq_(doc('#review-files tr.listing-header').length, 2) f.pk = None f.status = amo.STATUS_BETA f.version = version f.save() doc = pq(self.client.get(self.url).content) eq_(doc('#review-files tr.listing-header').length, 1) def test_action_links(self): r = self.client.get(self.url) expected = [ ('View Listing', self.addon.get_url_path()), ] check_links(expected, pq(r.content)('#actions-addon a'), verify=False) def test_action_links_as_admin(self): self.login_as_admin() r = self.client.get(self.url) expected = [ ('View Listing', self.addon.get_url_path()), ('Edit', self.addon.get_dev_url()), ('Admin Page', reverse('zadmin.addon_manage', args=[self.addon.id])), ] check_links(expected, pq(r.content)('#actions-addon a'), verify=False) def test_unlisted_addon_action_links_as_admin(self): """No "View Listing" link for unlisted addons, "edit"/"manage" links for the admins.""" self.addon.update(is_listed=False) self.login_as_admin() r = self.client.get(self.url) expected = [ ('Edit', self.addon.get_dev_url()), ('Admin Page', reverse('zadmin.addon_manage', args=[self.addon.id])), ] check_links(expected, pq(r.content)('#actions-addon a'), verify=False) def test_admin_links_as_non_admin(self): self.login_as_editor() response = self.client.get(self.url) doc = pq(response.content) admin = doc('#actions-addon li') eq_(admin.length, 1) def test_unflag_option_forflagged_as_admin(self): self.login_as_admin() self.addon.update(admin_review=True) response = self.client.get(self.url) doc = pq(response.content) eq_(doc('#id_adminflag').length, 1) def test_unflag_option_forflagged_as_editor(self): self.login_as_editor() self.addon.update(admin_review=True) response = self.client.get(self.url) doc = pq(response.content) eq_(doc('#id_adminflag').length, 0) def test_unflag_option_notflagged_as_admin(self): self.login_as_admin() self.addon.update(admin_review=False) response = self.client.get(self.url) doc = pq(response.content) eq_(doc('#id_adminflag').length, 0) def test_unadmin_flag_as_admin(self): self.addon.update(admin_review=True) self.login_as_admin() response = self.client.post(self.url, {'action': 'info', 'comments': 'hello sailor', 'adminflag': True}) eq_(response.status_code, 302, "Review should be processed as normal and redirect") self.assertRedirects(response, reverse('editors.queue_pending'), status_code=302) eq_(Addon.objects.get(pk=self.addon.pk).admin_review, False, "Admin flag should still be removed if admin") def test_unadmin_flag_as_editor(self): self.addon.update(admin_review=True) self.login_as_editor() response = self.client.post(self.url, {'action': 'info', 'comments': 'hello sailor', 'adminflag': True}) eq_(response.status_code, 302, "Review should be processed as normal and redirect") # Should silently fail to set adminflag but work otherwise. self.assertRedirects(response, reverse('editors.queue_pending'), status_code=302) eq_(Addon.objects.get(pk=self.addon.pk).admin_review, True, "Admin flag should still be in place if editor") def test_no_public(self): s = amo.STATUS_PUBLIC has_public = self.version.files.filter(status=s).exists() assert not has_public for version_file in self.version.files.all(): version_file.status = amo.STATUS_PUBLIC version_file.save() has_public = self.version.files.filter(status=s).exists() assert has_public response = self.client.get(self.url) validation = pq(response.content).find('.files') eq_(validation.find('a').eq(1).text(), "Validation") eq_(validation.find('a').eq(2).text(), "Contents") eq_(validation.find('a').length, 3) def test_public_search(self): self.version.files.update(status=amo.STATUS_PUBLIC) self.addon.update(type=amo.ADDON_SEARCH) r = self.client.get(self.url) eq_(pq(r.content)('#review-files .files ul .file-info').length, 1) def test_version_deletion(self): """ Make sure that we still show review history for deleted versions. """ # Add a new version to the add-on. self.addon_file(u'something', u'0.2', amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED) eq_(self.addon.versions.count(), 1) self.review_version(self.version, self.url) v2 = self.addons['something'].versions.all()[0] v2.addon = self.addon v2.created = v2.created + timedelta(days=1) v2.save() self.review_version(v2, self.url) eq_(self.addon.versions.count(), 2) r = self.client.get(self.url) doc = pq(r.content) # View the history verify two versions: ths = doc('table#review-files > tr > th:first-child') assert '0.1' in ths.eq(0).text() assert '0.2' in ths.eq(1).text() # Delete a version: v2.delete() # Verify two versions, one deleted: r = self.client.get(self.url) doc = pq(r.content) ths = doc('table#review-files > tr > th:first-child') eq_(ths.length, 2) assert '0.1' in ths.text() @patch('editors.helpers.sign_file') def review_version(self, version, url, mock_sign): version.files.all()[0].update(status=amo.STATUS_UNREVIEWED) d = dict(action='prelim', operating_systems='win', applications='something', comments='something', addon_files=[version.files.all()[0].pk]) self.client.post(url, d) assert mock_sign.called def test_dependencies_listed(self): AddonDependency.objects.create(addon=self.addon, dependent_addon=self.addon) r = self.client.get(self.url) deps = pq(r.content)('#addon-summary .addon-dependencies') eq_(deps.length, 1) eq_(deps.find('li').length, 1) eq_(deps.find('a').attr('href'), self.addon.get_url_path()) def test_eula_displayed(self): eq_(bool(self.addon.has_eula), False) r = self.client.get(self.url) eq_(r.status_code, 200) self.assertNotContains(r, 'View End-User License Agreement') self.addon.eula = 'Test!' self.addon.save() eq_(bool(self.addon.has_eula), True) r = self.client.get(self.url) eq_(r.status_code, 200) self.assertContains(r, 'View End-User License Agreement') def test_privacy_policy_displayed(self): eq_(self.addon.privacy_policy, None) r = self.client.get(self.url) eq_(r.status_code, 200) self.assertNotContains(r, 'View Privacy Policy') self.addon.privacy_policy = 'Test!' self.addon.save() r = self.client.get(self.url) eq_(r.status_code, 200) self.assertContains(r, 'View Privacy Policy') def test_breadcrumbs_all(self): queues = {'Full Reviews': [amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED], 'Preliminary Reviews': [amo.STATUS_UNREVIEWED, amo.STATUS_LITE], 'Pending Updates': [amo.STATUS_PENDING, amo.STATUS_PUBLIC]} for text, queue_ids in queues.items(): for qid in queue_ids: self.addon.update(status=qid) doc = pq(self.client.get(self.url).content) eq_(doc('#breadcrumbs li:eq(1)').text(), text) def test_viewing(self): url = reverse('editors.review_viewing') r = self.client.post(url, {'addon_id': self.addon.id}) data = json.loads(r.content) eq_(data['current'], self.editor.id) eq_(data['current_name'], self.editor.name) eq_(data['is_user'], 1) # Now, login as someone else and test. self.login_as_admin() r = self.client.post(url, {'addon_id': self.addon.id}) data = json.loads(r.content) eq_(data['current'], self.editor.id) eq_(data['current_name'], self.editor.name) eq_(data['is_user'], 0) def test_viewing_queue(self): r = self.client.post(reverse('editors.review_viewing'), {'addon_id': self.addon.id}) data = json.loads(r.content) eq_(data['current'], self.editor.id) eq_(data['current_name'], self.editor.name) eq_(data['is_user'], 1) # Now, login as someone else and test. self.login_as_admin() r = self.client.post(reverse('editors.queue_viewing'), {'addon_ids': self.addon.id}) data = json.loads(r.content) eq_(data[str(self.addon.id)], self.editor.display_name) def test_display_same_files_only_once(self): """ Test whether identical files for different platforms show up as one link with the appropriate text. """ version = Version.objects.create(addon=self.addon, version='0.2') version.created = datetime.today() + timedelta(days=1) version.save() for plat in (amo.PLATFORM_WIN, amo.PLATFORM_MAC): File.objects.create(platform=plat.id, version=version, status=amo.STATUS_PUBLIC) self.addon.update(_current_version=version) r = self.client.get(self.url) text = pq(r.content)('.editors-install').eq(1).text() assert text == "Windows / Mac OS X" def test_no_compare_link(self): r = self.client.get(self.url) eq_(r.status_code, 200) info = pq(r.content)('#review-files .file-info') eq_(info.length, 1) eq_(info.find('a.compare').length, 0) def test_compare_link(self): version = Version.objects.create(addon=self.addon, version='0.2') version.created = datetime.today() + timedelta(days=1) version.save() f1 = self.addon.versions.order_by('created')[0].files.all()[0] f1.status = amo.STATUS_PUBLIC f1.save() f2 = File.objects.create(version=version, status=amo.STATUS_PUBLIC) self.addon.update(_current_version=version) eq_(self.addon.current_version, version) r = self.client.get(self.url) assert r.context['show_diff'] links = pq(r.content)('#review-files .file-info .compare') expected = [ reverse('files.compare', args=[f2.pk, f1.pk]), ] check_links(expected, links, verify=False) def test_download_sources_link(self): version = self.addon._latest_version tdir = temp.gettempdir() source_file = temp.NamedTemporaryFile(suffix='.zip', dir=tdir) source_file.write('a' * (2 ** 21)) source_file.seek(0) version.source = DjangoFile(source_file) version.save() url = reverse('editors.review', args=[self.addon.pk]) # Admin reviewer: able to download sources. user = UserProfile.objects.get(email='[email protected]') self.client.login(username=user.email, password='password') response = self.client.get(url, follow=True) assert 'Download files' in response.content # Standard reviewer: should know that sources were provided. user = UserProfile.objects.get(email='[email protected]') self.client.login(username=user.email, password='password') response = self.client.get(url, follow=True) assert 'The developer has provided source code.' in response.content @patch('editors.helpers.sign_file') def test_admin_flagged_addon_actions_as_admin(self, mock_sign_file): self.addon.update(admin_review=True, status=amo.STATUS_NOMINATED) self.login_as_admin() response = self.client.post(self.url, self.get_dict(action='public'), follow=True) eq_(response.status_code, 200) eq_(self.get_addon().status, amo.STATUS_PUBLIC) assert mock_sign_file.called def test_admin_flagged_addon_actions_as_editor(self): self.addon.update(admin_review=True, status=amo.STATUS_NOMINATED) self.version.files.update(status=amo.STATUS_UNREVIEWED) self.login_as_editor() response = self.client.post(self.url, self.get_dict(action='public')) eq_(response.status_code, 200) # Form error. # The add-on status must not change as non-admin editors are not # allowed to review admin-flagged add-ons. eq_(self.get_addon().status, amo.STATUS_NOMINATED) eq_(response.context['form'].errors['action'], [u'Select a valid choice. public is not one of the available ' u'choices.']) def test_user_changes_log(self): # Activity logs related to user changes should be displayed. # Create an activy log for each of the following: user addition, role # change and deletion. author = self.addon.addonuser_set.get() from amo import set_user set_user(author.user) amo.log(amo.LOG.ADD_USER_WITH_ROLE, author.user, author.get_role_display(), self.addon) amo.log(amo.LOG.CHANGE_USER_WITH_ROLE, author.user, author.get_role_display(), self.addon) amo.log(amo.LOG.REMOVE_USER_WITH_ROLE, author.user, author.get_role_display(), self.addon) response = self.client.get(self.url) assert 'user_changes' in response.context user_changes_log = response.context['user_changes'] actions = [log.activity_log.action for log in user_changes_log] assert actions == [ amo.LOG.ADD_USER_WITH_ROLE.id, amo.LOG.CHANGE_USER_WITH_ROLE.id, amo.LOG.REMOVE_USER_WITH_ROLE.id] # Make sure the logs are displayed in the page. doc = pq(response.content) user_changes = doc('#user-changes li') assert len(user_changes) == 3 assert '(Owner) added to ' in user_changes[0].text assert 'role changed to Owner for ' in user_changes[1].text assert '(Owner) removed from ' in user_changes[2].text @override_settings(CELERY_ALWAYS_EAGER=True) @mock.patch('devhub.tasks.validate') def test_validation_not_run_eagerly(self, validate): """Tests that validation is not run in eager mode.""" assert not self.file.has_been_validated self.client.get(self.url) assert not validate.called @override_settings(CELERY_ALWAYS_EAGER=False) @mock.patch('devhub.tasks.validate') def test_validation_run(self, validate): """Tests that validation is run if necessary.""" assert not self.file.has_been_validated self.client.get(self.url) validate.assert_called_once_with(self.file) @override_settings(CELERY_ALWAYS_EAGER=False) @mock.patch('devhub.tasks.validate') def test_validation_not_run_again(self, validate): """Tests that validation is not run for files which have cached results.""" FileValidation.objects.create(file=self.file, validation=json.dumps( amo.VALIDATOR_SKELETON_RESULTS)) self.client.get(self.url) assert not validate.called class TestReviewPreliminary(ReviewBase): def prelim_dict(self): return self.get_dict(action='prelim') def test_prelim_comments_requested(self): response = self.client.post(self.url, {'action': 'prelim'}) eq_(response.context['form'].errors['comments'][0], 'This field is required.') @patch('editors.helpers.sign_file') def test_prelim_from_lite(self, mock_sign): self.addon.update(status=amo.STATUS_LITE) self.version.files.all()[0].update(status=amo.STATUS_UNREVIEWED) response = self.client.post(self.url, self.prelim_dict()) eq_(response.status_code, 302) eq_(self.get_addon().status, amo.STATUS_LITE) assert mock_sign.called def test_prelim_from_lite_required(self): self.addon.update(status=amo.STATUS_LITE) response = self.client.post(self.url, {'action': 'prelim'}) eq_(response.context['form'].errors['comments'][0], 'This field is required.') def test_prelim_from_lite_no_files(self): self.addon.update(status=amo.STATUS_LITE) data = self.prelim_dict() del data['addon_files'] response = self.client.post(self.url, data) eq_(response.context['form'].errors['addon_files'][0], 'You must select some files.') def test_prelim_from_lite_wrong(self): self.addon.update(status=amo.STATUS_LITE) response = self.client.post(self.url, self.prelim_dict()) eq_(response.context['form'].errors['addon_files'][0], 'File Public.xpi is not pending review.') def test_prelim_from_lite_wrong_two(self): self.addon.update(status=amo.STATUS_LITE) data = self.prelim_dict() f = self.version.files.all()[0] statuses = dict(File.STATUS_CHOICES) # Shallow copy. del statuses[amo.STATUS_BETA], statuses[amo.STATUS_UNREVIEWED] for status in statuses: f.update(status=status) response = self.client.post(self.url, data) eq_(response.context['form'].errors['addon_files'][0], 'File Public.xpi is not pending review.') def test_prelim_from_lite_files(self): self.addon.update(status=amo.STATUS_LITE) self.client.post(self.url, self.prelim_dict()) eq_(self.get_addon().status, amo.STATUS_LITE) @patch('editors.helpers.sign_file') def test_prelim_from_unreviewed(self, mock_sign): self.addon.update(status=amo.STATUS_UNREVIEWED) response = self.client.post(self.url, self.prelim_dict()) eq_(response.status_code, 302) eq_(self.get_addon().status, amo.STATUS_LITE) assert mock_sign.called def test_prelim_multiple_files(self): file_ = self.version.files.all()[0] file_.pk = None file_.status = amo.STATUS_DISABLED file_.save() self.addon.update(status=amo.STATUS_LITE) data = self.prelim_dict() data['addon_files'] = [file_.pk] self.client.post(self.url, data) eq_([amo.STATUS_DISABLED, amo.STATUS_LITE], [f.status for f in self.version.files.all().order_by('status')]) class TestReviewPending(ReviewBase): def setUp(self): super(TestReviewPending, self).setUp() self.file = File.objects.create(version=self.version, status=amo.STATUS_UNREVIEWED) self.addon.update(status=amo.STATUS_PUBLIC) def pending_dict(self): files = list(self.version.files.values_list('id', flat=True)) return self.get_dict(action='public', addon_files=files) @patch('editors.helpers.sign_file') def test_pending_to_public(self, mock_sign): statuses = (self.version.files.values_list('status', flat=True) .order_by('status')) eq_(list(statuses), [amo.STATUS_UNREVIEWED, amo.STATUS_LITE]) r = self.client.post(self.url, self.pending_dict()) eq_(self.get_addon().status, amo.STATUS_PUBLIC) self.assertRedirects(r, reverse('editors.queue_pending')) statuses = (self.version.files.values_list('status', flat=True) .order_by('status')) eq_(list(statuses), [amo.STATUS_PUBLIC] * 2) assert mock_sign.called @patch('editors.helpers.sign_file') def test_pending_to_public_unlisted_addon(self, mock_sign): self.addon.update(is_listed=False) statuses = (self.version.files.values_list('status', flat=True) .order_by('status')) assert list(statuses) == [amo.STATUS_UNREVIEWED, amo.STATUS_LITE] self.login_as_admin() response = self.client.post(self.url, self.pending_dict()) assert self.addon.reload().status == amo.STATUS_PUBLIC self.assertRedirects(response, reverse('editors.unlisted_queue_pending')) statuses = (self.version.files.values_list('status', flat=True) .order_by('status')) assert list(statuses) == [amo.STATUS_PUBLIC] * 2 assert mock_sign.called def test_disabled_file(self): obj = File.objects.create(version=self.version, status=amo.STATUS_DISABLED) response = self.client.get(self.url, self.pending_dict()) doc = pq(response.content) assert 'disabled' in doc('#file-%s' % obj.pk)[0].keys() assert 'disabled' not in doc('#file-%s' % self.file.pk)[0].keys() class TestEditorMOTD(EditorTest): def get_url(self, save=False): return reverse('editors.%smotd' % ('save_' if save else '')) def test_change_motd(self): self.login_as_admin() motd = "Let's get crazy" r = self.client.post(self.get_url(save=True), {'motd': motd}) url = self.get_url() self.assertRedirects(r, url) r = self.client.get(url) eq_(pq(r.content)('.daily-message p').text(), motd) def test_require_editor_to_view(self): url = self.get_url() r = self.client.head(url) self.assertRedirects(r, '%s?to=%s' % (reverse('users.login'), url)) def test_require_admin_to_change_motd(self): self.login_as_editor() r = self.client.post(reverse('editors.save_motd'), {'motd': "I'm a sneaky editor"}) eq_(r.status_code, 403) def test_editor_can_view_not_edit(self): motd = 'Some announcement' set_config('editors_review_motd', motd) self.login_as_editor() r = self.client.get(self.get_url()) eq_(pq(r.content)('.daily-message p').text(), motd) eq_(r.context['form'], None) def test_motd_edit_group(self): user = UserProfile.objects.get(email='[email protected]') group = Group.objects.create(name='Add-on Reviewer MOTD', rules='AddonReviewerMOTD:Edit') GroupUser.objects.create(user=user, group=group) self.login_as_editor() r = self.client.post(reverse('editors.save_motd'), {'motd': 'I am the keymaster.'}) eq_(r.status_code, 302) eq_(get_config('editors_review_motd'), 'I am the keymaster.') def test_form_errors(self): self.login_as_admin() r = self.client.post(self.get_url(save=True)) doc = pq(r.content) eq_(doc('#editor-motd .errorlist').text(), 'This field is required.') class TestStatusFile(ReviewBase): def get_file(self): return self.version.files.all()[0] def check_status(self, expected): r = self.client.get(self.url) eq_(pq(r.content)('#review-files .file-info div').text(), expected) def test_status_prelim(self): for status in [amo.STATUS_UNREVIEWED, amo.STATUS_LITE]: self.addon.update(status=status) self.check_status('Pending Preliminary Review') def test_status_full(self): for status in [amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED, amo.STATUS_PUBLIC]: self.addon.update(status=status) self.check_status('Pending Full Review') def test_status_full_reviewed(self): self.get_file().update(status=amo.STATUS_PUBLIC) for status in set(amo.UNDER_REVIEW_STATUSES + amo.LITE_STATUSES): self.addon.update(status=status) self.check_status('Fully Reviewed') def test_other(self): self.addon.update(status=amo.STATUS_BETA) self.check_status(unicode(File.STATUS_CHOICES[self.get_file().status])) class TestWhiteboard(ReviewBase): def test_whiteboard_addition(self): whiteboard_info = u'Whiteboard info.' url = reverse('editors.whiteboard', args=[self.addon.slug]) response = self.client.post(url, {'whiteboard': whiteboard_info}) assert response.status_code == 302 assert self.get_addon().whiteboard == whiteboard_info @patch('addons.decorators.owner_or_unlisted_reviewer', lambda r, a: True) def test_whiteboard_addition_unlisted_addon(self): self.addon.update(is_listed=False) whiteboard_info = u'Whiteboard info.' url = reverse('editors.whiteboard', args=[self.addon.slug]) response = self.client.post(url, {'whiteboard': whiteboard_info}) assert response.status_code == 302 assert self.addon.reload().whiteboard == whiteboard_info class TestAbuseReports(amo.tests.TestCase): fixtures = ['base/users', 'base/addon_3615'] def setUp(self): user = UserProfile.objects.all()[0] AbuseReport.objects.create(addon_id=3615, message='woo') AbuseReport.objects.create(addon_id=3615, message='yeah', reporter=user) # Make a user abuse report to make sure it doesn't show up. AbuseReport.objects.create(user=user, message='hey now') def test_abuse_reports_list(self): assert self.client.login(username='[email protected]', password='password') r = self.client.get(reverse('editors.abuse_reports', args=['a3615'])) eq_(r.status_code, 200) # We see the two abuse reports created in setUp. eq_(len(r.context['reports']), 2) def test_no_abuse_reports_link_for_unlisted_addons(self): """Unlisted addons aren't public, and thus have no abuse reports.""" addon = Addon.objects.get(pk=3615) addon.update(is_listed=False) self.client.login(username='[email protected]', password='password') response = reverse('editors.review', args=[addon.slug]) abuse_report_url = reverse('editors.abuse_reports', args=['a3615']) assert abuse_report_url not in response class TestLeaderboard(EditorTest): fixtures = ['base/users'] def setUp(self): super(TestLeaderboard, self).setUp() self.url = reverse('editors.leaderboard') self.user = UserProfile.objects.get(email='[email protected]') self.login_as_editor() amo.set_user(self.user) def _award_points(self, user, score): ReviewerScore.objects.create(user=user, note_key=amo.REVIEWED_MANUAL, score=score, note='Thing.') def test_leaderboard_ranks(self): users = (self.user, UserProfile.objects.get(email='[email protected]'), UserProfile.objects.get(email='[email protected]')) self._award_points(users[0], amo.REVIEWED_LEVELS[0]['points'] - 1) self._award_points(users[1], amo.REVIEWED_LEVELS[0]['points'] + 1) self._award_points(users[2], amo.REVIEWED_LEVELS[0]['points'] + 2) def get_cells(): doc = pq(self.client.get(self.url).content.decode('utf-8')) cells = doc('#leaderboard > tbody > tr > .name, ' '#leaderboard > tbody > tr > .level') return [cells.eq(i).text() for i in range(0, cells.length)] eq_(get_cells(), [users[2].display_name, users[1].display_name, amo.REVIEWED_LEVELS[0]['name'], users[0].display_name]) self._award_points(users[0], 1) eq_(get_cells(), [users[2].display_name, users[1].display_name, users[0].display_name, amo.REVIEWED_LEVELS[0]['name']]) self._award_points(users[0], -1) self._award_points(users[2], (amo.REVIEWED_LEVELS[1]['points'] - amo.REVIEWED_LEVELS[0]['points'])) eq_(get_cells(), [users[2].display_name, amo.REVIEWED_LEVELS[1]['name'], users[1].display_name, amo.REVIEWED_LEVELS[0]['name'], users[0].display_name]) class TestXssOnAddonName(amo.tests.TestXss): def test_editors_abuse_report_page(self): url = reverse('editors.abuse_reports', args=[self.addon.slug]) self.assertNameAndNoXSS(url) def test_editors_review_page(self): url = reverse('editors.review', args=[self.addon.slug]) self.assertNameAndNoXSS(url)
{ "content_hash": "46cc27830efa00735d5ebdc51eb9b7b9", "timestamp": "", "source": "github", "line_count": 2863, "max_line_length": 79, "avg_line_length": 38.24449877750611, "alnum_prop": 0.5823058797742342, "repo_name": "mrrrgn/olympia", "id": "9413cd9bd9a6415ddeaf0d8c171e331d5481100c", "size": "109558", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "apps/editors/tests/test_views.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ApacheConf", "bytes": "249" }, { "name": "C", "bytes": "4145" }, { "name": "CSS", "bytes": "656259" }, { "name": "HTML", "bytes": "1631808" }, { "name": "JavaScript", "bytes": "1289210" }, { "name": "Makefile", "bytes": "3945" }, { "name": "PLSQL", "bytes": "74" }, { "name": "Python", "bytes": "3901513" }, { "name": "Shell", "bytes": "10332" }, { "name": "Smarty", "bytes": "2229" } ], "symlink_target": "" }
from .constants import FIFF from .tag import find_tag, has_tag from .write import (write_int, start_block, end_block, write_float_matrix, write_name_list) from ..utils import logger, verbose def _transpose_named_matrix(mat): """Transpose mat inplace (no copy)""" mat['nrow'], mat['ncol'] = mat['ncol'], mat['nrow'] mat['row_names'], mat['col_names'] = mat['col_names'], mat['row_names'] mat['data'] = mat['data'].T @verbose def _read_named_matrix(fid, node, matkind, indent=' ', transpose=False, verbose=None): """Read named matrix from the given node Parameters ---------- fid : file The opened file descriptor. node : dict The node in the tree. matkind : int The type of matrix. transpose : bool If True, transpose the matrix. Default is False. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- mat: dict The matrix data """ # Descend one level if necessary if node['block'] != FIFF.FIFFB_MNE_NAMED_MATRIX: for k in range(node['nchild']): if node['children'][k]['block'] == FIFF.FIFFB_MNE_NAMED_MATRIX: if has_tag(node['children'][k], matkind): node = node['children'][k] break else: logger.info(indent + 'Desired named matrix (kind = %d) not ' 'available' % matkind) return None else: if not has_tag(node, matkind): logger.info(indent + 'Desired named matrix (kind = %d) not ' 'available' % matkind) return None # Read everything we need tag = find_tag(fid, node, matkind) if tag is None: raise ValueError('Matrix data missing') else: data = tag.data nrow, ncol = data.shape tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW) if tag is not None and tag.data != nrow: raise ValueError('Number of rows in matrix data and FIFF_MNE_NROW ' 'tag do not match') tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL) if tag is not None and tag.data != ncol: raise ValueError('Number of columns in matrix data and ' 'FIFF_MNE_NCOL tag do not match') tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES) row_names = tag.data.split(':') if tag is not None else [] tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES) col_names = tag.data.split(':') if tag is not None else [] mat = dict(nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names, data=data) if transpose: _transpose_named_matrix(mat) return mat def write_named_matrix(fid, kind, mat): """Write named matrix from the given node Parameters ---------- fid : file The opened file descriptor. kind : int The kind of the matrix. matkind : int The type of matrix. """ # let's save ourselves from disaster n_tot = mat['nrow'] * mat['ncol'] if mat['data'].size != n_tot: ratio = n_tot / float(mat['data'].size) if n_tot < mat['data'].size and ratio > 0: ratio = 1 / ratio raise ValueError('Cannot write matrix: row (%i) and column (%i) ' 'total element (%i) mismatch with data size (%i), ' 'appears to be off by a factor of %gx' % (mat['nrow'], mat['ncol'], n_tot, mat['data'].size, ratio)) start_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX) write_int(fid, FIFF.FIFF_MNE_NROW, mat['nrow']) write_int(fid, FIFF.FIFF_MNE_NCOL, mat['ncol']) if len(mat['row_names']) > 0: # let's prevent unintentional stupidity if len(mat['row_names']) != mat['nrow']: raise ValueError('len(mat["row_names"]) != mat["nrow"]') write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, mat['row_names']) if len(mat['col_names']) > 0: # let's prevent unintentional stupidity if len(mat['col_names']) != mat['ncol']: raise ValueError('len(mat["col_names"]) != mat["ncol"]') write_name_list(fid, FIFF.FIFF_MNE_COL_NAMES, mat['col_names']) write_float_matrix(fid, kind, mat['data']) end_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
{ "content_hash": "0e218f8c00a77a133656c52a57677ca9", "timestamp": "", "source": "github", "line_count": 126, "max_line_length": 78, "avg_line_length": 35.20634920634921, "alnum_prop": 0.5606402164111812, "repo_name": "alexandrebarachant/mne-python", "id": "e636a65975786aaf56b3a8f8bbbf61cebabd5a2f", "size": "4591", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "mne/io/matrix.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Csound Document", "bytes": "69806" }, { "name": "Makefile", "bytes": "3679" }, { "name": "Python", "bytes": "5469295" }, { "name": "Shell", "bytes": "936" } ], "symlink_target": "" }
class Foo(object): @property def value(self): return 'foo' class Bar(object): def __init__(self, foo): self._foo = foo @property def value(self): return self._foo.value + 'bar' class Baz(object): @property def value(self): return 'baz' class Qux(object): def __init__(self, foo, bar, baz): self._foo = foo self._bar = bar self._baz = baz @property def value(self): return self._foo.value + self._bar.value + self._baz.value + 'qux' class Spam(object): def __init__(self, ham=None, eggs=None): self._ham = ham self._eggs = eggs @property def ham(self): return self._ham def set_ham(self, ham): self._ham = ham @property def eggs(self): return self._eggs def set_eggs(self, eggs=None): self._eggs = eggs class Factory(object): @classmethod def get_foo(cls): return Foo() @classmethod def get_spam(cls, ham, eggs): return Spam(ham, eggs) @classmethod def get_more_spam(cls, ham='MAH', eggs='sgge'): return Spam(ham, eggs) class Wibble(object): value = 'wobble' def __init__(self): self.value = 'webble' class Wobble(object): def __init__(self, foo=None, bar=None, baz=None, spam=None): self._foo = foo self._bar = bar self._baz = baz self._spam = spam @property def foo(self): return self._foo @property def bar(self): return self._bar @property def baz(self): return self._baz @property def spam(self): return self._spam class Weeble(object): def __init__(self, config): self._config = config def find(self, key): return self._config[key] class TestLogger(object): def __init__(self, *arg, **unused_kwargs): self._config = arg[0] @property def config(self): return self._config
{ "content_hash": "c9322639bba0ffadbf40f87cc3a97769", "timestamp": "", "source": "github", "line_count": 112, "max_line_length": 74, "avg_line_length": 17.901785714285715, "alnum_prop": 0.5476309226932669, "repo_name": "refinery29/chassis", "id": "809457e8779d010324010632a98f91b7d01364be", "size": "2115", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "chassis/test/example_classes.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "84288" }, { "name": "Shell", "bytes": "401" } ], "symlink_target": "" }
"""Quantized distribution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import distribution as distributions from tensorflow.python.ops.distributions import util as distribution_util from tensorflow.python.util import deprecation __all__ = ["QuantizedDistribution"] @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _logsum_expbig_minus_expsmall(big, small): """Stable evaluation of `Log[exp{big} - exp{small}]`. To work correctly, we should have the pointwise relation: `small <= big`. Args: big: Floating-point `Tensor` small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable shape. Returns: `Tensor` of same `dtype` of `big` and broadcast shape. """ with ops.name_scope("logsum_expbig_minus_expsmall", values=[small, big]): return math_ops.log(1. - math_ops.exp(small - big)) + big _prob_base_note = """ For whole numbers `y`, ``` P[Y = y] := P[X <= low], if y == low, := P[X > high - 1], y == high, := 0, if j < low or y > high, := P[y - 1 < X <= y], all other y. ``` """ _prob_note = _prob_base_note + """ The base distribution's `cdf` method must be defined on `y - 1`. If the base distribution has a `survival_function` method, results will be more accurate for large values of `y`, and in this case the `survival_function` must also be defined on `y - 1`. """ _log_prob_note = _prob_base_note + """ The base distribution's `log_cdf` method must be defined on `y - 1`. If the base distribution has a `log_survival_function` method results will be more accurate for large values of `y`, and in this case the `log_survival_function` must also be defined on `y - 1`. """ _cdf_base_note = """ For whole numbers `y`, ``` cdf(y) := P[Y <= y] = 1, if y >= high, = 0, if y < low, = P[X <= y], otherwise. ``` Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`. This dictates that fractional `y` are first floored to a whole number, and then above definition applies. """ _cdf_note = _cdf_base_note + """ The base distribution's `cdf` method must be defined on `y - 1`. """ _log_cdf_note = _cdf_base_note + """ The base distribution's `log_cdf` method must be defined on `y - 1`. """ _sf_base_note = """ For whole numbers `y`, ``` survival_function(y) := P[Y > y] = 0, if y >= high, = 1, if y < low, = P[X <= y], otherwise. ``` Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`. This dictates that fractional `y` are first floored to a whole number, and then above definition applies. """ _sf_note = _sf_base_note + """ The base distribution's `cdf` method must be defined on `y - 1`. """ _log_sf_note = _sf_base_note + """ The base distribution's `log_cdf` method must be defined on `y - 1`. """ class QuantizedDistribution(distributions.Distribution): """Distribution representing the quantization `Y = ceiling(X)`. #### Definition in Terms of Sampling ``` 1. Draw X 2. Set Y <-- ceiling(X) 3. If Y < low, reset Y <-- low 4. If Y > high, reset Y <-- high 5. Return Y ``` #### Definition in Terms of the Probability Mass Function Given scalar random variable `X`, we define a discrete random variable `Y` supported on the integers as follows: ``` P[Y = j] := P[X <= low], if j == low, := P[X > high - 1], j == high, := 0, if j < low or j > high, := P[j - 1 < X <= j], all other j. ``` Conceptually, without cutoffs, the quantization process partitions the real line `R` into half open intervals, and identifies an integer `j` with the right endpoints: ``` R = ... (-2, -1](-1, 0](0, 1](1, 2](2, 3](3, 4] ... j = ... -1 0 1 2 3 4 ... ``` `P[Y = j]` is the mass of `X` within the `jth` interval. If `low = 0`, and `high = 2`, then the intervals are redrawn and `j` is re-assigned: ``` R = (-infty, 0](0, 1](1, infty) j = 0 1 2 ``` `P[Y = j]` is still the mass of `X` within the `jth` interval. #### Examples We illustrate a mixture of discretized logistic distributions [(Salimans et al., 2017)][1]. This is used, for example, for capturing 16-bit audio in WaveNet [(van den Oord et al., 2017)][2]. The values range in a 1-D integer domain of `[0, 2**16-1]`, and the discretization captures `P(x - 0.5 < X <= x + 0.5)` for all `x` in the domain excluding the endpoints. The lowest value has probability `P(X <= 0.5)` and the highest value has probability `P(2**16 - 1.5 < X)`. Below we assume a `wavenet` function. It takes as `input` right-shifted audio samples of shape `[..., sequence_length]`. It returns a real-valued tensor of shape `[..., num_mixtures * 3]`, i.e., each mixture component has a `loc` and `scale` parameter belonging to the logistic distribution, and a `logits` parameter determining the unnormalized probability of that component. ```python tfd = tf.contrib.distributions tfb = tfd.bijectors net = wavenet(inputs) loc, unconstrained_scale, logits = tf.split(net, num_or_size_splits=3, axis=-1) scale = tf.nn.softplus(unconstrained_scale) # Form mixture of discretized logistic distributions. Note we shift the # logistic distribution by -0.5. This lets the quantization capture "rounding" # intervals, `(x-0.5, x+0.5]`, and not "ceiling" intervals, `(x-1, x]`. discretized_logistic_dist = tfd.QuantizedDistribution( distribution=tfd.TransformedDistribution( distribution=tfd.Logistic(loc=loc, scale=scale), bijector=tfb.AffineScalar(shift=-0.5)), low=0., high=2**16 - 1.) mixture_dist = tfd.MixtureSameFamily( mixture_distribution=tfd.Categorical(logits=logits), components_distribution=discretized_logistic_dist) neg_log_likelihood = -tf.reduce_sum(mixture_dist.log_prob(targets)) train_op = tf.train.AdamOptimizer().minimize(neg_log_likelihood) ``` After instantiating `mixture_dist`, we illustrate maximum likelihood by calculating its log-probability of audio samples as `target` and optimizing. #### References [1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma. PixelCNN++: Improving the PixelCNN with discretized logistic mixture likelihood and other modifications. _International Conference on Learning Representations_, 2017. https://arxiv.org/abs/1701.05517 [2]: Aaron van den Oord et al. Parallel WaveNet: Fast High-Fidelity Speech Synthesis. _arXiv preprint arXiv:1711.10433_, 2017. https://arxiv.org/abs/1711.10433 """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, distribution, low=None, high=None, validate_args=False, name="QuantizedDistribution"): """Construct a Quantized Distribution representing `Y = ceiling(X)`. Some properties are inherited from the distribution defining `X`. Example: `allow_nan_stats` is determined for this `QuantizedDistribution` by reading the `distribution`. Args: distribution: The base distribution class to transform. Typically an instance of `Distribution`. low: `Tensor` with same `dtype` as this distribution and shape able to be added to samples. Should be a whole number. Default `None`. If provided, base distribution's `prob` should be defined at `low`. high: `Tensor` with same `dtype` as this distribution and shape able to be added to samples. Should be a whole number. Default `None`. If provided, base distribution's `prob` should be defined at `high - 1`. `high` must be strictly greater than `low`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: If `dist_cls` is not a subclass of `Distribution` or continuous. NotImplementedError: If the base distribution does not implement `cdf`. """ parameters = dict(locals()) values = ( list(distribution.parameters.values()) + [low, high]) with ops.name_scope(name, values=values) as name: self._dist = distribution if low is not None: low = ops.convert_to_tensor(low, name="low") if high is not None: high = ops.convert_to_tensor(high, name="high") check_ops.assert_same_float_dtype( tensors=[self.distribution, low, high]) # We let QuantizedDistribution access _graph_parents since this class is # more like a baseclass. graph_parents = self._dist._graph_parents # pylint: disable=protected-access checks = [] if validate_args and low is not None and high is not None: message = "low must be strictly less than high." checks.append( check_ops.assert_less( low, high, message=message)) self._validate_args = validate_args # self._check_integer uses this. with ops.control_dependencies(checks if validate_args else []): if low is not None: self._low = self._check_integer(low) graph_parents += [self._low] else: self._low = None if high is not None: self._high = self._check_integer(high) graph_parents += [self._high] else: self._high = None super(QuantizedDistribution, self).__init__( dtype=self._dist.dtype, reparameterization_type=distributions.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=self._dist.allow_nan_stats, parameters=parameters, graph_parents=graph_parents, name=name) def _batch_shape_tensor(self): return self.distribution.batch_shape_tensor() def _batch_shape(self): return self.distribution.batch_shape def _event_shape_tensor(self): return self.distribution.event_shape_tensor() def _event_shape(self): return self.distribution.event_shape def _sample_n(self, n, seed=None): low = self._low high = self._high with ops.name_scope("transform"): n = ops.convert_to_tensor(n, name="n") x_samps = self.distribution.sample(n, seed=seed) ones = array_ops.ones_like(x_samps) # Snap values to the intervals (j - 1, j]. result_so_far = math_ops.ceil(x_samps) if low is not None: result_so_far = array_ops.where(result_so_far < low, low * ones, result_so_far) if high is not None: result_so_far = array_ops.where(result_so_far > high, high * ones, result_so_far) return result_so_far @distribution_util.AppendDocstring(_log_prob_note) def _log_prob(self, y): if not hasattr(self.distribution, "_log_cdf"): raise NotImplementedError( "'log_prob' not implemented unless the base distribution implements " "'log_cdf'") y = self._check_integer(y) try: return self._log_prob_with_logsf_and_logcdf(y) except NotImplementedError: return self._log_prob_with_logcdf(y) def _log_prob_with_logcdf(self, y): return _logsum_expbig_minus_expsmall(self.log_cdf(y), self.log_cdf(y - 1)) def _log_prob_with_logsf_and_logcdf(self, y): """Compute log_prob(y) using log survival_function and cdf together.""" # There are two options that would be equal if we had infinite precision: # Log[ sf(y - 1) - sf(y) ] # = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ] # Log[ cdf(y) - cdf(y - 1) ] # = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ] logsf_y = self.log_survival_function(y) logsf_y_minus_1 = self.log_survival_function(y - 1) logcdf_y = self.log_cdf(y) logcdf_y_minus_1 = self.log_cdf(y - 1) # Important: Here we use select in a way such that no input is inf, this # prevents the troublesome case where the output of select can be finite, # but the output of grad(select) will be NaN. # In either case, we are doing Log[ exp{big} - exp{small} ] # We want to use the sf items precisely when we are on the right side of the # median, which occurs when logsf_y < logcdf_y. big = array_ops.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y) small = array_ops.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1) return _logsum_expbig_minus_expsmall(big, small) @distribution_util.AppendDocstring(_prob_note) def _prob(self, y): if not hasattr(self.distribution, "_cdf"): raise NotImplementedError( "'prob' not implemented unless the base distribution implements " "'cdf'") y = self._check_integer(y) try: return self._prob_with_sf_and_cdf(y) except NotImplementedError: return self._prob_with_cdf(y) def _prob_with_cdf(self, y): return self.cdf(y) - self.cdf(y - 1) def _prob_with_sf_and_cdf(self, y): # There are two options that would be equal if we had infinite precision: # sf(y - 1) - sf(y) # cdf(y) - cdf(y - 1) sf_y = self.survival_function(y) sf_y_minus_1 = self.survival_function(y - 1) cdf_y = self.cdf(y) cdf_y_minus_1 = self.cdf(y - 1) # sf_prob has greater precision iff we're on the right side of the median. return array_ops.where( sf_y < cdf_y, # True iff we're on the right side of the median. sf_y_minus_1 - sf_y, cdf_y - cdf_y_minus_1) @distribution_util.AppendDocstring(_log_cdf_note) def _log_cdf(self, y): low = self._low high = self._high # Recall the promise: # cdf(y) := P[Y <= y] # = 1, if y >= high, # = 0, if y < low, # = P[X <= y], otherwise. # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in # between. j = math_ops.floor(y) result_so_far = self.distribution.log_cdf(j) # Broadcast, because it's possible that this is a single distribution being # evaluated on a number of samples, or something like that. j += array_ops.zeros_like(result_so_far) # Re-define values at the cutoffs. if low is not None: neg_inf = -np.inf * array_ops.ones_like(result_so_far) result_so_far = array_ops.where(j < low, neg_inf, result_so_far) if high is not None: result_so_far = array_ops.where(j >= high, array_ops.zeros_like(result_so_far), result_so_far) return result_so_far @distribution_util.AppendDocstring(_cdf_note) def _cdf(self, y): low = self._low high = self._high # Recall the promise: # cdf(y) := P[Y <= y] # = 1, if y >= high, # = 0, if y < low, # = P[X <= y], otherwise. # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in # between. j = math_ops.floor(y) # P[X <= j], used when low < X < high. result_so_far = self.distribution.cdf(j) # Broadcast, because it's possible that this is a single distribution being # evaluated on a number of samples, or something like that. j += array_ops.zeros_like(result_so_far) # Re-define values at the cutoffs. if low is not None: result_so_far = array_ops.where(j < low, array_ops.zeros_like(result_so_far), result_so_far) if high is not None: result_so_far = array_ops.where(j >= high, array_ops.ones_like(result_so_far), result_so_far) return result_so_far @distribution_util.AppendDocstring(_log_sf_note) def _log_survival_function(self, y): low = self._low high = self._high # Recall the promise: # survival_function(y) := P[Y > y] # = 0, if y >= high, # = 1, if y < low, # = P[X > y], otherwise. # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in # between. j = math_ops.ceil(y) # P[X > j], used when low < X < high. result_so_far = self.distribution.log_survival_function(j) # Broadcast, because it's possible that this is a single distribution being # evaluated on a number of samples, or something like that. j += array_ops.zeros_like(result_so_far) # Re-define values at the cutoffs. if low is not None: result_so_far = array_ops.where(j < low, array_ops.zeros_like(result_so_far), result_so_far) if high is not None: neg_inf = -np.inf * array_ops.ones_like(result_so_far) result_so_far = array_ops.where(j >= high, neg_inf, result_so_far) return result_so_far @distribution_util.AppendDocstring(_sf_note) def _survival_function(self, y): low = self._low high = self._high # Recall the promise: # survival_function(y) := P[Y > y] # = 0, if y >= high, # = 1, if y < low, # = P[X > y], otherwise. # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in # between. j = math_ops.ceil(y) # P[X > j], used when low < X < high. result_so_far = self.distribution.survival_function(j) # Broadcast, because it's possible that this is a single distribution being # evaluated on a number of samples, or something like that. j += array_ops.zeros_like(result_so_far) # Re-define values at the cutoffs. if low is not None: result_so_far = array_ops.where(j < low, array_ops.ones_like(result_so_far), result_so_far) if high is not None: result_so_far = array_ops.where(j >= high, array_ops.zeros_like(result_so_far), result_so_far) return result_so_far def _check_integer(self, value): with ops.name_scope("check_integer", values=[value]): value = ops.convert_to_tensor(value, name="value") if not self.validate_args: return value dependencies = [distribution_util.assert_integer_form( value, message="value has non-integer components.")] return control_flow_ops.with_dependencies(dependencies, value) @property def distribution(self): """Base distribution, p(x).""" return self._dist
{ "content_hash": "0309cb8738e3468580bbf76de6fc61fd", "timestamp": "", "source": "github", "line_count": 562, "max_line_length": 83, "avg_line_length": 35.15124555160142, "alnum_prop": 0.6109339407744875, "repo_name": "lukeiwanski/tensorflow", "id": "ef3bdfa75fcaa8df17db1238ceadadf788601356", "size": "20444", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "tensorflow/contrib/distributions/python/ops/quantized_distribution.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "9258" }, { "name": "C", "bytes": "305344" }, { "name": "C++", "bytes": "44091926" }, { "name": "CMake", "bytes": "206801" }, { "name": "Go", "bytes": "1163771" }, { "name": "HTML", "bytes": "4680032" }, { "name": "Java", "bytes": "768682" }, { "name": "Jupyter Notebook", "bytes": "2245985" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "49862" }, { "name": "Objective-C", "bytes": "15650" }, { "name": "Objective-C++", "bytes": "99265" }, { "name": "PHP", "bytes": "2140" }, { "name": "Perl", "bytes": "7536" }, { "name": "PureBasic", "bytes": "25356" }, { "name": "Python", "bytes": "37482296" }, { "name": "Ruby", "bytes": "533" }, { "name": "Shell", "bytes": "443812" }, { "name": "Smarty", "bytes": "6870" } ], "symlink_target": "" }
import os, sys import logging as log from optparse import OptionParser import numpy as np import unsupervised.util import unsupervised.rankings # -------------------------------------------------------------- def main(): parser = OptionParser(usage="usage: %prog [options] reference_rank_file test_rank_file1 test_rank_file2 ...") parser.add_option("-t", "--top", action="store", type="int", dest="top", help="number of top terms to use", default=20) parser.add_option('-d','--debug',type="int",help="Level of log output; 0 is less, 5 is all", default=3) (options, args) = parser.parse_args() if( len(args) < 2 ): parser.error( "Must specify at least two ranking sets" ) log.basicConfig(level=max(50 - (options.debug * 10), 10), format='%(asctime)-18s %(levelname)-10s %(message)s', datefmt='%d/%m/%Y %H:%M',) # Load cached ranking sets log.info( "Reading %d term ranking sets (top=%d) ..." % ( len(args), options.top ) ) all_term_rankings = [] for rank_path in args: # first set is the reference set if len(all_term_rankings) == 0: log.debug( "Loading reference term ranking set from %s ..." % rank_path ) else: log.debug( "Loading test term ranking set from %s ..." % rank_path ) (term_rankings,labels) = unsupervised.util.load_term_rankings( rank_path ) log.debug( "Set has %d rankings covering %d terms" % ( len(term_rankings), unsupervised.rankings.term_rankings_size( term_rankings ) ) ) # do we need to truncate the number of terms in the ranking? if options.top > 1: term_rankings = unsupervised.rankings.truncate_term_rankings( term_rankings, options.top ) log.debug( "Truncated to %d -> set now has %d rankings covering %d terms" % ( options.top, len(term_rankings), unsupervised.rankings.term_rankings_size( term_rankings ) ) ) all_term_rankings.append( term_rankings ) # First argument was the reference term ranking reference_term_ranking = all_term_rankings[0] all_term_rankings = all_term_rankings[1:] r = len(all_term_rankings) log.info( "Loaded %d non-reference term rankings" % r ) # Perform the evaluation metric = unsupervised.rankings.AverageJaccard() matcher = unsupervised.rankings.RankingSetAgreement( metric ) log.info( "Performing reference comparisons with %s ..." % str(metric) ) all_scores = [] for i in range(r): score = matcher.similarity( reference_term_ranking, all_term_rankings[i] ) all_scores.append( score ) # Get overall score across all candidates all_scores = np.array( all_scores ) log.info( "Stability=%.4f [%.4f,%.4f]" % ( all_scores.mean(), all_scores.min(), all_scores.max() ) ) # -------------------------------------------------------------- if __name__ == "__main__": main()
{ "content_hash": "1b9168526e8d66155ae6503d655f5465", "timestamp": "", "source": "github", "line_count": 59, "max_line_length": 175, "avg_line_length": 45.644067796610166, "alnum_prop": 0.662086891942072, "repo_name": "akiratu/topic-stability", "id": "773a4bcd8265dd75ddbc8c4b44c4237e92cb3265", "size": "2715", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "topic-stability.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "93182" } ], "symlink_target": "" }
import copy import unittest from typing import Optional, List, Union from slack_sdk.errors import SlackObjectFormationError from slack_sdk.models import JsonObject, JsonValidator from slack_sdk.models.blocks import ( ConfirmObject, MarkdownTextObject, Option, OptionGroup, PlainTextObject, ) from slack_sdk.models.messages import ( ChannelLink, DateLink, EveryoneLink, HereLink, Link, ObjectLink, ) from . import STRING_301_CHARS, STRING_51_CHARS class SimpleJsonObject(JsonObject): attributes = {"some", "test", "keys"} def __init__(self): self.some = "this is" self.test = "a test" self.keys = "object" @JsonValidator("some validation message") def test_valid(self): return len(self.test) <= 10 @JsonValidator("this should never fail") def always_valid_test(self): return True class KeyValueObject(JsonObject): attributes = {"name", "value"} def __init__( self, *, name: Optional[str] = None, value: Optional[str] = None, ): self.name = name self.value = value class NestedObject(JsonObject): attributes = {"initial", "options"} def __init__( self, *, initial: Union[dict, KeyValueObject], options: List[Union[dict, KeyValueObject]], ): self.initial = ( KeyValueObject(**initial) if isinstance(initial, dict) else initial ) self.options = [ KeyValueObject(**o) if isinstance(o, dict) else o for o in options ] class JsonObjectTests(unittest.TestCase): def setUp(self) -> None: self.good_test_object = SimpleJsonObject() obj = SimpleJsonObject() obj.test = STRING_51_CHARS self.bad_test_object = obj def test_json_formation(self): self.assertDictEqual( self.good_test_object.to_dict(), {"some": "this is", "test": "a test", "keys": "object"}, ) def test_validate_json_fails(self): with self.assertRaises(SlackObjectFormationError): self.bad_test_object.validate_json() def test_to_dict_performs_validation(self): with self.assertRaises(SlackObjectFormationError): self.bad_test_object.to_dict() def test_get_non_null_attributes(self): expected = {"name": "something"} obj = KeyValueObject(name="something", value=None) obj2 = copy.deepcopy(obj) self.assertDictEqual(expected, obj.get_non_null_attributes()) self.assertEqual(str(obj2), str(obj)) def test_get_non_null_attributes_nested(self): expected = { "initial": {"name": "something"}, "options": [ {"name": "something"}, {"name": "message", "value": "That's great!"}, ], } obj1 = KeyValueObject(name="something", value=None) obj2 = KeyValueObject(name="message", value="That's great!") options = [obj1, obj2] nested = NestedObject(initial=obj1, options=options) self.assertEqual(type(obj1), KeyValueObject) self.assertTrue(hasattr(obj1, "value")) self.assertEqual(type(nested.initial), KeyValueObject) self.assertEqual(type(options[0]), KeyValueObject) self.assertTrue(hasattr(options[0], "value")) self.assertEqual(type(nested.options[0]), KeyValueObject) self.assertTrue(hasattr(nested.options[0], "value")) dict_value = nested.get_non_null_attributes() self.assertDictEqual(expected, dict_value) self.assertEqual(type(obj1), KeyValueObject) self.assertTrue(hasattr(obj1, "value")) self.assertEqual(type(nested.initial), KeyValueObject) self.assertEqual(type(options[0]), KeyValueObject) self.assertTrue(hasattr(options[0], "value")) self.assertEqual(type(nested.options[0]), KeyValueObject) self.assertTrue(hasattr(nested.options[0], "value")) def test_get_non_null_attributes_nested_2(self): expected = { "initial": {"name": "something"}, "options": [ {"name": "something"}, {"name": "message", "value": "That's great!"}, ], } nested = NestedObject( initial={"name": "something"}, options=[ {"name": "something"}, {"name": "message", "value": "That's great!"}, ], ) self.assertDictEqual(expected, nested.get_non_null_attributes()) class JsonValidatorTests(unittest.TestCase): def setUp(self) -> None: self.validator_instance = JsonValidator("message") self.class_instance = SimpleJsonObject() def test_isolated_class(self): def does_nothing(): return False wrapped = self.validator_instance(does_nothing) # noinspection PyUnresolvedReferences self.assertTrue(wrapped.validator) def test_wrapped_class(self): for attribute in dir(self.class_instance): attr = getattr(self.class_instance, attribute, None) if attribute in ("test_valid", "always_valid_test"): self.assertTrue(attr.validator) else: with self.assertRaises(AttributeError): # noinspection PyStatementEffect attr.validator class LinkTests(unittest.TestCase): def test_without_text(self): link = Link(url="http://google.com", text="") self.assertEqual(f"{link}", "<http://google.com>") def test_with_text(self): link = Link(url="http://google.com", text="google") self.assertEqual(f"{link}", "<http://google.com|google>") class DateLinkTests(unittest.TestCase): def setUp(self) -> None: self.epoch = 1234567890 def test_simple_formation(self): datelink = DateLink( date=self.epoch, date_format="{date_long}", fallback=f"{self.epoch}" ) self.assertEqual( f"{datelink}", f"<!date^{self.epoch}^{{date_long}}|{self.epoch}>" ) def test_with_url(self): datelink = DateLink( date=self.epoch, date_format="{date_long}", link="http://google.com", fallback=f"{self.epoch}", ) self.assertEqual( f"{datelink}", f"<!date^{self.epoch}^{{date_long}}^http://google.com|{self.epoch}>", ) class ObjectLinkTests(unittest.TestCase): def test_channel(self): objlink = ObjectLink(object_id="C12345") self.assertEqual(f"{objlink}", "<#C12345>") def test_group_message(self): objlink = ObjectLink(object_id="G12345") self.assertEqual(f"{objlink}", "<#G12345>") def test_subteam_message(self): objlink = ObjectLink(object_id="S12345") self.assertEqual(f"{objlink}", "<!subteam^S12345>") def test_with_label(self): objlink = ObjectLink(object_id="C12345", text="abc") self.assertEqual(f"{objlink}", "<#C12345|abc>") def test_unknown_prefix(self): objlink = ObjectLink(object_id="Z12345") self.assertEqual(f"{objlink}", "<@Z12345>") class SpecialLinkTests(unittest.TestCase): def test_channel_link(self): self.assertEqual(f"{ChannelLink()}", "<!channel|channel>") def test_here_link(self): self.assertEqual(f"{HereLink()}", "<!here|here>") def test_everyone_link(self): self.assertEqual(f"{EveryoneLink()}", "<!everyone|everyone>") class PlainTextObjectTests(unittest.TestCase): def test_basic_json(self): self.assertDictEqual( {"text": "some text", "type": "plain_text"}, PlainTextObject(text="some text").to_dict(), ) self.assertDictEqual( {"text": "some text", "emoji": False, "type": "plain_text"}, PlainTextObject(text="some text", emoji=False).to_dict(), ) def test_from_string(self): plaintext = PlainTextObject(text="some text", emoji=True) self.assertDictEqual( plaintext.to_dict(), PlainTextObject.direct_from_string("some text") ) class MarkdownTextObjectTests(unittest.TestCase): def test_basic_json(self): self.assertDictEqual( {"text": "some text", "type": "mrkdwn"}, MarkdownTextObject(text="some text").to_dict(), ) self.assertDictEqual( {"text": "some text", "verbatim": True, "type": "mrkdwn"}, MarkdownTextObject(text="some text", verbatim=True).to_dict(), ) def test_from_string(self): markdown = MarkdownTextObject(text="some text") self.assertDictEqual( markdown.to_dict(), MarkdownTextObject.direct_from_string("some text") ) class ConfirmObjectTests(unittest.TestCase): def test_basic_json(self): expected = { "confirm": {"emoji": True, "text": "Yes", "type": "plain_text"}, "deny": {"emoji": True, "text": "No", "type": "plain_text"}, "text": {"text": "are you sure?", "type": "mrkdwn"}, "title": {"emoji": True, "text": "some title", "type": "plain_text"}, } simple_object = ConfirmObject(title="some title", text="are you sure?") self.assertDictEqual(expected, simple_object.to_dict()) self.assertDictEqual(expected, simple_object.to_dict("block")) self.assertDictEqual( { "text": "are you sure?", "title": "some title", "ok_text": "Okay", "dismiss_text": "Cancel", }, simple_object.to_dict("action"), ) def test_confirm_overrides(self): confirm = ConfirmObject( title="some title", text="are you sure?", confirm="I'm really sure", deny="Nevermind", ) expected = { "confirm": {"text": "I'm really sure", "type": "plain_text", "emoji": True}, "deny": {"text": "Nevermind", "type": "plain_text", "emoji": True}, "text": {"text": "are you sure?", "type": "mrkdwn"}, "title": {"text": "some title", "type": "plain_text", "emoji": True}, } self.assertDictEqual(expected, confirm.to_dict()) self.assertDictEqual(expected, confirm.to_dict("block")) self.assertDictEqual( { "text": "are you sure?", "title": "some title", "ok_text": "I'm really sure", "dismiss_text": "Nevermind", }, confirm.to_dict("action"), ) def test_passing_text_objects(self): direct_construction = ConfirmObject(title="title", text="Are you sure?") mrkdwn = MarkdownTextObject(text="Are you sure?") preconstructed = ConfirmObject(title="title", text=mrkdwn) self.assertDictEqual(direct_construction.to_dict(), preconstructed.to_dict()) plaintext = PlainTextObject(text="Are you sure?", emoji=False) passed_plaintext = ConfirmObject(title="title", text=plaintext) self.assertDictEqual( { "confirm": {"emoji": True, "text": "Yes", "type": "plain_text"}, "deny": {"emoji": True, "text": "No", "type": "plain_text"}, "text": {"emoji": False, "text": "Are you sure?", "type": "plain_text"}, "title": {"emoji": True, "text": "title", "type": "plain_text"}, }, passed_plaintext.to_dict(), ) def test_title_length(self): with self.assertRaises(SlackObjectFormationError): ConfirmObject(title=STRING_301_CHARS, text="Are you sure?").to_dict() def test_text_length(self): with self.assertRaises(SlackObjectFormationError): ConfirmObject(title="title", text=STRING_301_CHARS).to_dict() def test_text_length_with_object(self): with self.assertRaises(SlackObjectFormationError): plaintext = PlainTextObject(text=STRING_301_CHARS) ConfirmObject(title="title", text=plaintext).to_dict() with self.assertRaises(SlackObjectFormationError): markdown = MarkdownTextObject(text=STRING_301_CHARS) ConfirmObject(title="title", text=markdown).to_dict() def test_confirm_length(self): with self.assertRaises(SlackObjectFormationError): ConfirmObject( title="title", text="Are you sure?", confirm=STRING_51_CHARS ).to_dict() def test_deny_length(self): with self.assertRaises(SlackObjectFormationError): ConfirmObject( title="title", text="Are you sure?", deny=STRING_51_CHARS ).to_dict() class OptionTests(unittest.TestCase): def setUp(self) -> None: self.common = Option(label="an option", value="option_1") def test_block_style_json(self): expected = { "text": {"type": "plain_text", "text": "an option", "emoji": True}, "value": "option_1", } self.assertDictEqual(expected, self.common.to_dict("block")) self.assertDictEqual(expected, self.common.to_dict()) def test_dialog_style_json(self): expected = {"label": "an option", "value": "option_1"} self.assertDictEqual(expected, self.common.to_dict("dialog")) def test_action_style_json(self): expected = {"text": "an option", "value": "option_1"} self.assertDictEqual(expected, self.common.to_dict("action")) def test_from_single_value(self): option = Option(label="option_1", value="option_1") self.assertDictEqual( option.to_dict("text"), option.from_single_value("option_1").to_dict("text"), ) def test_label_length(self): with self.assertRaises(SlackObjectFormationError): Option(label=STRING_301_CHARS, value="option_1").to_dict("text") def test_value_length(self): with self.assertRaises(SlackObjectFormationError): Option(label="option_1", value=STRING_301_CHARS).to_dict("text") def test_valid_description_for_blocks(self): option = Option(label="label", value="v", description="this is an option") self.assertDictEqual( option.to_dict(), { "text": { "type": "plain_text", "text": "label", "emoji": True, }, "value": "v", "description": { "type": "plain_text", "text": "this is an option", "emoji": True, }, }, ) option = Option( # Note that mrkdwn type is not allowed for this (as of April 2021) text=PlainTextObject(text="label"), value="v", description="this is an option", ) self.assertDictEqual( option.to_dict(), { "text": {"type": "plain_text", "text": "label"}, "value": "v", "description": { "type": "plain_text", "text": "this is an option", "emoji": True, }, }, ) def test_valid_description_for_attachments(self): option = Option(label="label", value="v", description="this is an option") # legacy message actions in attachments self.assertDictEqual( option.to_dict("action"), { "text": "label", "value": "v", "description": "this is an option", }, ) self.assertDictEqual( option.to_dict("attachment"), { "text": "label", "value": "v", "description": "this is an option", }, ) class OptionGroupTests(unittest.TestCase): maxDiff = None def setUp(self) -> None: self.common_options = [ Option.from_single_value("one"), Option.from_single_value("two"), Option.from_single_value("three"), ] self.common = OptionGroup(label="an option", options=self.common_options) def test_block_style_json(self): expected = { "label": {"emoji": True, "text": "an option", "type": "plain_text"}, "options": [ { "text": {"emoji": True, "text": "one", "type": "plain_text"}, "value": "one", }, { "text": {"emoji": True, "text": "two", "type": "plain_text"}, "value": "two", }, { "text": {"emoji": True, "text": "three", "type": "plain_text"}, "value": "three", }, ], } self.assertDictEqual(expected, self.common.to_dict("block")) self.assertDictEqual(expected, self.common.to_dict()) def test_dialog_style_json(self): self.assertDictEqual( { "label": "an option", "options": [ {"label": "one", "value": "one"}, {"label": "two", "value": "two"}, {"label": "three", "value": "three"}, ], }, self.common.to_dict("dialog"), ) def test_action_style_json(self): self.assertDictEqual( { "text": "an option", "options": [ {"text": "one", "value": "one"}, {"text": "two", "value": "two"}, {"text": "three", "value": "three"}, ], }, self.common.to_dict("action"), ) def test_label_length(self): with self.assertRaises(SlackObjectFormationError): OptionGroup(label=STRING_301_CHARS, options=self.common_options).to_dict( "text" ) def test_options_length(self): with self.assertRaises(SlackObjectFormationError): OptionGroup(label="option_group", options=self.common_options * 34).to_dict( "text" ) def test_confirm_style(self): obj = ConfirmObject.parse( { "title": {"type": "plain_text", "text": "Are you sure?"}, "text": { "type": "mrkdwn", "text": "Wouldn't you prefer a good game of _chess_?", }, "confirm": {"type": "plain_text", "text": "Do it"}, "deny": {"type": "plain_text", "text": "Stop, I've changed my mind!"}, "style": "primary", } ) obj.validate_json() self.assertEqual("primary", obj.style) def test_confirm_style_validation(self): with self.assertRaises(SlackObjectFormationError): ConfirmObject.parse( { "title": {"type": "plain_text", "text": "Are you sure?"}, "text": { "type": "mrkdwn", "text": "Wouldn't you prefer a good game of _chess_?", }, "confirm": {"type": "plain_text", "text": "Do it"}, "deny": { "type": "plain_text", "text": "Stop, I've changed my mind!", }, "style": "something-wrong", } ).validate_json()
{ "content_hash": "8635639b7c1e680444dc0b51e5d9461f", "timestamp": "", "source": "github", "line_count": 578, "max_line_length": 88, "avg_line_length": 34.21453287197232, "alnum_prop": 0.537671925566343, "repo_name": "slackapi/python-slackclient", "id": "f04057ee64183c665507232a47b3bb69b6dd05df", "size": "19776", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "tests/slack_sdk/models/test_objects.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "7756" }, { "name": "HTML", "bytes": "5961" }, { "name": "Makefile", "bytes": "7656" }, { "name": "Python", "bytes": "360940" }, { "name": "Shell", "bytes": "110" } ], "symlink_target": "" }
import ldns import sys import os import inspect class_name = "ldns_resolver" method_name = None error_detected = False temp_fname = "tmp_resolver.txt" def set_error(): """ Writes an error message and sets error flag. """ global class_name global method_name global error_detected error_detected = True sys.stderr.write("(line %d): malfunctioning method %s.\n" % \ (inspect.currentframe().f_back.f_lineno, method_name)) #if not error_detected: if True: method_name = class_name + ".axfr_complete()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".axfr_last_pkt()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".axfr_next()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".axfr_start()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".debug()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_debug(False) try: ret = resolver.debug() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_debug(True) try: ret = resolver.debug() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".dec_nameserver_count()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") cnt = resolver.nameserver_count() try: resolver.dec_nameserver_count() except: set_error() if cnt != (resolver.nameserver_count() + 1): set_error() #if not error_detected: if True: method_name = class_name + ".defnames()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_defnames(False) try: ret = resolver.defnames() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_defnames(True) try: ret = resolver.defnames() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".dnsrch()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_dnsrch(False) try: ret = resolver.dnsrch() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_dnsrch(True) try: ret = resolver.dnsrch() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".dnssec()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_dnssec(False) try: ret = resolver.dnssec() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_dnssec(True) try: ret = resolver.dnssec() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".dnssec_anchors()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") rrl = ldns.ldns_rr_list.new() try: ret = resolver.dnssec_anchors() if ret != None: set_error() except: set_error() resolver.set_dnssec_anchors(rrl) try: ret = resolver.dnssec_anchors() if not isinstance(ret, ldns.ldns_rr_list): set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".dnssec_cd()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_dnssec_cd(False) try: ret = resolver.dnssec_cd() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_dnssec_cd(True) try: ret = resolver.dnssec_cd() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".domain()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_domain(None) try: ret = resolver.domain() if ret != None: set_error() except: set_error() dname = ldns.ldns_dname("example.com.") resolver.set_domain(dname) try: ret = resolver.domain() if not isinstance(ret, ldns.ldns_dname): set_error() if ret != dname: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".edns_udp_size()" try: resolver = ldns.ldns_resolver.new() if not isinstance(resolver, ldns.ldns_resolver): set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".edns_udp_size()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_edns_udp_size(4096) try: ret = resolver.edns_udp_size() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 4096: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".fail()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_fail(False) try: ret = resolver.fail() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_fail(True) try: ret = resolver.fail() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".fallback()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_fallback(False) try: ret = resolver.fallback() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_fallback(True) try: ret = resolver.fallback() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".get_addr_by_name()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: ret = resolver.get_addr_by_name("www.google.com", ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) if not isinstance(ret, ldns.ldns_rr_list): set_error() except: set_error() try: ret = resolver.get_addr_by_name(1, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.get_addr_by_name("www.google.com", "bad argument", ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.get_addr_by_name("www.google.com", ldns.LDNS_RR_CLASS_IN, "bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".get_name_by_addr()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: addr = resolver.get_name_by_addr("8.8.8.8", ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) if not isinstance(addr, ldns.ldns_rr_list): set_error() except: set_error() try: addr = resolver.get_name_by_addr(1, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: addr = resolver.get_name_by_addr("8.8.8.8", "bad argument", ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: addr = resolver.get_name_by_addr("8.8.8.8", ldns.LDNS_RR_CLASS_IN, "bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".igntc()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_igntc(False) try: ret = resolver.igntc() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_igntc(True) try: ret = resolver.igntc() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".incr_nameserver_count()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") cnt = resolver.nameserver_count() try: resolver.incr_nameserver_count() except: set_error() if (cnt + 1) != resolver.nameserver_count(): set_error() #if not error_detected: if True: method_name = class_name + ".ip6()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_ip6(0) try: ret = resolver.ip6() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 0: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".nameserver_count()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_nameserver_count(1) try: ret = resolver.nameserver_count() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 1: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".nameserver_rtt()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") num = resolver.nameserver_count() for i in range(0, num): resolver.set_nameserver_rtt(i, i + 1) try: for i in range(0, num): ret = resolver.nameserver_rtt(i) if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if (i + 1) != ret: set_error() except: set_error() try: ret = resolver.nameserver_rtt("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".nameservers()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".nameservers_randomize()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.nameservers_randomize() except: set_error() #if not error_detected: if True: method_name = class_name + ".new_frm_file()" try: ret = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf", raiseException=True) if not isinstance(ret, ldns.ldns_resolver): set_error() except: set_error() try: ret = ldns.ldns_resolver.new_frm_file(1, raiseException=True) set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".new_frm_fp()" fi = open("/etc/resolv.conf") try: ret = ldns.ldns_resolver.new_frm_fp(fi, raiseException=True) if not isinstance(ret, ldns.ldns_resolver): set_error() except: set_error() fi.close() try: ret = ldns.ldns_resolver.new_frm_fp(1, raiseException=True) set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".new_frm_fp_l()" fi = open("/etc/resolv.conf") try: ret, line = ldns.ldns_resolver.new_frm_fp_l(fi, raiseException=True) if not isinstance(ret, ldns.ldns_resolver): set_error() if (not isinstance(line, int)) and (not isinstance(line, long)): set_error() except: set_error() fi.close() try: ret, line = ldns.ldns_resolver.new_frm_fp_l(1, raiseException=True) set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".pop_nameserver()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") cnt = resolver.nameserver_count() try: for i in range(0, cnt): ret = resolver.pop_nameserver() if not isinstance(ret, ldns.ldns_rdf): set_error() except: set_error() try: ret = resolver.pop_nameserver() if ret != None: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".port()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_port(12345) try: ret = resolver.port() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 12345: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".prepare_query_pkt()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: ret = resolver.prepare_query_pkt("example.com.", ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD, raiseException=True) if not isinstance(ret, ldns.ldns_pkt): set_error() except: set_error() try: ret = resolver.prepare_query_pkt(1, ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD, raiseException=True) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.prepare_query_pkt("example.com.", "bad argument", ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD, raiseException=True) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.prepare_query_pkt("example.com.", ldns.LDNS_RR_TYPE_A, "bad argument", ldns.LDNS_RD, raiseException=True) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.prepare_query_pkt("example.com.", ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, "bad argument", raiseException=True) set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".push_dnssec_anchor()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") key = ldns.ldns_key.new_frm_algorithm(ldns.LDNS_SIGN_DSA, 512) domain = ldns.ldns_dname("example.") key.set_pubkey_owner(domain) pubkey = key.key_to_rr() ds = ldns.ldns_key_rr2ds(pubkey, ldns.LDNS_SHA1) try: ret = resolver.push_dnssec_anchor(ds) if ret != ldns.LDNS_STATUS_OK: set_error() except: set_error() rr = ldns.ldns_rr.new_frm_str("test1 600 IN A 0.0.0.0") try: ret = resolver.push_dnssec_anchor(rr) if ret == ldns.LDNS_STATUS_OK: set_error() except: set_error() try: ret = resolver.push_dnssec_anchor("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".push_nameserver()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") rdf = ldns.ldns_rdf.new_frm_str("127.0.0.1", ldns.LDNS_RDF_TYPE_A) try: ret = resolver.push_nameserver(rdf) if ret != ldns.LDNS_STATUS_OK: set_error() except: set_error() rdf = ldns.ldns_rdf.new_frm_str("::1", ldns.LDNS_RDF_TYPE_AAAA) try: ret = resolver.push_nameserver(rdf) if ret != ldns.LDNS_STATUS_OK: set_error() except: set_error() rdf = ldns.ldns_rdf.new_frm_str("example.com.", ldns.LDNS_RDF_TYPE_DNAME) try: ret = resolver.push_nameserver(rdf) if ret == ldns.LDNS_STATUS_OK: set_error() except: set_error() try: ret = resolver.push_nameserver("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".push_nameserver_rr()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") rr = ldns.ldns_rr.new_frm_str("test 600 IN A 127.0.0.1") try: ret = resolver.push_nameserver_rr(rr) if ret != ldns.LDNS_STATUS_OK: set_error() except: set_error() rr = ldns.ldns_rr.new_frm_str("test 600 IN AAAA ::1") try: ret = resolver.push_nameserver_rr(rr) if ret != ldns.LDNS_STATUS_OK: set_error() except: set_error() rr = ldns.ldns_rr.new_frm_str("test 600 IN NS 8.8.8.8") try: ret = resolver.push_nameserver_rr(rr) if ret == ldns.LDNS_STATUS_OK: set_error() except: set_error() try: ret = resolver.push_nameserver_rr("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".push_nameserver_rr_list()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") rrl = ldns.ldns_rr_list.new() rr = ldns.ldns_rr.new_frm_str("test 600 IN A 127.0.0.1") rrl.push_rr(rr) try: ret = resolver.push_nameserver_rr_list(rrl) if ret != ldns.LDNS_STATUS_OK: set_error() except: set_error() rrl = ldns.ldns_rr_list.new() rr = ldns.ldns_rr.new_frm_str("test 600 IN AAAA ::1") rrl.push_rr(rr) try: ret = resolver.push_nameserver_rr_list(rrl) if ret != ldns.LDNS_STATUS_OK: set_error() except: set_error() rrl = ldns.ldns_rr_list.new() rr = ldns.ldns_rr.new_frm_str("test 600 IN NS 8.8.8.8") rrl.push_rr(rr) try: ret = resolver.push_nameserver_rr_list(rrl) if ret == ldns.LDNS_STATUS_OK: set_error() except: set_error() try: ret = resolver.push_nameserver_rr_list("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".push_searchlist()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.push_searchlist("example.com.") try: resolver.push_searchlist("example.com.") except: set_error() try: resolver.push_searchlist(1) set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".query()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: ret = resolver.query("www.nic.cz", ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) if not isinstance(ret, ldns.ldns_pkt): set_error() except: set_error() try: ret = resolver.query(1, ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.query("www.nic.cz", "bad argument", ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.query("www.nic.cz", ldns.LDNS_RR_TYPE_A, "bad argument", ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.query("www.nic.cz", ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, "bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".random()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_random(False) try: ret = resolver.random() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_random(True) try: ret = resolver.random() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".recursive()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_recursive(False) try: ret = resolver.recursive() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_recursive(True) try: ret = resolver.recursive() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".retrans()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_retrans(127) try: ret = resolver.retrans() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 127: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".retry()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_retry(4) try: ret = resolver.retry() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 4: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".rtt()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".search()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: ret = resolver.search("www.nic.cz", ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) if not isinstance(ret, ldns.ldns_pkt): set_error() except: set_error() try: ret = resolver.search(1, ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.search("www.nic.cz", "bad argument", ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.search("www.nic.cz", ldns.LDNS_RR_TYPE_A, "bad argument", ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.search("www.nic.cz", ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, "bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".searchlist()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".searchlist_count()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: ret = resolver.searchlist_count() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 0: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".send()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: ret = resolver.send("www.nic.cz", ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) if not isinstance(ret, ldns.ldns_pkt): set_error() except: set_error() try: ret = resolver.send(1, ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.send("www.nic.cz", "bad argument", ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.send("www.nic.cz", ldns.LDNS_RR_TYPE_A, "bad argument", ldns.LDNS_RD) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.send("www.nic.cz", ldns.LDNS_RR_TYPE_A, ldns.LDNS_RR_CLASS_IN, "bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".send_pkt()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") pkt = ldns.ldns_pkt.new_query_frm_str("test.nic.cz",ldns.LDNS_RR_TYPE_ANY, ldns.LDNS_RR_CLASS_IN, ldns.LDNS_RD | ldns.LDNS_AD) try: status, ret = resolver.send_pkt(pkt) if status != ldns.LDNS_STATUS_OK: ste_error() if not isinstance(ret, ldns.ldns_pkt): set_error() except: set_error() try: status, ret = resolver.send_pkt("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_debug()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_debug(False) ret = resolver.debug() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_debug(True) ret = resolver.debug() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".set_defnames()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_defnames(False) ret = resolver.defnames() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_defnames(True) ret = resolver.defnames() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".set_dnsrch()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_dnsrch(False) ret = resolver.dnsrch() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_dnsrch(True) ret = resolver.dnsrch() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".set_dnssec()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_dnssec(False) ret = resolver.dnssec() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_dnssec(True) ret = resolver.dnssec() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".set_dnssec_anchors()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") rrl = ldns.ldns_rr_list.new() try: resolver.set_dnssec_anchors(rrl) ret = resolver.dnssec_anchors() if not isinstance(ret, ldns.ldns_rr_list): set_error() except: set_error() try: resolver.set_dnssec_anchors("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_dnssec_cd()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_dnssec_cd(False) ret = resolver.dnssec_cd() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_dnssec_cd(True) ret = resolver.dnssec_cd() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".set_domain()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_domain(None) ret = resolver.domain() if ret != None: set_error() except: set_error() dname = ldns.ldns_dname("example.com.") try: resolver.set_domain(dname) ret = resolver.domain() if not isinstance(ret, ldns.ldns_dname): set_error() if ret != dname: set_error() except: set_error() rdf = ldns.ldns_rdf.new_frm_str("example.com.", ldns.LDNS_RDF_TYPE_DNAME) try: resolver.set_domain(rdf) ret = resolver.domain() if not isinstance(ret, ldns.ldns_rdf): set_error() if ret != dname: set_error() except: set_error() resolver.set_domain("example.com.") try: resolver.set_domain("example.com.") ret = resolver.domain() if not isinstance(ret, ldns.ldns_dname): set_error() if ret != dname: set_error() except: set_error() rdf = ldns.ldns_rdf.new_frm_str("127.0.0.1", ldns.LDNS_RDF_TYPE_A) try: resolver.set_domain(rdf) set_error() except Exception as e: pass except: set_error() try: resolver.set_domain(1) set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_edns_udp_size()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_edns_udp_size(4096) ret = resolver.edns_udp_size() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 4096: set_error() except: set_error() try: resolver.set_edns_udp_size("bad argument") set_error() except TypeError as e: pass except: ste_error() #if not error_detected: if True: method_name = class_name + ".set_fail()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_fail(False) ret = resolver.fail() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_fail(True) ret = resolver.fail() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".set_fallback()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_fallback(False) ret = resolver.fallback() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_fallback(True) ret = resolver.fallback() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".set_igntc()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_igntc(False) ret = resolver.igntc() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_igntc(True) ret = resolver.igntc() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".set_ip6()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_ip6(1) ret = resolver.ip6() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 1: set_error() except: set_error() try: resolver.set_ip6("bad argument") set_error() except TypeError as e: pass except: ste_error() #if not error_detected: if True: method_name = class_name + ".set_nameserver_count()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_nameserver_count(2) ret = resolver.nameserver_count() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 2: set_error() except: set_error() try: resolver.set_nameserver_count("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_nameserver_rtt()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") num = resolver.nameserver_count() try: for i in range(0, num): resolver.set_nameserver_rtt(i, i + 1) ret = resolver.nameserver_rtt(i) if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if (i + 1) != ret: set_error() except: set_error() try: ret = resolver.set_nameserver_rtt("bad argument", 0) set_error() except TypeError as e: pass except: set_error() try: ret = resolver.set_nameserver_rtt(0, "bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_nameservers()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".set_port()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_port(12345) ret = resolver.port() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 12345: set_error() except: set_error() try: resolver.set_port("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_random()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_random(False) ret = resolver.random() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_random(True) ret = resolver.random() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".set_recursive()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_recursive(False) ret = resolver.recursive() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_recursive(True) ret = resolver.recursive() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".set_retrans()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_retrans(127) ret = resolver.retrans() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 127: set_error() except: set_error() try: resolver.set_retrans("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_retry()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_retry(4) ret = resolver.retry() if (not isinstance(ret, int)) and (not isinstance(ret, long)): set_error() if ret != 4: set_error() except: set_error() try: resolver.set_retry("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_rtt()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".set_timeout()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".set_tsig_algorithm()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") tsigstr = "hmac-md5.sig-alg.reg.int." try: resolver.set_tsig_algorithm(tsigstr) ret = resolver.tsig_algorithm() if not isinstance(ret, str): set_error() if ret != tsigstr: set_error() except: set_error() try: resolver.set_tsig_algorithm(1) set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_tsig_keydata()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") tkdstr = "Humpty Dumpty sat on a wall, Humpty Dumpty had a great fall, All the King's horses and all the King's men, Couldn't put Humpty together again." try: resolver.set_tsig_keydata(tkdstr) ret = resolver.tsig_keydata() if not isinstance(ret, str): set_error() if ret != tkdstr: set_error() except: set_error() try: resolver.set_tsig_keydata(1) set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_tsig_keyname()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") tknstr = "key 1" try: resolver.set_tsig_keyname(tknstr) ret = resolver.tsig_keyname() if not isinstance(ret, str): set_error() if ret != tknstr: set_error() except: set_error() try: resolver.set_tsig_keyname(1) set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".set_usevc()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: resolver.set_usevc(False) ret = resolver.usevc() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() try: resolver.set_usevc(True) ret = resolver.usevc() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".timeout()" sys.stderr.write("%s not tested.\n" % (method_name)) #if not error_detected: if True: method_name = class_name + ".trusted_key()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") key = ldns.ldns_key.new_frm_algorithm(ldns.LDNS_SIGN_DSA, 512) domain = ldns.ldns_dname("example.") key.set_pubkey_owner(domain) pubkey = key.key_to_rr() ds = ldns.ldns_key_rr2ds(pubkey, ldns.LDNS_SHA1) resolver.push_dnssec_anchor(ds) rrl = ldns.ldns_rr_list.new() try: ret = resolver.trusted_key(rrl) if ret != None: set_error() except: set_error() rrl.push_rr(ds) ret = resolver.trusted_key(rrl) try: ret = resolver.trusted_key(rrl) if not isinstance(ret, ldns.ldns_rr_list): set_error() if ret.rr_count() != 1: set_error() except: set_error() try: ret = resolver.trusted_key("bad argument") set_error() except TypeError as e: pass except: set_error() #if not error_detected: if True: method_name = class_name + ".tsig_algorithm()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: ret = resolver.tsig_algorithm() if ret != None: set_error() except: set_error() tsigstr = "hmac-md5.sig-alg.reg.int." resolver.set_tsig_algorithm(tsigstr) try: ret = resolver.tsig_algorithm() if not isinstance(ret, str): set_error() if ret != tsigstr: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".tsig_keydata()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: ret = resolver.tsig_keydata() if ret != None: set_error() except: set_error() tkdstr = "Twas brillig, and the slithy toves Did gyre and gimble in the wabe; All mimsy were the borogoves, And the mome raths outgrabe." resolver.set_tsig_keydata(tkdstr) try: ret = resolver.tsig_keydata() if not isinstance(ret, str): set_error() if ret != tkdstr: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".tsig_keyname()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") try: ret = resolver.tsig_keyname() if ret != None: set_error() except: set_error() tknstr = "key 2" resolver.set_tsig_keyname(tknstr) try: ret = resolver.tsig_keyname() if not isinstance(ret, str): set_error() if ret != tknstr: set_error() except: set_error() #if not error_detected: if True: method_name = class_name + ".usevc()" resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf") resolver.set_usevc(False) try: ret = resolver.usevc() if not isinstance(ret, bool): set_error() if ret != False: set_error() except: set_error() resolver.set_usevc(True) try: ret = resolver.usevc() if not isinstance(ret, bool): set_error() if ret != True: set_error() except: set_error() if not error_detected: sys.stdout.write("%s: passed.\n" % (os.path.basename(__file__))) else: sys.stdout.write("%s: errors detected.\n" % (os.path.basename(__file__))) sys.exit(1)
{ "content_hash": "299a5d4e3bb2d8c9b872175e5ed1d5f5", "timestamp": "", "source": "github", "line_count": 1741, "max_line_length": 157, "avg_line_length": 25.9075244112579, "alnum_prop": 0.5575878505708901, "repo_name": "mbuij/ldns-cga-tsig", "id": "274a4d87364f225a3b75e9158affc090c494da77", "size": "45261", "binary": false, "copies": "8", "ref": "refs/heads/master", "path": "contrib/python/examples/test_resolver.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "1773128" }, { "name": "C++", "bytes": "32132" }, { "name": "CSS", "bytes": "9080" }, { "name": "Emacs Lisp", "bytes": "125" }, { "name": "Lua", "bytes": "9777" }, { "name": "Perl", "bytes": "9272" }, { "name": "Python", "bytes": "1190935" }, { "name": "Shell", "bytes": "48704" }, { "name": "VimL", "bytes": "19597" } ], "symlink_target": "" }
"""Nodes for PPAPI IDL AST""" # # IDL Node # # IDL Node defines the IDLAttribute and IDLNode objects which are constructed # by the parser as it processes the various 'productions'. The IDLAttribute # objects are assigned to the IDLNode's property dictionary instead of being # applied as children of The IDLNodes, so they do not exist in the final tree. # The AST of IDLNodes is the output from the parsing state and will be used # as the source data by the various generators. # import hashlib import sys from idl_log import ErrOut, InfoOut, WarnOut from idl_propertynode import IDLPropertyNode from idl_namespace import IDLNamespace from idl_release import IDLRelease, IDLReleaseMap # IDLAttribute # # A temporary object used by the parsing process to hold an Extended Attribute # which will be passed as a child to a standard IDLNode. # class IDLAttribute(object): def __init__(self, name, value): self.cls = 'ExtAttribute' self.name = name self.value = value def __str__(self): return '%s=%s' % (self.name, self.value) # # IDLNode # # This class implements the AST tree, providing the associations between # parents and children. It also contains a namepsace and propertynode to # allow for look-ups. IDLNode is derived from IDLRelease, so it is # version aware. # class IDLNode(IDLRelease): # Set of object IDLNode types which have a name and belong in the namespace. NamedSet = set(['Enum', 'EnumItem', 'File', 'Function', 'Interface', 'Member', 'Param', 'Struct', 'Type', 'Typedef']) show_versions = False def __init__(self, cls, filename, lineno, pos, children=None): # Initialize with no starting or ending Version IDLRelease.__init__(self, None, None) self.cls = cls self.lineno = lineno self.pos = pos self.filename = filename self.filenode = None self.hashes = {} self.deps = {} self.errors = 0 self.namespace = None self.typelist = None self.parent = None self.property_node = IDLPropertyNode() # A list of unique releases for this node self.releases = None # A map from any release, to the first unique release self.first_release = None # self.children is a list of children ordered as defined self.children = [] # Process the passed in list of children, placing ExtAttributes into the # property dictionary, and nodes into the local child list in order. In # addition, add nodes to the namespace if the class is in the NamedSet. if not children: children = [] for child in children: if child.cls == 'ExtAttribute': self.SetProperty(child.name, child.value) else: self.AddChild(child) # # String related functions # # # Return a string representation of this node def __str__(self): name = self.GetName() ver = IDLRelease.__str__(self) if name is None: name = '' if not IDLNode.show_versions: ver = '' return '%s(%s%s)' % (self.cls, name, ver) # Return file and line number for where node was defined def Location(self): return '%s(%d)' % (self.filename, self.lineno) # Log an error for this object def Error(self, msg): self.errors += 1 ErrOut.LogLine(self.filename, self.lineno, 0, ' %s %s' % (str(self), msg)) if self.filenode: errcnt = self.filenode.GetProperty('ERRORS', 0) self.filenode.SetProperty('ERRORS', errcnt + 1) # Log a warning for this object def Warning(self, msg): WarnOut.LogLine(self.filename, self.lineno, 0, ' %s %s' % (str(self), msg)) def GetName(self): return self.GetProperty('NAME') def GetNameVersion(self): name = self.GetProperty('NAME', default='') ver = IDLRelease.__str__(self) return '%s%s' % (name, ver) # Dump this object and its children def Dump(self, depth=0, comments=False, out=sys.stdout): if self.cls in ['Comment', 'Copyright']: is_comment = True else: is_comment = False # Skip this node if it's a comment, and we are not printing comments if not comments and is_comment: return tab = ''.rjust(depth * 2) if is_comment: out.write('%sComment\n' % tab) for line in self.GetName().split('\n'): out.write('%s "%s"\n' % (tab, line)) else: ver = IDLRelease.__str__(self) if self.releases: release_list = ': ' + ' '.join(self.releases) else: release_list = ': undefined' out.write('%s%s%s%s\n' % (tab, self, ver, release_list)) if self.typelist: out.write('%s Typelist: %s\n' % (tab, self.typelist.GetReleases()[0])) properties = self.property_node.GetPropertyList() if properties: out.write('%s Properties\n' % tab) for p in properties: if is_comment and p == 'NAME': # Skip printing the name for comments, since we printed above already continue out.write('%s %s : %s\n' % (tab, p, self.GetProperty(p))) for child in self.children: child.Dump(depth+1, comments=comments, out=out) # # Search related functions # # Check if node is of a given type def IsA(self, *typelist): if self.cls in typelist: return True return False # Get a list of objects for this key def GetListOf(self, *keys): out = [] for child in self.children: if child.cls in keys: out.append(child) return out def GetOneOf(self, *keys): out = self.GetListOf(*keys) if out: return out[0] return None def SetParent(self, parent): self.property_node.AddParent(parent) self.parent = parent def AddChild(self, node): node.SetParent(self) self.children.append(node) # Get a list of all children def GetChildren(self): return self.children # Get a list of all children of a given version def GetChildrenVersion(self, version): out = [] for child in self.children: if child.IsVersion(version): out.append(child) return out # Get a list of all children in a given range def GetChildrenRange(self, vmin, vmax): out = [] for child in self.children: if child.IsRange(vmin, vmax): out.append(child) return out def FindVersion(self, name, version): node = self.namespace.FindNode(name, version) if not node and self.parent: node = self.parent.FindVersion(name, version) return node def FindRange(self, name, vmin, vmax): nodes = self.namespace.FindNodes(name, vmin, vmax) if not nodes and self.parent: nodes = self.parent.FindVersion(name, vmin, vmax) return nodes def GetType(self, release): if not self.typelist: return None return self.typelist.FindRelease(release) def GetHash(self, release): hashval = self.hashes.get(release, None) if hashval is None: hashval = hashlib.sha1() hashval.update(self.cls) for key in self.property_node.GetPropertyList(): val = self.GetProperty(key) hashval.update('%s=%s' % (key, str(val))) typeref = self.GetType(release) if typeref: hashval.update(typeref.GetHash(release)) for child in self.GetChildren(): if child.IsA('Copyright', 'Comment', 'Label'): continue if not child.IsRelease(release): continue hashval.update( child.GetHash(release) ) self.hashes[release] = hashval return hashval.hexdigest() def GetDeps(self, release): deps = self.deps.get(release, None) if deps is None: deps = set([self]) for child in self.GetChildren(): deps |= child.GetDeps(release) typeref = self.GetType(release) if typeref: deps |= typeref.GetDeps(release) self.deps[release] = deps return deps def GetVersion(self, release): filenode = self.GetProperty('FILE') if not filenode: return None return filenode.release_map.GetVersion(release) def GetUniqueReleases(self, releases): my_min, my_max = self.GetMinMax(releases) if my_min > releases[-1] or my_max < releases[0]: return [] out = set() for rel in releases: remapped = self.first_release[rel] if not remapped: continue if remapped < releases[0]: remapped = releases[0] out |= set([remapped]) out = sorted(out) return out def GetRelease(self, version): filenode = self.GetProperty('FILE') if not filenode: return None return filenode.release_map.GetRelease(version) def _GetReleases(self, releases): if not self.releases: my_min, my_max = self.GetMinMax(releases) my_releases = [my_min] if my_max != releases[-1]: my_releases.append(my_max) my_releases = set(my_releases) for child in self.GetChildren(): if child.IsA('Copyright', 'Comment', 'Label'): continue my_releases |= child.GetReleases(releases) self.releases = my_releases return self.releases def _GetReleaseList(self, releases): if not self.releases: # If we are unversionable, then return first available release if self.IsA('Comment', 'Copyright', 'Label'): self.releases = [] return self.releases # Generate the first and if deprecated within this subset, the # last release for this node my_min, my_max = self.GetMinMax(releases) if my_max != releases[-1]: my_releases = set([my_min, my_max]) else: my_releases = set([my_min]) # Files inherit all there releases from items in the file if self.IsA('AST', 'File'): my_releases = set() child_releases = set() for child in self.children: child_releases |= set(child._GetReleaseList(releases)) type_releases = set() if self.typelist: type_list = self.typelist.GetReleases() for typenode in type_list: type_releases |= set(typenode._GetReleaseList(releases)) type_release_list = sorted(type_releases) if my_min < type_release_list[0]: type_node = type_list[0] self.Error('requires %s in %s which is undefined at %s.' % ( type_node, type_node.filename, my_min)) for rel in child_releases: if rel >= my_min and rel <= my_max: my_releases |= set([rel]) self.releases = sorted(my_releases) return self.releases def GetReleaseList(self): return self.releases def BuildReleaseMap(self, releases): unique_list = self._GetReleaseList(releases) my_min, my_max = self.GetMinMax(releases) self.first_release = {} last_rel = None for rel in releases: if rel in unique_list: last_rel = rel self.first_release[rel] = last_rel if rel == my_max: last_rel = None def SetProperty(self, name, val): self.property_node.SetProperty(name, val) def GetProperty(self, name, default=None): return self.property_node.GetProperty(name, default) def Traverse(self, data, func): func(self, data) for child in self.children: child.Traverse(data, func) # # IDLFile # # A specialized version of IDLNode which tracks errors and warnings. # class IDLFile(IDLNode): def __init__(self, name, children, errors=0): attrs = [IDLAttribute('NAME', name), IDLAttribute('ERRORS', errors)] if not children: children = [] IDLNode.__init__(self, 'File', name, 1, 0, attrs + children) self.release_map = IDLReleaseMap([('M13', 1.0)]) # # Tests # def StringTest(): errors = 0 name_str = 'MyName' text_str = 'MyNode(%s)' % name_str name_node = IDLAttribute('NAME', name_str) node = IDLNode('MyNode', 'no file', 1, 0, [name_node]) if node.GetName() != name_str: ErrOut.Log('GetName returned >%s< not >%s<' % (node.GetName(), name_str)) errors += 1 if node.GetProperty('NAME') != name_str: ErrOut.Log('Failed to get name property.') errors += 1 if str(node) != text_str: ErrOut.Log('str() returned >%s< not >%s<' % (str(node), text_str)) errors += 1 if not errors: InfoOut.Log('Passed StringTest') return errors def ChildTest(): errors = 0 child = IDLNode('child', 'no file', 1, 0) parent = IDLNode('parent', 'no file', 1, 0, [child]) if child.parent != parent: ErrOut.Log('Failed to connect parent.') errors += 1 if [child] != parent.GetChildren(): ErrOut.Log('Failed GetChildren.') errors += 1 if child != parent.GetOneOf('child'): ErrOut.Log('Failed GetOneOf(child)') errors += 1 if parent.GetOneOf('bogus'): ErrOut.Log('Failed GetOneOf(bogus)') errors += 1 if not parent.IsA('parent'): ErrOut.Log('Expecting parent type') errors += 1 parent = IDLNode('parent', 'no file', 1, 0, [child, child]) if [child, child] != parent.GetChildren(): ErrOut.Log('Failed GetChildren2.') errors += 1 if not errors: InfoOut.Log('Passed ChildTest') return errors def Main(): errors = StringTest() errors += ChildTest() if errors: ErrOut.Log('IDLNode failed with %d errors.' % errors) return -1 return 0 if __name__ == '__main__': sys.exit(Main())
{ "content_hash": "501cf9b11985e94673ac7d2966ee14f1", "timestamp": "", "source": "github", "line_count": 450, "max_line_length": 79, "avg_line_length": 29.031111111111112, "alnum_prop": 0.6396968769136558, "repo_name": "junmin-zhu/chromium-rivertrail", "id": "34c4d22d4190e60442596fcf87d7c87f58346395", "size": "13253", "binary": false, "copies": "1", "ref": "refs/heads/v8-binding", "path": "ppapi/generators/idl_node.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "ASP", "bytes": "853" }, { "name": "AppleScript", "bytes": "6973" }, { "name": "Arduino", "bytes": "464" }, { "name": "Assembly", "bytes": "1172794" }, { "name": "Awk", "bytes": "9519" }, { "name": "C", "bytes": "75806807" }, { "name": "C#", "bytes": "1132" }, { "name": "C++", "bytes": "145161929" }, { "name": "DOT", "bytes": "1559" }, { "name": "F#", "bytes": "381" }, { "name": "Java", "bytes": "1546515" }, { "name": "JavaScript", "bytes": "18675242" }, { "name": "Logos", "bytes": "4517" }, { "name": "Matlab", "bytes": "5234" }, { "name": "Objective-C", "bytes": "6981387" }, { "name": "PHP", "bytes": "97817" }, { "name": "Perl", "bytes": "926245" }, { "name": "Python", "bytes": "8088373" }, { "name": "R", "bytes": "262" }, { "name": "Ragel in Ruby Host", "bytes": "3239" }, { "name": "Shell", "bytes": "1513486" }, { "name": "Tcl", "bytes": "277077" }, { "name": "XML", "bytes": "13493" } ], "symlink_target": "" }
import visa from re import match class Agilent81150: def __init__(self, gpib): ''' Initialize device ''' self.device = visa.instrument("GPIB::%d" %(gpib)) if (not self.__TestConnection()): print "No function generator on this GPIB channel..." return None else: print "Function generator found" def __TestConnection(self): ''' Test if we have the right device by matching id number ''' id = self.device.ask("*IDN?") if (match(".*,81150A,.*", id)): found = True else: found = False return found def write(self, command): ''' Connect to VISA write ''' self.device.write(command) def read(self): ''' Connect to VISA read ''' return self.device.read() def ask(self, command): ''' Connect to VISA ask ''' return self.device.ask(command)
{ "content_hash": "3db3cb510614bdd6440dc8c0a1cb90c6", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 70, "avg_line_length": 27.083333333333332, "alnum_prop": 0.5241025641025641, "repo_name": "imrehg/labhardware", "id": "6ac3e9dd43e8ee449b8517fb4626c8e770fc6363", "size": "975", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "drivers/agilent81150.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "535761" }, { "name": "Shell", "bytes": "348" } ], "symlink_target": "" }
from snovault import ( AuditFailure, audit_checker, ) from .formatter import ( audit_link, path_to_text, ) @audit_checker('Annotation', frame='object') def audit_annotation_organism(value, system): ''' Annotations need their organism to be specified This should eventually go to required schema element ''' if value['status'] in ['replaced', 'revoked', 'deleted']: return if 'organism' not in value: detail = ('Annotation {} lacks organism information.'.format(audit_link(path_to_text(value['@id']), value['@id']))) raise AuditFailure('missing organism', detail, level='INTERNAL_ACTION') @audit_checker('Annotation') def audit_annotation_derived_from_revoked(value, system): ''' Annotations with files derived from a revoked file should be flagged with an audit. ''' request = system.get('request') for file in value.get('files'): if 'derived_from' in file: for f in file['derived_from']: parent = request.embed(f + '@@object?skip_calculated=true') if parent['status'] == 'revoked': detail = ( f'Annotation {audit_link(path_to_text(value["@id"]), value["@id"])} ' f'includes a file {audit_link(path_to_text(file["@id"]), file["@id"])} ' f'that was derived from a revoked file {audit_link(path_to_text(f), f)}.' ) raise AuditFailure('derived from revoked file', detail, level='WARNING') @audit_checker('Annotation', frame='object') def audit_annotation_missing_original_files(value, system): ''' Annotations should have original_files present ''' if value['status'] in ['replaced', 'revoked', 'deleted']: return original_files = value.get('original_files', []) if original_files == []: detail = f'Annotation {audit_link(path_to_text(value["@id"]), value["@id"])} has no original_files submitted.' raise AuditFailure('missing original_files', detail, level='INTERNAL_ACTION')
{ "content_hash": "604fed279eede5aeb0477b6366040ade", "timestamp": "", "source": "github", "line_count": 55, "max_line_length": 123, "avg_line_length": 38.14545454545455, "alnum_prop": 0.6101048617731173, "repo_name": "ENCODE-DCC/encoded", "id": "a26f7ce0790a46c756a5827d2cfa994838b6bef5", "size": "2098", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "src/encoded/audit/annotation.py", "mode": "33188", "license": "mit", "language": [ { "name": "AngelScript", "bytes": "741" }, { "name": "Dockerfile", "bytes": "1988" }, { "name": "Gherkin", "bytes": "48806" }, { "name": "HTML", "bytes": "371973" }, { "name": "JavaScript", "bytes": "3493156" }, { "name": "Jsonnet", "bytes": "15159" }, { "name": "Makefile", "bytes": "875" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "2845978" }, { "name": "SCSS", "bytes": "403800" }, { "name": "Shell", "bytes": "30525" } ], "symlink_target": "" }
import pandas as pd import io import unittest from collections import OrderedDict from skbio import TabularMSA, Protein, DNA, RNA from skbio.io import StockholmFormatError from skbio.io.format.stockholm import (_stockholm_to_tabular_msa, _tabular_msa_to_stockholm, _stockholm_sniffer) from skbio.util import get_data_path class TestStockholmSniffer(unittest.TestCase): def setUp(self): self.positives = [get_data_path(e) for e in [ 'stockholm_extensive', 'stockholm_minimal', 'stockholm_rna', 'stockholm_runon_gf_with_whitespace', 'stockholm_runon_gf_no_whitespace', 'stockholm_duplicate_sequence_names', 'stockholm_duplicate_gr', 'stockholm_duplicate_gc', 'stockholm_invalid_nonexistent_gr', 'stockholm_invalid_nonexistent_gs', 'stockholm_no_data', 'stockholm_blank_lines', 'stockholm_differing_gc_data_length', 'stockholm_differing_gr_data_length', 'stockholm_differing_seq_lengths', 'stockholm_duplicate_sequence_names', 'stockholm_duplicate_tree_ids', 'stockholm_extensive_mixed', 'stockholm_invalid_data_type', 'stockholm_malformed_gf_line', 'stockholm_malformed_gs_line', 'stockholm_malformed_gr_line', 'stockholm_malformed_gc_line', 'stockholm_malformed_data_line', 'stockholm_metadata_only', 'stockholm_multiple_msa', 'stockholm_multiple_trees', 'stockholm_runon_gs_with_whitespace', 'stockholm_runon_gs_no_whitespace', 'stockholm_single_tree_with_id', 'stockholm_single_tree_without_id', 'stockholm_whitespace_only_lines', 'stockholm_all_data_types', 'stockholm_two_of_each_metadata', 'stockholm_data_only', 'stockholm_nonstring_labels', 'stockholm_missing_reference_items', 'stockholm_multiple_references', 'stockholm_runon_references', 'stockholm_runon_references_mixed', 'stockholm_single_reference', 'stockholm_missing_reference_items', 'stockholm_missing_rn_tag', 'stockholm_different_padding', 'stockholm_multi_line_tree_no_id', 'stockholm_multi_line_tree_with_id', 'stockholm_multiple_multi_line_trees' ]] self.negatives = [get_data_path(e) for e in [ 'stockholm_missing_header', 'empty', 'whitespace_only' ]] def test_positives(self): for fp in self.positives: self.assertEqual(_stockholm_sniffer(fp), (True, {})) def test_negatives(self): for fp in self.negatives: self.assertEqual(_stockholm_sniffer(fp), (False, {})) class TestStockholmReader(unittest.TestCase): def test_stockholm_extensive(self): fp = get_data_path('stockholm_extensive') msa = _stockholm_to_tabular_msa(fp, constructor=Protein) exp = TabularMSA([Protein('MTCRAQLIAVPRASSLAE..AIACAQKM....' 'RVSRVPVYERS', positional_metadata={'SA': list('9998877564' '53524252..' '55152525..' '..36463774' '777')}), Protein('EVMLTDIPRLHINDPIMK..GFGMVINN....' '..GFVCVENDE', metadata={'OS': 'Bacillus subtilis'}, positional_metadata={'SS': list('CCCCCCCHHHH' 'HHHHHHH..HE' 'EEEEEE....E' 'EEEEEE' 'EEEH')}), Protein('EVMLTDIPRLHINDPIMK..GFGMVINN...' '...GFVCVENDE', positional_metadata={'AS': list('___________' '_____*_____' '___________' '________' '__'), 'IN': list('___________' '_1_________' '_____2_____' '_____0_' '___')})], metadata={'ID': 'CBS', 'AC': 'PF00571', 'AU': 'Bateman A', 'SQ': '67'}, positional_metadata={'SS_cons': list('CCCCCHHHHHHHH' 'HHHHH..EEEEEE' 'EE....EEEEEEE' 'EEEH')}, index=['O83071/192-246', 'O31698/88-139', 'O31699/88-139']) self.assertEqual(msa, exp) def test_stockholm_extensive_mixed(self): fp = get_data_path('stockholm_extensive_mixed') msa = _stockholm_to_tabular_msa(fp, constructor=Protein) exp = TabularMSA([Protein('MTCRAQLIAVPRASSLAE..AIACAQKM....' 'RVSRVPVYERS', positional_metadata={'SA': list('9998877564' '53524252..' '55152525..' '..36463774' '777')}), Protein('EVMLTDIPRLHINDPIMK..GFGMVINN....' '..GFVCVENDE', metadata={'OS': 'Bacillus subtilis'}, positional_metadata={'SS': list('CCCCCCCHHHH' 'HHHHHHH..HE' 'EEEEEE....E' 'EEEEEE' 'EEEH')}), Protein('EVMLTDIPRLHINDPIMK..GFGMVINN...' '...GFVCVENDE', positional_metadata={'AS': list('___________' '_____*_____' '___________' '________' '__'), 'IN': list('___________' '_1_________' '_____2_____' '_____0_' '___')})], metadata={'ID': 'CBS', 'AC': 'PF00571', 'AU': 'Bateman A', 'SQ': '67'}, positional_metadata={'SS_cons': list('CCCCCHHHHHHHH' 'HHHHH..EEEEEE' 'EE....EEEEEEE' 'EEEH')}, index=['O83071/192-246', 'O31698/88-139', 'O31699/88-139']) self.assertEqual(msa, exp) def test_stockholm_minimal(self): fp = get_data_path('stockholm_minimal') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([DNA('TGTGTCGCAGTTGTCGTTTG')], index=['0235244']) self.assertEqual(msa, exp) def test_stockholm_rna(self): fp = get_data_path('stockholm_rna') msa = _stockholm_to_tabular_msa(fp, constructor=RNA) exp = TabularMSA([RNA('AAGGGUUAUUUAUAUACUUU'), RNA('UGCUAAGAGUGGGGAUGAUU'), RNA('GCCACAACCGAUUAGAUAGA'), RNA('UUAGAAACCGAUGGACCGAA')], metadata={'AC': 'G2134T23', 'ID': 'ARD'}, positional_metadata=( {'AC_cons': list('GGGACUGGACAUCUAUUCAG')}), index=['RTC2231', 'RTF2124', 'RTH3322', 'RTB1512']) self.assertEqual(msa, exp) def test_stockholm_runon_gf(self): fp = get_data_path('stockholm_runon_gf_no_whitespace') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([DNA('ACTGGTTCAATG')], metadata={'CC': 'CBS domains are small intracellular' ' modules mostly found in 2 or four ' 'copies within a protein.'}, index=['GG1344']) self.assertEqual(msa, exp) fp = get_data_path('stockholm_runon_gf_with_whitespace') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) self.assertEqual(msa, exp) def test_stockholm_runon_gs(self): fp = get_data_path('stockholm_runon_gs_no_whitespace') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([DNA('ATCGTTCAGTG', metadata={'LN': 'This is a runon GS line.'})], index=['seq1']) self.assertEqual(msa, exp) fp = get_data_path('stockholm_runon_gs_with_whitespace') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) self.assertEqual(msa, exp) def test_stockholm_metadata_only(self): fp = get_data_path('stockholm_metadata_only') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([], metadata={'NM': 'Kestrel Gorlick', 'DT': 'February 5th, 2016'}) self.assertEqual(msa, exp) def test_stockholm_no_data(self): fp = get_data_path('stockholm_no_data') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([]) self.assertEqual(msa, exp) def test_stockholm_with_blank_lines(self): fp = get_data_path('stockholm_blank_lines') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([], metadata={'AL': 'ABCD', 'NM': '1234'}) self.assertEqual(msa, exp) def test_stockholm_with_whitespace_only_lines(self): fp = get_data_path('stockholm_whitespace_only_lines') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([], metadata={'AL': 'ABCD', 'NM': '1234'}) self.assertEqual(msa, exp) def test_stockholm_single_tree_without_id(self): fp = get_data_path('stockholm_single_tree_without_id') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([], metadata={'NH': 'ABCD'}) self.assertEqual(msa, exp) def test_stockholm_single_tree_with_id(self): fp = get_data_path('stockholm_single_tree_with_id') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([], metadata={'NH': {'tree1': 'ABCD'}}) self.assertEqual(msa, exp) def test_stockholm_multiple_trees(self): fp = get_data_path('stockholm_multiple_trees') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([], metadata={'NH': {'tree1': 'ABCD', 'tree2': 'EFGH', 'tree3': 'IJKL'}}) self.assertEqual(msa, exp) def test_stockhom_single_reference(self): fp = get_data_path('stockholm_single_reference') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA( [], metadata={'RN': [OrderedDict([('RM', '123456789'), ('RT', 'A Title'), ('RA', 'The Author'), ('RL', 'A Location'), ('RC', 'Comment')])]}) self.assertEqual(msa, exp) def test_stockholm_multiple_references(self): fp = get_data_path('stockholm_multiple_references') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA( [], metadata={'RN': [OrderedDict([('RM', '123456789'), ('RT', 'Title 1'), ('RA', 'Author 1'), ('RL', 'Location 1'), ('RC', 'Comment 1')]), OrderedDict([('RM', '987654321'), ('RT', 'Title 2'), ('RA', 'Author 2'), ('RL', 'Location 2'), ('RC', 'Comment 2')]), OrderedDict([('RM', '132465879'), ('RT', 'Title 3'), ('RA', 'Author 3'), ('RL', 'Location 3'), ('RC', 'Comment 3')])]}) self.assertEqual(msa, exp) def test_stockholm_runon_references(self): fp = get_data_path('stockholm_runon_references') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA( [], metadata={'RN': [OrderedDict([('RM', '123456789'), ('RT', 'A Runon Title'), ('RA', 'The Author'), ('RL', 'A Location'), ('RC', 'A Runon Comment')])]}) self.assertEqual(msa, exp) def test_stockholm_mixed_runon_references(self): fp = get_data_path('stockholm_runon_references_mixed') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA( [], metadata={'RN': [OrderedDict([('RC', 'A Runon Comment'), ('RM', '123456789'), ('RT', 'A Runon Title'), ('RA', 'The Author'), ('RL', 'A Location')])]}) self.assertEqual(msa, exp) def test_stockholm_to_msa_different_padding(self): fp = get_data_path('stockholm_different_padding') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA( [], metadata={'RN': [OrderedDict([('RC', 'A Runon Comment Without ' 'Whitespace')]), OrderedDict([('RC', 'A Runon Comment With ' 'Whitespace')])]}) self.assertEqual(msa, exp) def test_stockholm_handles_missing_reference_items(self): fp = get_data_path('stockholm_missing_reference_items') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA( [], metadata={'RN': [OrderedDict([('RT', 'A Title'), ('RA', 'The Author')])]}) self.assertEqual(msa, exp) def test_stockholm_multi_line_tree_no_id(self): fp = get_data_path('stockholm_multi_line_tree_no_id') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([], metadata={'NH': 'ABCDEFGH'}) self.assertEqual(msa, exp) def test_stockholm_multiple_multi_line_trees(self): fp = get_data_path('stockholm_multiple_multi_line_trees') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([], metadata={'NH': {'tree1': 'ABCDEFGH', 'tree2': 'IJKLMNOP'}}) self.assertEqual(msa, exp) def test_stockholm_multi_line_tree_with_id(self): fp = get_data_path('stockholm_multi_line_tree_with_id') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) exp = TabularMSA([], metadata={'NH': {'tree1': 'ABCDEFGH'}}) self.assertEqual(msa, exp) def test_multiple_msa_file(self): fp = get_data_path('stockholm_multiple_msa') msa = _stockholm_to_tabular_msa(fp, constructor=RNA) exp = TabularMSA([RNA('AAGGGUUAUUUAUAUACUUU'), RNA('UGCUAAGAGUGGGGAUGAUU'), RNA('GCCACAACCGAUUAGAUAGA'), RNA('UUAGAAACCGAUGGACCGAA')], metadata={'AC': 'G2134T23', 'ID': 'ARD'}, positional_metadata=( {'AC_cons': list('GGGACUGGACAUCUAUUCAG')}), index=['RTC2231', 'RTF2124', 'RTH3322', 'RTB1512']) self.assertEqual(msa, exp) def test_stockholm_maintains_order(self): fp = get_data_path('stockholm_two_of_each_metadata') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) msa_order = list(msa.metadata.items()) exp_order = [('NM', 'Kestrel Gorlick'), ('DT', 'February 5th, 2016')] self.assertEqual(msa_order, exp_order) msa_order = list(msa[0].metadata.items()) exp_order = [('AL', 'ABCD'), ('NS', '1234')] self.assertEqual(msa_order, exp_order) msa_order = list(msa.positional_metadata.columns) exp_order = ['SS_cons', 'AS_cons'] self.assertEqual(msa_order, exp_order) msa_order = list(msa[0].positional_metadata.columns) exp_order = ['SS', 'AS'] self.assertEqual(msa_order, exp_order) def test_stockholm_duplicate_tree_id_error(self): fp = get_data_path('stockholm_duplicate_tree_ids') with self.assertRaisesRegex(StockholmFormatError, r'Tree.*tree1.*in file.'): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_stockholm_missing_reference_number_error(self): fp = get_data_path('stockholm_missing_rn_tag') with self.assertRaisesRegex(StockholmFormatError, r"Expected 'RN'.*'RL' tag."): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_nonexistent_gr_error(self): fp = get_data_path('stockholm_invalid_nonexistent_gr') with self.assertRaisesRegex(StockholmFormatError, r'GS or GR.*nonexistent ' 'sequence.*RL1355.'): _stockholm_to_tabular_msa(fp, constructor=RNA) def test_nonexistent_gs_error(self): fp = get_data_path('stockholm_invalid_nonexistent_gs') with self.assertRaisesRegex(StockholmFormatError, r'GS or GR.*nonexistent sequence.*AC14.'): _stockholm_to_tabular_msa(fp, constructor=RNA) def test_duplicate_sequence_names_error(self): fp = get_data_path('stockholm_duplicate_sequence_names') with self.assertRaisesRegex( StockholmFormatError, r'duplicate sequence name.*ASR132.*supported by the reader.'): _stockholm_to_tabular_msa(fp, constructor=RNA) def test_duplicate_gr_error(self): fp = get_data_path('stockholm_duplicate_gr') with self.assertRaisesRegex(StockholmFormatError, r'Found duplicate GR.*OS.*LFDR3.*supported' ' by the reader.'): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_duplicate_gc_error(self): fp = get_data_path('stockholm_duplicate_gc') with self.assertRaisesRegex(StockholmFormatError, r'Found duplicate GC.*SS_cons.*supported ' 'by the reader.'): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_empty_file_error(self): fp = get_data_path('empty') with self.assertRaisesRegex(StockholmFormatError, r'File is empty.'): _stockholm_to_tabular_msa(fp, constructor=RNA) def test_missing_header_error(self): fp = get_data_path('stockholm_missing_header') with self.assertRaisesRegex(StockholmFormatError, r'File missing.*header'): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_missing_footer_error(self): fp = get_data_path('stockholm_missing_footer') with self.assertRaisesRegex(StockholmFormatError, r'Final line.*only "//".'): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_data_type_error(self): fp = get_data_path('stockholm_invalid_data_type') with self.assertRaisesRegex(StockholmFormatError, r"Unrecognized.*'#=GZ"): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_malformed_gf_line_error(self): fp = get_data_path('stockholm_malformed_gf_line') with self.assertRaisesRegex(StockholmFormatError, r'Line contains 2.*must contain.*3.'): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_malformed_gs_line_error(self): fp = get_data_path('stockholm_malformed_gs_line') with self.assertRaisesRegex(StockholmFormatError, r'Line contains 3.*must contain.*4.'): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_malformed_gr_line_error(self): fp = get_data_path('stockholm_malformed_gr_line') with self.assertRaisesRegex(StockholmFormatError, r'Line contains 2.*must contain.*4.'): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_malformed_gc_line_error(self): fp = get_data_path('stockholm_malformed_gc_line') with self.assertRaisesRegex(StockholmFormatError, r'Line contains 2.*must contain.*3.'): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_malformed_data_line_error(self): fp = get_data_path('stockholm_malformed_data_line') with self.assertRaisesRegex(StockholmFormatError, r'Line contains 1.*must contain.*2.'): _stockholm_to_tabular_msa(fp, constructor=DNA) def test_differing_sequence_lengths_error(self): fp = get_data_path('stockholm_differing_seq_lengths') with self.assertRaisesRegex(ValueError, r'Each sequence.*11 != 10'): _stockholm_to_tabular_msa(fp, constructor=RNA) def test_differing_data_lengths_gr_error(self): fp = get_data_path('stockholm_differing_gr_data_length') with self.assertRaisesRegex(ValueError, r'Number.*7.*(8).'): _stockholm_to_tabular_msa(fp, constructor=RNA) def test_differing_data_lengths_gc_error(self): fp = get_data_path('stockholm_differing_gc_data_length') with self.assertRaisesRegex(ValueError, r'Number.*12.*(10).'): _stockholm_to_tabular_msa(fp, constructor=RNA) def test_no_constructor_error(self): fp = get_data_path('empty') with self.assertRaisesRegex(ValueError, r'Must provide.*parameter.'): _stockholm_to_tabular_msa(fp) def test_unsupported_constructor_error(self): fp = get_data_path('empty') with self.assertRaisesRegex(TypeError, r'`constructor`.*`GrammaredSequence`.'): _stockholm_to_tabular_msa(fp, constructor=TabularMSA) class TestStockholmWriter(unittest.TestCase): def test_msa_to_stockholm_extensive(self): fp = get_data_path('stockholm_all_data_types') msa = TabularMSA([DNA('GAGGCCATGCCCAGGTGAAG', metadata=OrderedDict([('DT', 'February 1, 2016'), ('NM', 'Unknown')])), DNA('ACCTGAGCCACAGTAGAAGT'), DNA('CCCTTCGCTGGAAATGTATG', metadata={'DT': 'Unknown'}, positional_metadata=OrderedDict([('AS', list('CCGAAAGT' 'CGTTCGA' 'AAATG')), ('SS', list('GGCGAGTC' 'GTTCGAGC' 'TGG' 'C'))]))], metadata=OrderedDict([('NM', 'Kestrel Gorlick'), ('DT', 'February 11, 2016'), ('FN', 'Writer test file')]), positional_metadata=OrderedDict([('AS_cons', list('CGTTCGTTCTAAC' 'AATTCCA')), ('SS_cons', list('GGCGCTACGACCT' 'ACGACCG'))]), index=['seq1', 'seq2', 'seq3']) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_msa_to_stockholm_minimal(self): fp = get_data_path('stockholm_minimal') msa = TabularMSA([DNA('TGTGTCGCAGTTGTCGTTTG')], index=['0235244']) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_msa_to_stockholm_single_tree(self): fp = get_data_path('stockholm_single_tree_without_id') msa = TabularMSA([], metadata=OrderedDict([('NH', 'ABCD')])) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_msa_to_stockholm_single_tree_as_dict(self): fp = get_data_path('stockholm_single_tree_with_id') msa = TabularMSA([], metadata={'NH': {'tree1': 'ABCD'}}) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_msa_to_stockholm_multiple_trees(self): fp = get_data_path('stockholm_multiple_trees') msa = TabularMSA([], metadata=OrderedDict([('NH', OrderedDict([('tree1', 'ABCD'), ('tree2', 'EFGH'), ('tree3', 'IJKL')]))])) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_msa_to_stockholm_single_reference(self): fp = get_data_path('stockholm_single_reference') msa = TabularMSA( [], metadata={'RN': [OrderedDict([('RM', '123456789'), ('RT', 'A Title'), ('RA', 'The Author'), ('RL', 'A Location'), ('RC', 'Comment')])]}) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_msa_to_stockholm_multiple_references(self): fp = get_data_path('stockholm_multiple_references') msa = TabularMSA( [], metadata={'RN': [OrderedDict([('RM', '123456789'), ('RT', 'Title 1'), ('RA', 'Author 1'), ('RL', 'Location 1'), ('RC', 'Comment 1')]), OrderedDict([('RM', '987654321'), ('RT', 'Title 2'), ('RA', 'Author 2'), ('RL', 'Location 2'), ('RC', 'Comment 2')]), OrderedDict([('RM', '132465879'), ('RT', 'Title 3'), ('RA', 'Author 3'), ('RL', 'Location 3'), ('RC', 'Comment 3')])]}) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_msa_to_stockholm_data_only(self): fp = get_data_path('stockholm_data_only') msa = TabularMSA([RNA('ACUCCGACAUGCUCC'), RNA('UAGUGCCGAACGCUG'), RNA('GUGUGGGCGUGAUUC')], index=['seq1', 'seq2', 'seq3']) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_msa_to_stockholm_nonstring_values(self): fp = get_data_path('stockholm_nonstring_labels') msa = TabularMSA([DNA('ACTG', metadata=OrderedDict([(8, 123)]), positional_metadata=OrderedDict([(1.0, [1, 2, 3, 4])]) )], metadata=OrderedDict([(1.3, 2857)]), positional_metadata=OrderedDict([(25, [4, 3, 2, 1])]), index=[11214]) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_msa_to_stockholm_empty(self): fp = get_data_path('stockholm_no_data') msa = TabularMSA([]) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_round_trip_extensive(self): fp = get_data_path('stockholm_extensive') msa = _stockholm_to_tabular_msa(fp, constructor=Protein) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_round_trip_minimal(self): fp = get_data_path('stockholm_minimal') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_round_trip_single_tree(self): fp = get_data_path('stockholm_single_tree_without_id') msa = _stockholm_to_tabular_msa(fp, constructor=Protein) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_round_trip_multiple_trees(self): fp = get_data_path('stockholm_multiple_trees') msa = _stockholm_to_tabular_msa(fp, constructor=Protein) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_round_trip_single_reference(self): fp = get_data_path('stockholm_single_reference') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_round_trip_multiple_references(self): fp = get_data_path('stockholm_multiple_references') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_round_trip_missing_references(self): fp = get_data_path('stockholm_missing_reference_items') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_round_trip_data_only(self): fp = get_data_path('stockholm_data_only') msa = _stockholm_to_tabular_msa(fp, constructor=RNA) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_round_trip_nonstring_index_values(self): fp = get_data_path('stockholm_nonstring_labels') msa = _stockholm_to_tabular_msa(fp, constructor=DNA) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_round_trip_empty(self): fp = get_data_path('stockholm_no_data') msa = _stockholm_to_tabular_msa(fp, constructor=Protein) fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() self.assertEqual(obs, exp) def test_unoriginal_index_error(self): msa = TabularMSA([DNA('ATCGCCAGCT'), DNA('TTGTGCTGGC')], index=['seq1', 'seq1']) with self.assertRaisesRegex(StockholmFormatError, r'index labels must be unique.'): fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) def test_unoriginal_gr_feature_names_error(self): pos_metadata_dataframe = pd.DataFrame( [ list('GAGCAAGCCACTAGA'), list('TCCTTGAACTACCCG'), list('TCAGCTCTGCAGCGT'), list('GTCAGGCGCTCGGTG') ], index=['AC', 'SS', 'AS', 'AC'] ).T msa = TabularMSA([DNA('CGTCAATCTCGAACT', positional_metadata=pos_metadata_dataframe)], index=['seq1']) with self.assertRaisesRegex(StockholmFormatError, r'Sequence-specific positional metadata.*' 'must be unique. Found 1 duplicate'): fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) def test_unoriginal_gc_feature_names_error(self): pos_metadata_dataframe = pd.DataFrame( [ list('GAGCAAGCCACTAGA'), list('TCCTTGAACTACCCG'), list('TCAGCTCTGCAGCGT'), list('GTCAGGCGCTCGGTG') ], index=['AC', 'SS', 'SS', 'AC'] ).T msa = TabularMSA([DNA('CCCCTGCTTTCGTAG')], positional_metadata=pos_metadata_dataframe) with self.assertRaisesRegex(StockholmFormatError, r'Multiple sequence alignment positional ' 'metadata.*must be unique. Found 2 ' 'duplicate'): fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) def test_gr_wrong_dataframe_item_length_error(self): seq1 = list('GAGCAAGCCACTAGA') seq1.append('GG') pos_metadata_dataframe = pd.DataFrame({'AC': seq1, 'SS': list('TCCTTGAACTACCCGA'), 'AS': list('TCAGCTCTGCAGCGTT')}) msa = TabularMSA([DNA('TCCTTGAACTACCCGA', positional_metadata=pos_metadata_dataframe)]) with self.assertRaisesRegex(StockholmFormatError, r'Sequence-specific positional metadata.*' r'must contain a single character.*Found ' r'value\(s\) in column AC'): fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) def test_gc_wrong_dataframe_item_length_error(self): seq1 = list('GAGCAAGCCACTAGA') seq1.append('GG') pos_metadata_dataframe = pd.DataFrame({'AC': seq1, 'SS': list('TCCTTGAACTACCCGA'), 'AS': list('TCAGCTCTGCAGCGTT')}) msa = TabularMSA([DNA('TCCTTGAACTACCCGA')], positional_metadata=pos_metadata_dataframe) message = (r'Multiple sequence alignment positional metadata.*must ' r'contain a single character.*Found value\(s\) in column ' 'AC') with self.assertRaisesRegex(StockholmFormatError, message): fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) def test_rn_not_list_of_refs_error(self): msa = TabularMSA([], metadata={'RN': '1'}) with self.assertRaisesRegex(StockholmFormatError, r"Expected 'RN'.*list of reference" ".*got '1'"): fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) def test_rn_data_not_in_dict_error(self): msa = TabularMSA([], metadata={'RN': [OrderedDict([('RL', 'Flagstaff')]), 'Incorrect Item']}) with self.assertRaisesRegex(StockholmFormatError, r"Expected reference information.*stored" " as a dictionary, found.*2 stored as " "'str'"): fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) def test_invalid_reference_tag_error(self): msa = TabularMSA([], metadata={'RN': [OrderedDict([('RL', 'Flagstaff'), ('foo', 'bar')])]}) with self.assertRaisesRegex(StockholmFormatError, r"Invalid reference.*foo' found " "in.*1.*Valid reference tags are:"): fh = io.StringIO() _tabular_msa_to_stockholm(msa, fh) if __name__ == '__main__': unittest.main()
{ "content_hash": "5b5940e906d6bba951fcc5941b2425bb", "timestamp": "", "source": "github", "line_count": 899, "max_line_length": 79, "avg_line_length": 46.23692992213571, "alnum_prop": 0.460268963360358, "repo_name": "gregcaporaso/scikit-bio", "id": "665ca30ea391362d5b18398a5bd971a77239192e", "size": "41921", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "skbio/io/format/tests/test_stockholm.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "822164" }, { "name": "CSS", "bytes": "4379" }, { "name": "Cython", "bytes": "66355" }, { "name": "Dockerfile", "bytes": "904" }, { "name": "Jupyter Notebook", "bytes": "210926" }, { "name": "Makefile", "bytes": "1075" }, { "name": "Python", "bytes": "2960199" }, { "name": "Roff", "bytes": "471" } ], "symlink_target": "" }
from twisted.trial.unittest import TestCase from twisted.python.versions import Version from twisted.python.deprecate import _getDeprecationWarningString from twisted.python.deprecate import DEPRECATION_WARNING_FORMAT from axiom.store import Store from axiom.item import Item from axiom.attributes import integer, text, textlist, reference from axiom.dependency import installOn from xmantissa.website import WebSite from methanal import errors, model as mmodel from methanal.model import (Model, ItemModel, constraint, Value, Enum, List, loadFromItem) from methanal.view import (LiveForm, FormGroup, ItemView, GroupInput, IntegerInput) _marker = object() class MethanalTests(TestCase): def test_constraints(self): def _constraint(value): if value != 5: return u'Value must be 5' param = Value(name='param') param._constraint = constraint(_constraint) self.assertFalse(param.isValid(4)) self.assertTrue(param.isValid(5)) model = Model([param]) param.value = 3 self.assertRaises(errors.ConstraintError, model.process) def test_enumeration(self): param = Enum(name='param', values=range(5)) self.assertTrue(param.isValid(3)) self.assertFalse(param.isValid(10)) def test_processing(self): model = Model( params=[ Value(name='foo', value=4), Value(name='bar', value=u'quux')]) result = model.process() self.assertEquals(result, dict(foo=4, bar=u'quux')) def test_repr(self): """ L{methanal.model.Value} has a useful human-readable representation. """ self.assertEquals( repr(Value(name='foo', value=u'bar')), "<Value name='foo' value=u'bar' doc=u'foo'>") self.assertEquals( repr(Value(name='foo', value=u'bar', doc=u'Hello')), "<Value name='foo' value=u'bar' doc=u'Hello'>") class ParameterTests(TestCase): def test_enumerationValidation(self): param = List(name='foo') self.assertTrue(param.isValid([])) self.assertTrue(param.isValid(None)) self.assertFalse(param.isValid(5)) class _DummyItem(Item): i = integer(default=5) t = text(doc=u'param t') tl = textlist(doc=u'param tl') class _DummyChildItem(Item): i = integer(default=5) class _DummyParentItem(Item): r = reference(reftype=_DummyChildItem, doc=u'dummy reference') class _BrokenReference(Item): r = reference() class AttributeTests(TestCase): def test_valueParamNoDoc(self): param = Value.fromAttr(_DummyItem.i) self.assertIdentical(type(param), Value) self.assertEqual(param.name, 'i') self.assertEqual(param.value, 5) self.assertEqual(param.doc, 'i') def test_valueParam(self): param = Value.fromAttr(_DummyItem.t) self.assertIdentical(type(param), Value) self.assertEqual(param.name, 't') self.assertEqual(param.value, None) self.assertEqual(param.doc, 'param t') def test_listParam(self): param = List.fromAttr(_DummyItem.tl) self.assertIdentical(type(param), List) self.assertEqual(param.name, 'tl') self.assertEqual(param.value, None) self.assertEqual(param.doc, 'param tl') class ItemUtilityTests(TestCase): def test_loadFromItem(self): item = _DummyItem(i=55, t=u'lulz') model = Model(params=[Value.fromAttr(_DummyItem.i), Value.fromAttr(_DummyItem.t)]) loadFromItem(model, item) self.assertEqual(model.params['i'].value, item.i) self.assertEqual(model.params['t'].value, item.t) class AutoSchemaTests(TestCase): expectedParams = { 'i': (Value(name='i', doc=u'i'), 5, 5), 't': (Value(name='t', doc=u'param t'), None, u'text'), 'tl': (List(name='tl', doc=u'param tl'), None, [u'text1', u'text2']), } def setUp(self): self.store = Store() def test_schemaAnalysis(self): """ Test that parameters are correctly synthesized from an Item schema. """ store = Store() model = ItemModel(itemClass=_DummyItem, store=store) params = model.params self.assertEquals(params.keys(), self.expectedParams.keys()) for k in params: p1 = params[k] p2, classDefault, itemDefault = self.expectedParams[k] self.assertIdentical(type(p1), type(p2)) self.assertEquals(p1.name, p2.name) self.assertEquals(p1.value, classDefault) self.assertEquals(p1.doc, p2.doc) def test_schemaIgnore(self): """ Test that ignoredAttributes is respected. """ store = Store() model = ItemModel( itemClass=_DummyItem, store=store, ignoredAttributes=set(['tl'])) params = model.params self.assertNotIn('tl', params) def test_itemAnalysis(self): dummyItem = _DummyItem( store=self.store, i=5, t=u'text', tl=[u'text1', u'text2']) model = ItemModel(item=dummyItem) params = model.params for k in params: p1 = params[k] p2, classDefault, itemDefault = self.expectedParams[k] self.assertEquals(p1.value, itemDefault) def test_itemCreation(self): model = ItemModel(itemClass=_DummyItem, store=self.store) model.params['i'].value = 7 model.params['t'].value = u'foo' self.assertIdentical(model.item, None) model.process() self.assertEquals(model.item.i, 7) self.assertEquals(model.item.t, u'foo') def test_itemEditing(self): model = ItemModel(item=_DummyItem(store=self.store)) model.params['i'].value = 7 model.params['t'].value = u'foo' model.process() self.assertEquals(model.item.i, 7) self.assertEquals(model.item.t, u'foo') def test_referenceAttributeCreating(self): dummyParent = _DummyParentItem(store=self.store, r=None) model = ItemModel(dummyParent) self.assertIdentical(dummyParent.r, None) model.process() self.assertEquals(dummyParent.r.i, 5) def test_referenceAttributeEditing(self): dummyChild = _DummyChildItem(store=self.store, i=5) dummyParent = _DummyParentItem(store=self.store, r=dummyChild) model = ItemModel(dummyParent) self.assertIdentical(dummyParent.r, dummyChild) model.params['r'].model.params['i'].value = 6 model.process() self.assertIdentical(dummyParent.r, dummyChild) self.assertEquals(dummyChild.i, 6) def test_noRefType(self): """ Attempting to automatically synthesise a model for an Item with a C{reference} attribute with no C{'reftype'} raises C{ValueError}. """ brokenParent = _BrokenReference(store=self.store) self.assertRaises(ValueError, ItemModel, brokenParent) class _DummyControl(object): invoked = 0 def __init__(self, parent): parent.addFormChild(self) def invoke(self, data): self.invoked += 1 class LiveFormTests(TestCase): def setUp(self): s = self.store = Store() installOn(WebSite(store=s), s) self.model = Model( params=[ Value(name='foo', value=4), Value(name='bar', value=u'quux')]) def test_process(self): view = LiveForm(self.store, self.model) control = _DummyControl(view) view.invoke({}) self.assertEquals(control.invoked, 1) def test_groups(self): view = LiveForm(self.store, self.model) group = FormGroup(view) control = _DummyControl(group) view.invoke({}) self.assertEquals(control.invoked, 1) class _DummyParameter(object): name = u'DUMMY_PARAMETER' value = u'DUMMY_PARAMETER_VALUE' doc = '' class _DummyLiveForm(object): page = None liveFragmentChildren = [] model = Model(params=[]) def addFormChild(self, *args): pass def getParameter(self, name): return _DummyParameter() class _GroupTestView(ItemView): def __init__(self, *args, **kw): super(_GroupTestView, self).__init__(*args, **kw) group = GroupInput(parent=self, name='r') IntegerInput(parent=group, name='i') class GroupInputTests(TestCase): def setUp(self): self.store = Store() installOn(WebSite(store=self.store), self.store) def test_editing(self): dummyChild = _DummyChildItem(store=self.store) dummyParent = _DummyParentItem(store=self.store, r=dummyChild) view = _GroupTestView(item=dummyParent) self.assertIdentical(dummyParent.r, dummyChild) view.invoke({u'r': {u'i': 6}}) self.assertIdentical(dummyParent.r, dummyChild) self.assertEquals(dummyChild.i, 6) def test_creation(self): dummyParent = _DummyParentItem(store=self.store, r=None) view = _GroupTestView(item=dummyParent, switchInPlace=True) self.assertIdentical(dummyParent.r, None) view.invoke({u'r': {u'i': 6}}) self.assertEquals(dummyParent.r.i, 6) class ModelTests(TestCase): def test_attach(self): model = Model() p = Value(name='foo') model.attach(p) self.assertIdentical(model.params['foo'], p) def test_attachMany(self): model = Model() p1 = Value(name='foo') p2 = Value(name='bar') model.attach(p1, p2) self.assertIdentical(model.params['foo'], p1) self.assertIdentical(model.params['bar'], p2) class DeprecatedAttributesTests(TestCase): """ Tests for deprecated attributes in L{methanal.model}. """ version020 = Version('methanal', 0, 2, 0) def _getWarningString(self, obj, name, version): """ Create the warning string used by deprecated attributes. """ return _getDeprecationWarningString( obj.__name__ + '.' + name, version, DEPRECATION_WARNING_FORMAT + ': ') def assertDeprecated(self, obj, name, version): """ Assert that the attribute C{name} on C{obj} was deprecated in C{version}, by testing whether a deprecation warning was issued. """ getattr(obj, name) warningsShown = self.flushWarnings([ self.assertDeprecated]) self.assertEquals(len(warningsShown), 1) self.assertIdentical(warningsShown[0]['category'], DeprecationWarning) self.assertIn(self._getWarningString(obj, name, version), warningsShown[0]['message']) def test_valueParameter(self): """ L{methanal.model.ValueParameter} is deprecated. """ self.assertDeprecated(mmodel, 'ValueParameter', self.version020) def test_listParameter(self): """ L{methanal.model.ListParameter} is deprecated. """ self.assertDeprecated(mmodel, 'ListParameter', self.version020) def test_enumerationParameter(self): """ L{methanal.model.EnumerationParameter} is deprecated. """ self.assertDeprecated(mmodel, 'EnumerationParameter', self.version020) def test_decimalParameter(self): """ L{methanal.model.DecimalParameter} is deprecated. """ self.assertDeprecated(mmodel, 'DecimalParameter', self.version020) def test_storeIDParameter(self): """ L{methanal.model.StoreIDParameter} is deprecated. """ self.assertDeprecated(mmodel, 'StoreIDParameter', self.version020) def test_multiEnumerationParameter(self): """ L{methanal.model.MultiEnumerationParameter} is deprecated. """ self.assertDeprecated(mmodel, 'MultiEnumerationParameter', self.version020)
{ "content_hash": "daafd9398342e9274969cb622c07f553", "timestamp": "", "source": "github", "line_count": 433, "max_line_length": 83, "avg_line_length": 27.74364896073903, "alnum_prop": 0.6203279780238076, "repo_name": "fusionapp/methanal", "id": "979bd470bc7562016af22f3c8bc996ffa660bc87", "size": "12013", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "methanal/test/test_model.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "15540" }, { "name": "HTML", "bytes": "10966" }, { "name": "JavaScript", "bytes": "371928" }, { "name": "Python", "bytes": "267198" } ], "symlink_target": "" }
from functools import wraps from django.db.models import Max from tars.api.utils import str2bool, convert_status from tars.deployment.constants import HALTED, SUCCESS from tars.deployment.models import TarsDeployment, TarsFortDeployment def fort_batch(param='fort_batch'): def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): queryset = func(*args, **kwargs) request = args[1] fort_batch_id = None if isinstance(args[0], TarsFortDeployment): fort_batch_id = args[0].get_fort_batch().id is_fort_batch = str2bool(request.QUERY_PARAMS.get(param)) if is_fort_batch is not None: if is_fort_batch: queryset = queryset.filter(id=fort_batch_id) else: queryset = queryset.exclude(id=fort_batch_id) return queryset return func_wrapper return decorator def running(param='running'): def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): queryset = func(*args, **kwargs) request = args[1] is_running = str2bool(request.QUERY_PARAMS.get(param)) if is_running is not None: if is_running: return queryset.exclude(status__in=HALTED) else: return queryset.filter(status__in=HALTED) return queryset return func_wrapper return decorator def last_success_deployment(param='last_success'): def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): queryset = func(*args, **kwargs) request = args[1] last_success = str2bool(request.QUERY_PARAMS.get(param)) if last_success: queryset = queryset.order_by( '-created_at').filter(status=SUCCESS)[:1] return queryset return func_wrapper return decorator def status(param='status'): def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): queryset = func(*args, **kwargs) request = args[1] query_param_status = request.QUERY_PARAMS.get(param) if query_param_status is not None: statuses = query_param_status.split(',') queryset = queryset.filter(status__in=statuses) return queryset return func_wrapper return decorator def deployment(param='deployment'): def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): queryset = func(*args, **kwargs) request = args[1] deployment_id = request.QUERY_PARAMS.get(param) if deployment_id is not None: queryset = queryset.filter( pk=TarsDeployment.objects.get(pk=deployment_id).package_id) return queryset return func_wrapper return decorator def last_success_package(param='last_success'): def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): queryset = func(*args, **kwargs) request = args[1] last_success = str2bool(request.QUERY_PARAMS.get(param)) if last_success: last_success_ids = queryset.filter(status=SUCCESS).annotate(max_pk=Max('pk')) queryset = queryset.filter(pk__in=last_success_ids.values('max_pk')) return queryset return func_wrapper return decorator def created_from(param='created_from'): def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): queryset = func(*args, **kwargs) request = args[1] query_param_date = request.QUERY_PARAMS.get(param) if query_param_date is not None: queryset = queryset.filter(created_at__gte=query_param_date) return queryset return func_wrapper return decorator def created_before(param='created_before'): def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): queryset = func(*args, **kwargs) request = args[1] query_param_date = request.QUERY_PARAMS.get(param) if query_param_date is not None: queryset = queryset.filter(created_at__lt=query_param_date) return queryset return func_wrapper return decorator def ids(param, field): def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): queryset = func(*args, **kwargs) request = args[1] query_param_id = request.QUERY_PARAMS.get(param) if query_param_id is not None: ids = query_param_id.split(',') kwargs = {'{0}__in'.format(field): ids} queryset = queryset.filter(**kwargs) return queryset return func_wrapper return decorator def app_status(func): @wraps(func) def func_wrapper(*args, **kwargs): queryset = func(*args, **kwargs) request = args[1] query_param_status = request.QUERY_PARAMS.get('status') if query_param_status is not None: from django.db.models import F from tars.deployment.models import Deployment statuses = query_param_status.split(',') statuses = convert_status(statuses) app_ids = [d['group__application_id'] for d in Deployment.objects .annotate(max_deployment=Max('group__deployments__id')) .filter(id=F('max_deployment'), status__in=statuses) .values('group__application_id')] return queryset.filter(id__in=app_ids) return queryset return func_wrapper def log_request(logger=None): def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): request = args[1] body = request.body if logger is not None: logger.info("url: {}, body: {}".format(request.path, body)) return func(*args, **kwargs) return func_wrapper return decorator def clean_request_data(func): @wraps(func) def func_wrapper(*args, **kwargs): request = args[1] data = request.data request._full_data = {k: v for k, v in data.items() if v is not None} return func(*args, **kwargs) return func_wrapper
{ "content_hash": "e2e9f7e0cc625817a5d6e748fd59dc4c", "timestamp": "", "source": "github", "line_count": 193, "max_line_length": 93, "avg_line_length": 34.082901554404145, "alnum_prop": 0.5717543326238979, "repo_name": "ctripcorp/tars", "id": "d201dde4d9e313f01e30c43651bd32a417b2ef8e", "size": "6578", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tars/api/decorators.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "372246" }, { "name": "Dockerfile", "bytes": "188" }, { "name": "HTML", "bytes": "175411" }, { "name": "JavaScript", "bytes": "1190261" }, { "name": "Makefile", "bytes": "1731" }, { "name": "Python", "bytes": "305797" }, { "name": "Shell", "bytes": "12737" } ], "symlink_target": "" }
import uuid from django.db import models class UrlconfRevision(models.Model): revision = models.CharField(max_length=255) class Meta: app_label = 'cms' def save(self, *args, **kwargs): """ Simply forces this model to be a singleton. """ self.pk = 1 super().save(*args, **kwargs) @classmethod def get_or_create_revision(cls, revision=None): """ Convenience method for getting or creating revision. """ if revision is None: revision = str(uuid.uuid4()) obj, created = cls.objects.get_or_create( pk=1, defaults=dict(revision=revision)) return obj.revision, created @classmethod def update_revision(cls, revision): """ Convenience method for updating the revision. """ obj, created = cls.objects.get_or_create( pk=1, defaults=dict(revision=revision)) if not created: obj.revision = revision obj.save()
{ "content_hash": "fa4df3b23f879fec47fd35327aef7378", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 60, "avg_line_length": 26.384615384615383, "alnum_prop": 0.5782312925170068, "repo_name": "datakortet/django-cms", "id": "38ef465ebc63688b4f8fd6e0ff91bb247dcae9dd", "size": "1029", "binary": false, "copies": "3", "ref": "refs/heads/develop", "path": "cms/models/apphooks_reload.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "203975" }, { "name": "JavaScript", "bytes": "1249081" }, { "name": "Python", "bytes": "2374270" }, { "name": "SCSS", "bytes": "137720" }, { "name": "Shell", "bytes": "22511" } ], "symlink_target": "" }
from distutils.core import setup setup( name='py-noembed', version='0.2', description='Python Wrapper over NoEmbed', author='Jacob Haslehurst', author_email='[email protected]', url='https://github.com/gizmag/py-noembed', packages=['noembed'], install_requires=['requests'] )
{ "content_hash": "b523889d4e37ee4b08216775d34c0aac", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 47, "avg_line_length": 26, "alnum_prop": 0.6730769230769231, "repo_name": "gizmag/py-noembed", "id": "1789ff8115fe2b127469311f631607cad90cb6fc", "size": "335", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "5222" } ], "symlink_target": "" }
from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from __future__ import print_function try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass # Note well: this class is only used for versions of python < 2.7 # Since 2.7 OrderDict is part of the collections module of the standard # library. It does not need to be python3 compatible and can # eventually removed when python versions <= 2.6 are no longer supported. # class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' call_key = id(self), _get_ident() if call_key in _repr_running: return '...' _repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self)
{ "content_hash": "a2a9c67dbf6131f8882bc347393c547f", "timestamp": "", "source": "github", "line_count": 265, "max_line_length": 87, "avg_line_length": 34.301886792452834, "alnum_prop": 0.5522552255225522, "repo_name": "bhardesty/qpid-dispatch", "id": "3d676123249db53a06ca752cd702c52842f60b83", "size": "10087", "binary": false, "copies": "2", "ref": "refs/heads/fix-xref", "path": "python/qpid_dispatch_internal/compat/ordereddict.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "1933791" }, { "name": "C++", "bytes": "58231" }, { "name": "CMake", "bytes": "43336" }, { "name": "CSS", "bytes": "57551" }, { "name": "Dockerfile", "bytes": "3278" }, { "name": "HTML", "bytes": "38799" }, { "name": "Java", "bytes": "1940" }, { "name": "JavaScript", "bytes": "1026704" }, { "name": "Python", "bytes": "2085723" }, { "name": "Shell", "bytes": "34107" } ], "symlink_target": "" }
from locustfile import CourseDiscoveryLocust
{ "content_hash": "05d5d8494b92946342886fa3f6416ef9", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 44, "avg_line_length": 45, "alnum_prop": 0.9111111111111111, "repo_name": "edx/edx-load-tests", "id": "5db6acc3537c688dcdd76bf139120a04afc5c00c", "size": "45", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "loadtests/course_discovery/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "2741" }, { "name": "JavaScript", "bytes": "9768" }, { "name": "Jupyter Notebook", "bytes": "4305" }, { "name": "Makefile", "bytes": "3964" }, { "name": "Python", "bytes": "284496" }, { "name": "Shell", "bytes": "5986" } ], "symlink_target": "" }
import sys from nose.plugins.skip import SkipTest sys.path[0:0] = [""] import datetime import unittest import uuid import math import itertools import re try: import dateutil except ImportError: dateutil = None from decimal import Decimal from bson import Binary, DBRef, ObjectId from mongoengine import * from mongoengine.connection import get_db from mongoengine.base import _document_registry from mongoengine.base.datastructures import BaseDict, EmbeddedDocumentList from mongoengine.errors import NotRegistered from mongoengine.python_support import PY3, b, bin_type __all__ = ("FieldTest", "EmbeddedDocumentListFieldTestCase") class FieldTest(unittest.TestCase): def setUp(self): connect(db='mongoenginetest') self.db = get_db() def tearDown(self): self.db.drop_collection('fs.files') self.db.drop_collection('fs.chunks') self.db.drop_collection('mongoengine.counters') def test_default_values_nothing_set(self): """Ensure that default field values are used when creating a document. """ class Person(Document): name = StringField() age = IntField(default=30, required=False) userid = StringField(default=lambda: 'test', required=True) created = DateTimeField(default=datetime.datetime.utcnow) person = Person(name="Ross") # Confirm saving now would store values data_to_be_saved = sorted(person.to_mongo().keys()) self.assertEqual( data_to_be_saved, ['age', 'created', 'name', 'userid']) self.assertTrue(person.validate() is None) self.assertEqual(person.name, person.name) self.assertEqual(person.age, person.age) self.assertEqual(person.userid, person.userid) self.assertEqual(person.created, person.created) self.assertEqual(person._data['name'], person.name) self.assertEqual(person._data['age'], person.age) self.assertEqual(person._data['userid'], person.userid) self.assertEqual(person._data['created'], person.created) # Confirm introspection changes nothing data_to_be_saved = sorted(person.to_mongo().keys()) self.assertEqual( data_to_be_saved, ['age', 'created', 'name', 'userid']) def test_default_values_set_to_None(self): """Ensure that default field values are used when creating a document. """ class Person(Document): name = StringField() age = IntField(default=30, required=False) userid = StringField(default=lambda: 'test', required=True) created = DateTimeField(default=datetime.datetime.utcnow) # Trying setting values to None person = Person(name=None, age=None, userid=None, created=None) # Confirm saving now would store values data_to_be_saved = sorted(person.to_mongo().keys()) self.assertEqual(data_to_be_saved, ['age', 'created', 'userid']) self.assertTrue(person.validate() is None) self.assertEqual(person.name, person.name) self.assertEqual(person.age, person.age) self.assertEqual(person.userid, person.userid) self.assertEqual(person.created, person.created) self.assertEqual(person._data['name'], person.name) self.assertEqual(person._data['age'], person.age) self.assertEqual(person._data['userid'], person.userid) self.assertEqual(person._data['created'], person.created) # Confirm introspection changes nothing data_to_be_saved = sorted(person.to_mongo().keys()) self.assertEqual(data_to_be_saved, ['age', 'created', 'userid']) def test_default_values_when_setting_to_None(self): """Ensure that default field values are used when creating a document. """ class Person(Document): name = StringField() age = IntField(default=30, required=False) userid = StringField(default=lambda: 'test', required=True) created = DateTimeField(default=datetime.datetime.utcnow) person = Person() person.name = None person.age = None person.userid = None person.created = None # Confirm saving now would store values data_to_be_saved = sorted(person.to_mongo().keys()) self.assertEqual(data_to_be_saved, ['age', 'created', 'userid']) self.assertTrue(person.validate() is None) self.assertEqual(person.name, person.name) self.assertEqual(person.age, person.age) self.assertEqual(person.userid, person.userid) self.assertEqual(person.created, person.created) self.assertEqual(person._data['name'], person.name) self.assertEqual(person._data['age'], person.age) self.assertEqual(person._data['userid'], person.userid) self.assertEqual(person._data['created'], person.created) # Confirm introspection changes nothing data_to_be_saved = sorted(person.to_mongo().keys()) self.assertEqual(data_to_be_saved, ['age', 'created', 'userid']) def test_default_values_when_deleting_value(self): """Ensure that default field values are used when creating a document. """ class Person(Document): name = StringField() age = IntField(default=30, required=False) userid = StringField(default=lambda: 'test', required=True) created = DateTimeField(default=datetime.datetime.utcnow) person = Person(name="Ross") del person.name del person.age del person.userid del person.created data_to_be_saved = sorted(person.to_mongo().keys()) self.assertEqual(data_to_be_saved, ['age', 'created', 'userid']) self.assertTrue(person.validate() is None) self.assertEqual(person.name, person.name) self.assertEqual(person.age, person.age) self.assertEqual(person.userid, person.userid) self.assertEqual(person.created, person.created) self.assertEqual(person._data['name'], person.name) self.assertEqual(person._data['age'], person.age) self.assertEqual(person._data['userid'], person.userid) self.assertEqual(person._data['created'], person.created) # Confirm introspection changes nothing data_to_be_saved = sorted(person.to_mongo().keys()) self.assertEqual(data_to_be_saved, ['age', 'created', 'userid']) def test_required_values(self): """Ensure that required field constraints are enforced. """ class Person(Document): name = StringField(required=True) age = IntField(required=True) userid = StringField() person = Person(name="Test User") self.assertRaises(ValidationError, person.validate) person = Person(age=30) self.assertRaises(ValidationError, person.validate) def test_not_required_handles_none_in_update(self): """Ensure that every fields should accept None if required is False. """ class HandleNoneFields(Document): str_fld = StringField() int_fld = IntField() flt_fld = FloatField() comp_dt_fld = ComplexDateTimeField() HandleNoneFields.drop_collection() doc = HandleNoneFields() doc.str_fld = u'spam ham egg' doc.int_fld = 42 doc.flt_fld = 4.2 doc.com_dt_fld = datetime.datetime.utcnow() doc.save() res = HandleNoneFields.objects(id=doc.id).update( set__str_fld=None, set__int_fld=None, set__flt_fld=None, set__comp_dt_fld=None, ) self.assertEqual(res, 1) # Retrive data from db and verify it. ret = HandleNoneFields.objects.all()[0] self.assertEqual(ret.str_fld, None) self.assertEqual(ret.int_fld, None) self.assertEqual(ret.flt_fld, None) # Return current time if retrived value is None. self.assertTrue(isinstance(ret.comp_dt_fld, datetime.datetime)) def test_not_required_handles_none_from_database(self): """Ensure that every fields can handle null values from the database. """ class HandleNoneFields(Document): str_fld = StringField(required=True) int_fld = IntField(required=True) flt_fld = FloatField(required=True) comp_dt_fld = ComplexDateTimeField(required=True) HandleNoneFields.drop_collection() doc = HandleNoneFields() doc.str_fld = u'spam ham egg' doc.int_fld = 42 doc.flt_fld = 4.2 doc.com_dt_fld = datetime.datetime.utcnow() doc.save() collection = self.db[HandleNoneFields._get_collection_name()] obj = collection.update({"_id": doc.id}, {"$unset": { "str_fld": 1, "int_fld": 1, "flt_fld": 1, "comp_dt_fld": 1} }) # Retrive data from db and verify it. ret = HandleNoneFields.objects.all()[0] self.assertEqual(ret.str_fld, None) self.assertEqual(ret.int_fld, None) self.assertEqual(ret.flt_fld, None) # Return current time if retrived value is None. self.assertTrue(isinstance(ret.comp_dt_fld, datetime.datetime)) self.assertRaises(ValidationError, ret.validate) def test_int_and_float_ne_operator(self): class TestDocument(Document): int_fld = IntField() float_fld = FloatField() TestDocument.drop_collection() TestDocument(int_fld=None, float_fld=None).save() TestDocument(int_fld=1, float_fld=1).save() self.assertEqual(1, TestDocument.objects(int_fld__ne=None).count()) self.assertEqual(1, TestDocument.objects(float_fld__ne=None).count()) def test_long_ne_operator(self): class TestDocument(Document): long_fld = LongField() TestDocument.drop_collection() TestDocument(long_fld=None).save() TestDocument(long_fld=1).save() self.assertEqual(1, TestDocument.objects(long_fld__ne=None).count()) def test_object_id_validation(self): """Ensure that invalid values cannot be assigned to string fields. """ class Person(Document): name = StringField() person = Person(name='Test User') self.assertEqual(person.id, None) person.id = 47 self.assertRaises(ValidationError, person.validate) person.id = 'abc' self.assertRaises(ValidationError, person.validate) person.id = '497ce96f395f2f052a494fd4' person.validate() def test_string_validation(self): """Ensure that invalid values cannot be assigned to string fields. """ class Person(Document): name = StringField(max_length=20) userid = StringField(r'[0-9a-z_]+$') person = Person(name=34) self.assertRaises(ValidationError, person.validate) # Test regex validation on userid person = Person(userid='test.User') self.assertRaises(ValidationError, person.validate) person.userid = 'test_user' self.assertEqual(person.userid, 'test_user') person.validate() # Test max length validation on name person = Person(name='Name that is more than twenty characters') self.assertRaises(ValidationError, person.validate) person.name = 'Shorter name' person.validate() def test_url_validation(self): """Ensure that URLFields validate urls properly. """ class Link(Document): url = URLField() link = Link() link.url = 'google' self.assertRaises(ValidationError, link.validate) link.url = 'http://www.google.com:8080' link.validate() def test_url_scheme_validation(self): """Ensure that URLFields validate urls with specific schemes properly. """ class Link(Document): url = URLField() class SchemeLink(Document): url = URLField(schemes=['ws', 'irc']) link = Link() link.url = 'ws://google.com' self.assertRaises(ValidationError, link.validate) scheme_link = SchemeLink() scheme_link.url = 'ws://google.com' scheme_link.validate() def test_int_validation(self): """Ensure that invalid values cannot be assigned to int fields. """ class Person(Document): age = IntField(min_value=0, max_value=110) person = Person() person.age = 50 person.validate() person.age = -1 self.assertRaises(ValidationError, person.validate) person.age = 120 self.assertRaises(ValidationError, person.validate) person.age = 'ten' self.assertRaises(ValidationError, person.validate) def test_long_validation(self): """Ensure that invalid values cannot be assigned to long fields. """ class TestDocument(Document): value = LongField(min_value=0, max_value=110) doc = TestDocument() doc.value = 50 doc.validate() doc.value = -1 self.assertRaises(ValidationError, doc.validate) doc.age = 120 self.assertRaises(ValidationError, doc.validate) doc.age = 'ten' self.assertRaises(ValidationError, doc.validate) def test_float_validation(self): """Ensure that invalid values cannot be assigned to float fields. """ class Person(Document): height = FloatField(min_value=0.1, max_value=3.5) person = Person() person.height = 1.89 person.validate() person.height = '2.0' self.assertRaises(ValidationError, person.validate) person.height = 0.01 self.assertRaises(ValidationError, person.validate) person.height = 4.0 self.assertRaises(ValidationError, person.validate) person_2 = Person(height='something invalid') self.assertRaises(ValidationError, person_2.validate) def test_decimal_validation(self): """Ensure that invalid values cannot be assigned to decimal fields. """ class Person(Document): height = DecimalField(min_value=Decimal('0.1'), max_value=Decimal('3.5')) Person.drop_collection() Person(height=Decimal('1.89')).save() person = Person.objects.first() self.assertEqual(person.height, Decimal('1.89')) person.height = '2.0' person.save() person.height = 0.01 self.assertRaises(ValidationError, person.validate) person.height = Decimal('0.01') self.assertRaises(ValidationError, person.validate) person.height = Decimal('4.0') self.assertRaises(ValidationError, person.validate) person.height = 'something invalid' self.assertRaises(ValidationError, person.validate) person_2 = Person(height='something invalid') self.assertRaises(ValidationError, person_2.validate) Person.drop_collection() def test_decimal_comparison(self): class Person(Document): money = DecimalField() Person.drop_collection() Person(money=6).save() Person(money=8).save() Person(money=10).save() self.assertEqual(2, Person.objects(money__gt=Decimal("7")).count()) self.assertEqual(2, Person.objects(money__gt=7).count()) self.assertEqual(2, Person.objects(money__gt="7").count()) def test_decimal_storage(self): class Person(Document): btc = DecimalField(precision=4) Person.drop_collection() Person(btc=10).save() Person(btc=10.1).save() Person(btc=10.11).save() Person(btc="10.111").save() Person(btc=Decimal("10.1111")).save() Person(btc=Decimal("10.11111")).save() # How its stored expected = [{'btc': 10.0}, {'btc': 10.1}, {'btc': 10.11}, {'btc': 10.111}, {'btc': 10.1111}, {'btc': 10.1111}] actual = list(Person.objects.exclude('id').as_pymongo()) self.assertEqual(expected, actual) # How it comes out locally expected = [Decimal('10.0000'), Decimal('10.1000'), Decimal('10.1100'), Decimal('10.1110'), Decimal('10.1111'), Decimal('10.1111')] actual = list(Person.objects().scalar('btc')) self.assertEqual(expected, actual) def test_boolean_validation(self): """Ensure that invalid values cannot be assigned to boolean fields. """ class Person(Document): admin = BooleanField() person = Person() person.admin = True person.validate() person.admin = 2 self.assertRaises(ValidationError, person.validate) person.admin = 'Yes' self.assertRaises(ValidationError, person.validate) def test_uuid_field_string(self): """Test UUID fields storing as String """ class Person(Document): api_key = UUIDField(binary=False) Person.drop_collection() uu = uuid.uuid4() Person(api_key=uu).save() self.assertEqual(1, Person.objects(api_key=uu).count()) self.assertEqual(uu, Person.objects.first().api_key) person = Person() valid = (uuid.uuid4(), uuid.uuid1()) for api_key in valid: person.api_key = api_key person.validate() invalid = ('9d159858-549b-4975-9f98-dd2f987c113g', '9d159858-549b-4975-9f98-dd2f987c113') for api_key in invalid: person.api_key = api_key self.assertRaises(ValidationError, person.validate) def test_uuid_field_binary(self): """Test UUID fields storing as Binary object """ class Person(Document): api_key = UUIDField(binary=True) Person.drop_collection() uu = uuid.uuid4() Person(api_key=uu).save() self.assertEqual(1, Person.objects(api_key=uu).count()) self.assertEqual(uu, Person.objects.first().api_key) person = Person() valid = (uuid.uuid4(), uuid.uuid1()) for api_key in valid: person.api_key = api_key person.validate() invalid = ('9d159858-549b-4975-9f98-dd2f987c113g', '9d159858-549b-4975-9f98-dd2f987c113') for api_key in invalid: person.api_key = api_key self.assertRaises(ValidationError, person.validate) def test_datetime_validation(self): """Ensure that invalid values cannot be assigned to datetime fields. """ class LogEntry(Document): time = DateTimeField() log = LogEntry() log.time = datetime.datetime.now() log.validate() log.time = datetime.date.today() log.validate() log.time = datetime.datetime.now().isoformat(' ') log.validate() if dateutil: log.time = datetime.datetime.now().isoformat('T') log.validate() log.time = -1 self.assertRaises(ValidationError, log.validate) log.time = 'ABC' self.assertRaises(ValidationError, log.validate) def test_datetime_tz_aware_mark_as_changed(self): from mongoengine import connection # Reset the connections connection._connection_settings = {} connection._connections = {} connection._dbs = {} connect(db='mongoenginetest', tz_aware=True) class LogEntry(Document): time = DateTimeField() LogEntry.drop_collection() LogEntry(time=datetime.datetime(2013, 1, 1, 0, 0, 0)).save() log = LogEntry.objects.first() log.time = datetime.datetime(2013, 1, 1, 0, 0, 0) self.assertEqual(['time'], log._changed_fields) def test_datetime(self): """Tests showing pymongo datetime fields handling of microseconds. Microseconds are rounded to the nearest millisecond and pre UTC handling is wonky. See: http://api.mongodb.org/python/current/api/bson/son.html#dt """ class LogEntry(Document): date = DateTimeField() LogEntry.drop_collection() # Test can save dates log = LogEntry() log.date = datetime.date.today() log.save() log.reload() self.assertEqual(log.date.date(), datetime.date.today()) LogEntry.drop_collection() # Post UTC - microseconds are rounded (down) nearest millisecond and # dropped d1 = datetime.datetime(1970, 01, 01, 00, 00, 01, 999) d2 = datetime.datetime(1970, 01, 01, 00, 00, 01) log = LogEntry() log.date = d1 log.save() log.reload() self.assertNotEqual(log.date, d1) self.assertEqual(log.date, d2) # Post UTC - microseconds are rounded (down) nearest millisecond d1 = datetime.datetime(1970, 01, 01, 00, 00, 01, 9999) d2 = datetime.datetime(1970, 01, 01, 00, 00, 01, 9000) log.date = d1 log.save() log.reload() self.assertNotEqual(log.date, d1) self.assertEqual(log.date, d2) if not PY3: # Pre UTC dates microseconds below 1000 are dropped # This does not seem to be true in PY3 d1 = datetime.datetime(1969, 12, 31, 23, 59, 59, 999) d2 = datetime.datetime(1969, 12, 31, 23, 59, 59) log.date = d1 log.save() log.reload() self.assertNotEqual(log.date, d1) self.assertEqual(log.date, d2) LogEntry.drop_collection() def test_datetime_usage(self): """Tests for regular datetime fields""" class LogEntry(Document): date = DateTimeField() LogEntry.drop_collection() d1 = datetime.datetime(1970, 01, 01, 00, 00, 01) log = LogEntry() log.date = d1 log.validate() log.save() for query in (d1, d1.isoformat(' ')): log1 = LogEntry.objects.get(date=query) self.assertEqual(log, log1) if dateutil: log1 = LogEntry.objects.get(date=d1.isoformat('T')) self.assertEqual(log, log1) LogEntry.drop_collection() # create 60 log entries for i in xrange(1950, 2010): d = datetime.datetime(i, 01, 01, 00, 00, 01) LogEntry(date=d).save() self.assertEqual(LogEntry.objects.count(), 60) # Test ordering logs = LogEntry.objects.order_by("date") count = logs.count() i = 0 while i == count - 1: self.assertTrue(logs[i].date <= logs[i + 1].date) i += 1 logs = LogEntry.objects.order_by("-date") count = logs.count() i = 0 while i == count - 1: self.assertTrue(logs[i].date >= logs[i + 1].date) i += 1 # Test searching logs = LogEntry.objects.filter(date__gte=datetime.datetime(1980, 1, 1)) self.assertEqual(logs.count(), 30) logs = LogEntry.objects.filter(date__lte=datetime.datetime(1980, 1, 1)) self.assertEqual(logs.count(), 30) logs = LogEntry.objects.filter( date__lte=datetime.datetime(2011, 1, 1), date__gte=datetime.datetime(2000, 1, 1), ) self.assertEqual(logs.count(), 10) LogEntry.drop_collection() def test_complexdatetime_storage(self): """Tests for complex datetime fields - which can handle microseconds without rounding. """ class LogEntry(Document): date = ComplexDateTimeField() date_with_dots = ComplexDateTimeField(separator='.') LogEntry.drop_collection() # Post UTC - microseconds are rounded (down) nearest millisecond and # dropped - with default datetimefields d1 = datetime.datetime(1970, 01, 01, 00, 00, 01, 999) log = LogEntry() log.date = d1 log.save() log.reload() self.assertEqual(log.date, d1) # Post UTC - microseconds are rounded (down) nearest millisecond - with # default datetimefields d1 = datetime.datetime(1970, 01, 01, 00, 00, 01, 9999) log.date = d1 log.save() log.reload() self.assertEqual(log.date, d1) # Pre UTC dates microseconds below 1000 are dropped - with default # datetimefields d1 = datetime.datetime(1969, 12, 31, 23, 59, 59, 999) log.date = d1 log.save() log.reload() self.assertEqual(log.date, d1) # Pre UTC microseconds above 1000 is wonky - with default datetimefields # log.date has an invalid microsecond value so I can't construct # a date to compare. for i in xrange(1001, 3113, 33): d1 = datetime.datetime(1969, 12, 31, 23, 59, 59, i) log.date = d1 log.save() log.reload() self.assertEqual(log.date, d1) log1 = LogEntry.objects.get(date=d1) self.assertEqual(log, log1) # Test string padding microsecond = map(int, [math.pow(10, x) for x in xrange(6)]) mm = dd = hh = ii = ss = [1, 10] for values in itertools.product([2014], mm, dd, hh, ii, ss, microsecond): stored = LogEntry(date=datetime.datetime(*values)).to_mongo()['date'] self.assertTrue(re.match('^\d{4},\d{2},\d{2},\d{2},\d{2},\d{2},\d{6}$', stored) is not None) # Test separator stored = LogEntry(date_with_dots=datetime.datetime(2014, 1, 1)).to_mongo()['date_with_dots'] self.assertTrue(re.match('^\d{4}.\d{2}.\d{2}.\d{2}.\d{2}.\d{2}.\d{6}$', stored) is not None) LogEntry.drop_collection() def test_complexdatetime_usage(self): """Tests for complex datetime fields - which can handle microseconds without rounding. """ class LogEntry(Document): date = ComplexDateTimeField() LogEntry.drop_collection() d1 = datetime.datetime(1970, 01, 01, 00, 00, 01, 999) log = LogEntry() log.date = d1 log.save() log1 = LogEntry.objects.get(date=d1) self.assertEqual(log, log1) LogEntry.drop_collection() # create 60 log entries for i in xrange(1950, 2010): d = datetime.datetime(i, 01, 01, 00, 00, 01, 999) LogEntry(date=d).save() self.assertEqual(LogEntry.objects.count(), 60) # Test ordering logs = LogEntry.objects.order_by("date") count = logs.count() i = 0 while i == count - 1: self.assertTrue(logs[i].date <= logs[i + 1].date) i += 1 logs = LogEntry.objects.order_by("-date") count = logs.count() i = 0 while i == count - 1: self.assertTrue(logs[i].date >= logs[i + 1].date) i += 1 # Test searching logs = LogEntry.objects.filter(date__gte=datetime.datetime(1980, 1, 1)) self.assertEqual(logs.count(), 30) logs = LogEntry.objects.filter(date__lte=datetime.datetime(1980, 1, 1)) self.assertEqual(logs.count(), 30) logs = LogEntry.objects.filter( date__lte=datetime.datetime(2011, 1, 1), date__gte=datetime.datetime(2000, 1, 1), ) self.assertEqual(logs.count(), 10) LogEntry.drop_collection() # Test microsecond-level ordering/filtering for microsecond in (99, 999, 9999, 10000): LogEntry(date=datetime.datetime(2015, 1, 1, 0, 0, 0, microsecond)).save() logs = list(LogEntry.objects.order_by('date')) for next_idx, log in enumerate(logs[:-1], start=1): next_log = logs[next_idx] self.assertTrue(log.date < next_log.date) logs = list(LogEntry.objects.order_by('-date')) for next_idx, log in enumerate(logs[:-1], start=1): next_log = logs[next_idx] self.assertTrue(log.date > next_log.date) logs = LogEntry.objects.filter(date__lte=datetime.datetime(2015, 1, 1, 0, 0, 0, 10000)) self.assertEqual(logs.count(), 4) LogEntry.drop_collection() def test_list_validation(self): """Ensure that a list field only accepts lists with valid elements. """ class User(Document): pass class Comment(EmbeddedDocument): content = StringField() class BlogPost(Document): content = StringField() comments = ListField(EmbeddedDocumentField(Comment)) tags = ListField(StringField()) authors = ListField(ReferenceField(User)) generic = ListField(GenericReferenceField()) post = BlogPost(content='Went for a walk today...') post.validate() post.tags = 'fun' self.assertRaises(ValidationError, post.validate) post.tags = [1, 2] self.assertRaises(ValidationError, post.validate) post.tags = ['fun', 'leisure'] post.validate() post.tags = ('fun', 'leisure') post.validate() post.comments = ['a'] self.assertRaises(ValidationError, post.validate) post.comments = 'yay' self.assertRaises(ValidationError, post.validate) comments = [Comment(content='Good for you'), Comment(content='Yay.')] post.comments = comments post.validate() post.authors = [Comment()] self.assertRaises(ValidationError, post.validate) post.authors = [User()] self.assertRaises(ValidationError, post.validate) user = User() user.save() post.authors = [user] post.validate() post.generic = [1, 2] self.assertRaises(ValidationError, post.validate) post.generic = [User(), Comment()] self.assertRaises(ValidationError, post.validate) post.generic = [Comment()] self.assertRaises(ValidationError, post.validate) post.generic = [user] post.validate() User.drop_collection() BlogPost.drop_collection() def test_sorted_list_sorting(self): """Ensure that a sorted list field properly sorts values. """ class Comment(EmbeddedDocument): order = IntField() content = StringField() class BlogPost(Document): content = StringField() comments = SortedListField(EmbeddedDocumentField(Comment), ordering='order') tags = SortedListField(StringField()) post = BlogPost(content='Went for a walk today...') post.save() post.tags = ['leisure', 'fun'] post.save() post.reload() self.assertEqual(post.tags, ['fun', 'leisure']) comment1 = Comment(content='Good for you', order=1) comment2 = Comment(content='Yay.', order=0) comments = [comment1, comment2] post.comments = comments post.save() post.reload() self.assertEqual(post.comments[0].content, comment2.content) self.assertEqual(post.comments[1].content, comment1.content) post.comments[0].order = 2 post.save() post.reload() self.assertEqual(post.comments[0].content, comment1.content) self.assertEqual(post.comments[1].content, comment2.content) BlogPost.drop_collection() def test_reverse_list_sorting(self): """Ensure that a reverse sorted list field properly sorts values""" class Category(EmbeddedDocument): count = IntField() name = StringField() class CategoryList(Document): categories = SortedListField(EmbeddedDocumentField(Category), ordering='count', reverse=True) name = StringField() catlist = CategoryList(name="Top categories") cat1 = Category(name='posts', count=10) cat2 = Category(name='food', count=100) cat3 = Category(name='drink', count=40) catlist.categories = [cat1, cat2, cat3] catlist.save() catlist.reload() self.assertEqual(catlist.categories[0].name, cat2.name) self.assertEqual(catlist.categories[1].name, cat3.name) self.assertEqual(catlist.categories[2].name, cat1.name) CategoryList.drop_collection() def test_list_field(self): """Ensure that list types work as expected. """ class BlogPost(Document): info = ListField() BlogPost.drop_collection() post = BlogPost() post.info = 'my post' self.assertRaises(ValidationError, post.validate) post.info = {'title': 'test'} self.assertRaises(ValidationError, post.validate) post.info = ['test'] post.save() post = BlogPost() post.info = [{'test': 'test'}] post.save() post = BlogPost() post.info = [{'test': 3}] post.save() self.assertEqual(BlogPost.objects.count(), 3) self.assertEqual( BlogPost.objects.filter(info__exact='test').count(), 1) self.assertEqual( BlogPost.objects.filter(info__0__test='test').count(), 1) # Confirm handles non strings or non existing keys self.assertEqual( BlogPost.objects.filter(info__0__test__exact='5').count(), 0) self.assertEqual( BlogPost.objects.filter(info__100__test__exact='test').count(), 0) post = BlogPost() post.info = ['1', '2'] post.save() post = BlogPost.objects(info=['1', '2']).get() post.info += ['3', '4'] post.save() self.assertEqual(BlogPost.objects(info=['1', '2', '3', '4']).count(), 1) post = BlogPost.objects(info=['1', '2', '3', '4']).get() post.info *= 2 post.save() self.assertEqual(BlogPost.objects(info=['1', '2', '3', '4', '1', '2', '3', '4']).count(), 1) BlogPost.drop_collection() def test_list_field_passed_in_value(self): class Foo(Document): bars = ListField(ReferenceField("Bar")) class Bar(Document): text = StringField() bar = Bar(text="hi") bar.save() foo = Foo(bars=[]) foo.bars.append(bar) self.assertEqual(repr(foo.bars), '[<Bar: Bar object>]') def test_list_field_strict(self): """Ensure that list field handles validation if provided a strict field type.""" class Simple(Document): mapping = ListField(field=IntField()) Simple.drop_collection() e = Simple() e.mapping = [1] e.save() def create_invalid_mapping(): e.mapping = ["abc"] e.save() self.assertRaises(ValidationError, create_invalid_mapping) Simple.drop_collection() def test_list_field_rejects_strings(self): """Strings aren't valid list field data types""" class Simple(Document): mapping = ListField() Simple.drop_collection() e = Simple() e.mapping = 'hello world' self.assertRaises(ValidationError, e.save) def test_complex_field_required(self): """Ensure required cant be None / Empty""" class Simple(Document): mapping = ListField(required=True) Simple.drop_collection() e = Simple() e.mapping = [] self.assertRaises(ValidationError, e.save) class Simple(Document): mapping = DictField(required=True) Simple.drop_collection() e = Simple() e.mapping = {} self.assertRaises(ValidationError, e.save) def test_complex_field_same_value_not_changed(self): """ If a complex field is set to the same value, it should not be marked as changed. """ class Simple(Document): mapping = ListField() Simple.drop_collection() e = Simple().save() e.mapping = [] self.assertEqual([], e._changed_fields) class Simple(Document): mapping = DictField() Simple.drop_collection() e = Simple().save() e.mapping = {} self.assertEqual([], e._changed_fields) def test_slice_marks_field_as_changed(self): class Simple(Document): widgets = ListField() simple = Simple(widgets=[1, 2, 3, 4]).save() simple.widgets[:3] = [] self.assertEqual(['widgets'], simple._changed_fields) simple.save() simple = simple.reload() self.assertEqual(simple.widgets, [4]) def test_del_slice_marks_field_as_changed(self): class Simple(Document): widgets = ListField() simple = Simple(widgets=[1, 2, 3, 4]).save() del simple.widgets[:3] self.assertEqual(['widgets'], simple._changed_fields) simple.save() simple = simple.reload() self.assertEqual(simple.widgets, [4]) def test_list_field_complex(self): """Ensure that the list fields can handle the complex types.""" class SettingBase(EmbeddedDocument): meta = {'allow_inheritance': True} class StringSetting(SettingBase): value = StringField() class IntegerSetting(SettingBase): value = IntField() class Simple(Document): mapping = ListField() Simple.drop_collection() e = Simple() e.mapping.append(StringSetting(value='foo')) e.mapping.append(IntegerSetting(value=42)) e.mapping.append({'number': 1, 'string': 'Hi!', 'float': 1.001, 'complex': IntegerSetting(value=42), 'list': [IntegerSetting(value=42), StringSetting(value='foo')]}) e.save() e2 = Simple.objects.get(id=e.id) self.assertTrue(isinstance(e2.mapping[0], StringSetting)) self.assertTrue(isinstance(e2.mapping[1], IntegerSetting)) # Test querying self.assertEqual( Simple.objects.filter(mapping__1__value=42).count(), 1) self.assertEqual( Simple.objects.filter(mapping__2__number=1).count(), 1) self.assertEqual( Simple.objects.filter(mapping__2__complex__value=42).count(), 1) self.assertEqual( Simple.objects.filter(mapping__2__list__0__value=42).count(), 1) self.assertEqual( Simple.objects.filter(mapping__2__list__1__value='foo').count(), 1) # Confirm can update Simple.objects().update(set__mapping__1=IntegerSetting(value=10)) self.assertEqual( Simple.objects.filter(mapping__1__value=10).count(), 1) Simple.objects().update( set__mapping__2__list__1=StringSetting(value='Boo')) self.assertEqual( Simple.objects.filter(mapping__2__list__1__value='foo').count(), 0) self.assertEqual( Simple.objects.filter(mapping__2__list__1__value='Boo').count(), 1) Simple.drop_collection() def test_dict_field(self): """Ensure that dict types work as expected. """ class BlogPost(Document): info = DictField() BlogPost.drop_collection() post = BlogPost() post.info = 'my post' self.assertRaises(ValidationError, post.validate) post.info = ['test', 'test'] self.assertRaises(ValidationError, post.validate) post.info = {'$title': 'test'} self.assertRaises(ValidationError, post.validate) post.info = {'nested': {'$title': 'test'}} self.assertRaises(ValidationError, post.validate) post.info = {'the.title': 'test'} self.assertRaises(ValidationError, post.validate) post.info = {'nested': {'the.title': 'test'}} self.assertRaises(ValidationError, post.validate) post.info = {1: 'test'} self.assertRaises(ValidationError, post.validate) post.info = {'title': 'test'} post.save() post = BlogPost() post.info = {'details': {'test': 'test'}} post.save() post = BlogPost() post.info = {'details': {'test': 3}} post.save() self.assertEqual(BlogPost.objects.count(), 3) self.assertEqual( BlogPost.objects.filter(info__title__exact='test').count(), 1) self.assertEqual( BlogPost.objects.filter(info__details__test__exact='test').count(), 1) # Confirm handles non strings or non existing keys self.assertEqual( BlogPost.objects.filter(info__details__test__exact=5).count(), 0) self.assertEqual( BlogPost.objects.filter(info__made_up__test__exact='test').count(), 0) post = BlogPost.objects.create(info={'title': 'original'}) post.info.update({'title': 'updated'}) post.save() post.reload() self.assertEqual('updated', post.info['title']) post.info.setdefault('authors', []) post.save() post.reload() self.assertEqual([], post.info['authors']) BlogPost.drop_collection() def test_dictfield_dump_document(self): """Ensure a DictField can handle another document's dump """ class Doc(Document): field = DictField() class ToEmbed(Document): id = IntField(primary_key=True, default=1) recursive = DictField() class ToEmbedParent(Document): id = IntField(primary_key=True, default=1) recursive = DictField() meta = {'allow_inheritance': True} class ToEmbedChild(ToEmbedParent): pass to_embed_recursive = ToEmbed(id=1).save() to_embed = ToEmbed( id=2, recursive=to_embed_recursive.to_mongo().to_dict()).save() doc = Doc(field=to_embed.to_mongo().to_dict()) doc.save() assert isinstance(doc.field, dict) assert doc.field == {'_id': 2, 'recursive': {'_id': 1, 'recursive': {}}} # Same thing with a Document with a _cls field to_embed_recursive = ToEmbedChild(id=1).save() to_embed_child = ToEmbedChild( id=2, recursive=to_embed_recursive.to_mongo().to_dict()).save() doc = Doc(field=to_embed_child.to_mongo().to_dict()) doc.save() assert isinstance(doc.field, dict) assert doc.field == { '_id': 2, '_cls': 'ToEmbedParent.ToEmbedChild', 'recursive': {'_id': 1, '_cls': 'ToEmbedParent.ToEmbedChild', 'recursive': {}} } def test_dictfield_strict(self): """Ensure that dict field handles validation if provided a strict field type.""" class Simple(Document): mapping = DictField(field=IntField()) Simple.drop_collection() e = Simple() e.mapping['someint'] = 1 e.save() def create_invalid_mapping(): e.mapping['somestring'] = "abc" e.save() self.assertRaises(ValidationError, create_invalid_mapping) Simple.drop_collection() def test_dictfield_complex(self): """Ensure that the dict field can handle the complex types.""" class SettingBase(EmbeddedDocument): meta = {'allow_inheritance': True} class StringSetting(SettingBase): value = StringField() class IntegerSetting(SettingBase): value = IntField() class Simple(Document): mapping = DictField() Simple.drop_collection() e = Simple() e.mapping['somestring'] = StringSetting(value='foo') e.mapping['someint'] = IntegerSetting(value=42) e.mapping['nested_dict'] = {'number': 1, 'string': 'Hi!', 'float': 1.001, 'complex': IntegerSetting(value=42), 'list': [IntegerSetting(value=42), StringSetting(value='foo')]} e.save() e2 = Simple.objects.get(id=e.id) self.assertTrue(isinstance(e2.mapping['somestring'], StringSetting)) self.assertTrue(isinstance(e2.mapping['someint'], IntegerSetting)) # Test querying self.assertEqual( Simple.objects.filter(mapping__someint__value=42).count(), 1) self.assertEqual( Simple.objects.filter(mapping__nested_dict__number=1).count(), 1) self.assertEqual( Simple.objects.filter(mapping__nested_dict__complex__value=42).count(), 1) self.assertEqual( Simple.objects.filter(mapping__nested_dict__list__0__value=42).count(), 1) self.assertEqual( Simple.objects.filter(mapping__nested_dict__list__1__value='foo').count(), 1) # Confirm can update Simple.objects().update( set__mapping={"someint": IntegerSetting(value=10)}) Simple.objects().update( set__mapping__nested_dict__list__1=StringSetting(value='Boo')) self.assertEqual( Simple.objects.filter(mapping__nested_dict__list__1__value='foo').count(), 0) self.assertEqual( Simple.objects.filter(mapping__nested_dict__list__1__value='Boo').count(), 1) Simple.drop_collection() def test_atomic_update_dict_field(self): """Ensure that the entire DictField can be atomically updated.""" class Simple(Document): mapping = DictField(field=ListField(IntField(required=True))) Simple.drop_collection() e = Simple() e.mapping['someints'] = [1, 2] e.save() e.update(set__mapping={"ints": [3, 4]}) e.reload() self.assertEqual(BaseDict, type(e.mapping)) self.assertEqual({"ints": [3, 4]}, e.mapping) def create_invalid_mapping(): e.update(set__mapping={"somestrings": ["foo", "bar", ]}) self.assertRaises(ValueError, create_invalid_mapping) Simple.drop_collection() def test_mapfield(self): """Ensure that the MapField handles the declared type.""" class Simple(Document): mapping = MapField(IntField()) Simple.drop_collection() e = Simple() e.mapping['someint'] = 1 e.save() def create_invalid_mapping(): e.mapping['somestring'] = "abc" e.save() self.assertRaises(ValidationError, create_invalid_mapping) def create_invalid_class(): class NoDeclaredType(Document): mapping = MapField() self.assertRaises(ValidationError, create_invalid_class) Simple.drop_collection() def test_complex_mapfield(self): """Ensure that the MapField can handle complex declared types.""" class SettingBase(EmbeddedDocument): meta = {"allow_inheritance": True} class StringSetting(SettingBase): value = StringField() class IntegerSetting(SettingBase): value = IntField() class Extensible(Document): mapping = MapField(EmbeddedDocumentField(SettingBase)) Extensible.drop_collection() e = Extensible() e.mapping['somestring'] = StringSetting(value='foo') e.mapping['someint'] = IntegerSetting(value=42) e.save() e2 = Extensible.objects.get(id=e.id) self.assertTrue(isinstance(e2.mapping['somestring'], StringSetting)) self.assertTrue(isinstance(e2.mapping['someint'], IntegerSetting)) def create_invalid_mapping(): e.mapping['someint'] = 123 e.save() self.assertRaises(ValidationError, create_invalid_mapping) Extensible.drop_collection() def test_embedded_mapfield_db_field(self): class Embedded(EmbeddedDocument): number = IntField(default=0, db_field='i') class Test(Document): my_map = MapField(field=EmbeddedDocumentField(Embedded), db_field='x') Test.drop_collection() test = Test() test.my_map['DICTIONARY_KEY'] = Embedded(number=1) test.save() Test.objects.update_one(inc__my_map__DICTIONARY_KEY__number=1) test = Test.objects.get() self.assertEqual(test.my_map['DICTIONARY_KEY'].number, 2) doc = self.db.test.find_one() self.assertEqual(doc['x']['DICTIONARY_KEY']['i'], 2) def test_mapfield_numerical_index(self): """Ensure that MapField accept numeric strings as indexes.""" class Embedded(EmbeddedDocument): name = StringField() class Test(Document): my_map = MapField(EmbeddedDocumentField(Embedded)) Test.drop_collection() test = Test() test.my_map['1'] = Embedded(name='test') test.save() test.my_map['1'].name = 'test updated' test.save() Test.drop_collection() def test_map_field_lookup(self): """Ensure MapField lookups succeed on Fields without a lookup method""" class Action(EmbeddedDocument): operation = StringField() object = StringField() class Log(Document): name = StringField() visited = MapField(DateTimeField()) actions = MapField(EmbeddedDocumentField(Action)) Log.drop_collection() Log(name="wilson", visited={'friends': datetime.datetime.now()}, actions={'friends': Action(operation='drink', object='beer')}).save() self.assertEqual(1, Log.objects( visited__friends__exists=True).count()) self.assertEqual(1, Log.objects( actions__friends__operation='drink', actions__friends__object='beer').count()) def test_embedded_db_field(self): class Embedded(EmbeddedDocument): number = IntField(default=0, db_field='i') class Test(Document): embedded = EmbeddedDocumentField(Embedded, db_field='x') Test.drop_collection() test = Test() test.embedded = Embedded(number=1) test.save() Test.objects.update_one(inc__embedded__number=1) test = Test.objects.get() self.assertEqual(test.embedded.number, 2) doc = self.db.test.find_one() self.assertEqual(doc['x']['i'], 2) def test_embedded_document_validation(self): """Ensure that invalid embedded documents cannot be assigned to embedded document fields. """ class Comment(EmbeddedDocument): content = StringField() class PersonPreferences(EmbeddedDocument): food = StringField(required=True) number = IntField() class Person(Document): name = StringField() preferences = EmbeddedDocumentField(PersonPreferences) person = Person(name='Test User') person.preferences = 'My Preferences' self.assertRaises(ValidationError, person.validate) # Check that only the right embedded doc works person.preferences = Comment(content='Nice blog post...') self.assertRaises(ValidationError, person.validate) # Check that the embedded doc is valid person.preferences = PersonPreferences() self.assertRaises(ValidationError, person.validate) person.preferences = PersonPreferences(food='Cheese', number=47) self.assertEqual(person.preferences.food, 'Cheese') person.validate() def test_embedded_document_inheritance(self): """Ensure that subclasses of embedded documents may be provided to EmbeddedDocumentFields of the superclass' type. """ class User(EmbeddedDocument): name = StringField() meta = {'allow_inheritance': True} class PowerUser(User): power = IntField() class BlogPost(Document): content = StringField() author = EmbeddedDocumentField(User) post = BlogPost(content='What I did today...') post.author = PowerUser(name='Test User', power=47) post.save() self.assertEqual(47, BlogPost.objects.first().author.power) def test_reference_validation(self): """Ensure that invalid docment objects cannot be assigned to reference fields. """ class User(Document): name = StringField() class BlogPost(Document): content = StringField() author = ReferenceField(User) User.drop_collection() BlogPost.drop_collection() self.assertRaises(ValidationError, ReferenceField, EmbeddedDocument) user = User(name='Test User') # Ensure that the referenced object must have been saved post1 = BlogPost(content='Chips and gravy taste good.') post1.author = user self.assertRaises(ValidationError, post1.save) # Check that an invalid object type cannot be used post2 = BlogPost(content='Chips and chilli taste good.') post1.author = post2 self.assertRaises(ValidationError, post1.validate) user.save() post1.author = user post1.save() post2.save() post1.author = post2 self.assertRaises(ValidationError, post1.validate) User.drop_collection() BlogPost.drop_collection() def test_dbref_reference_fields(self): class Person(Document): name = StringField() parent = ReferenceField('self', dbref=True) Person.drop_collection() p1 = Person(name="John").save() Person(name="Ross", parent=p1).save() col = Person._get_collection() data = col.find_one({'name': 'Ross'}) self.assertEqual(data['parent'], DBRef('person', p1.pk)) p = Person.objects.get(name="Ross") self.assertEqual(p.parent, p1) def test_dbref_to_mongo(self): class Person(Document): name = StringField() parent = ReferenceField('self', dbref=False) p1 = Person._from_son({'name': "Yakxxx", 'parent': "50a234ea469ac1eda42d347d"}) mongoed = p1.to_mongo() self.assertTrue(isinstance(mongoed['parent'], ObjectId)) def test_cached_reference_field_get_and_save(self): """ Tests #1047: CachedReferenceField creates DBRefs on to_python, but can't save them on to_mongo """ class Animal(Document): name = StringField() tag = StringField() class Ocorrence(Document): person = StringField() animal = CachedReferenceField(Animal) Animal.drop_collection() Ocorrence.drop_collection() Ocorrence(person="testte", animal=Animal(name="Leopard", tag="heavy").save()).save() p = Ocorrence.objects.get() p.person = 'new_testte' p.save() def test_cached_reference_fields(self): class Animal(Document): name = StringField() tag = StringField() class Ocorrence(Document): person = StringField() animal = CachedReferenceField( Animal, fields=['tag']) Animal.drop_collection() Ocorrence.drop_collection() a = Animal(name="Leopard", tag="heavy") a.save() self.assertEqual(Animal._cached_reference_fields, [Ocorrence.animal]) o = Ocorrence(person="teste", animal=a) o.save() p = Ocorrence(person="Wilson") p.save() self.assertEqual(Ocorrence.objects(animal=None).count(), 1) self.assertEqual( a.to_mongo(fields=['tag']), {'tag': 'heavy', "_id": a.pk}) self.assertEqual(o.to_mongo()['animal']['tag'], 'heavy') # counts Ocorrence(person="teste 2").save() Ocorrence(person="teste 3").save() count = Ocorrence.objects(animal__tag='heavy').count() self.assertEqual(count, 1) ocorrence = Ocorrence.objects(animal__tag='heavy').first() self.assertEqual(ocorrence.person, "teste") self.assertTrue(isinstance(ocorrence.animal, Animal)) def test_cached_reference_field_decimal(self): class PersonAuto(Document): name = StringField() salary = DecimalField() class SocialTest(Document): group = StringField() person = CachedReferenceField( PersonAuto, fields=('salary',)) PersonAuto.drop_collection() SocialTest.drop_collection() p = PersonAuto(name="Alberto", salary=Decimal('7000.00')) p.save() s = SocialTest(group="dev", person=p) s.save() self.assertEqual( SocialTest.objects._collection.find_one({'person.salary': 7000.00}), { '_id': s.pk, 'group': s.group, 'person': { '_id': p.pk, 'salary': 7000.00 } }) def test_cached_reference_field_reference(self): class Group(Document): name = StringField() class Person(Document): name = StringField() group = ReferenceField(Group) class SocialData(Document): obs = StringField() tags = ListField( StringField()) person = CachedReferenceField( Person, fields=('group',)) Group.drop_collection() Person.drop_collection() SocialData.drop_collection() g1 = Group(name='dev') g1.save() g2 = Group(name="designers") g2.save() p1 = Person(name="Alberto", group=g1) p1.save() p2 = Person(name="Andre", group=g1) p2.save() p3 = Person(name="Afro design", group=g2) p3.save() s1 = SocialData(obs="testing 123", person=p1, tags=['tag1', 'tag2']) s1.save() s2 = SocialData(obs="testing 321", person=p3, tags=['tag3', 'tag4']) s2.save() self.assertEqual(SocialData.objects._collection.find_one( {'tags': 'tag2'}), { '_id': s1.pk, 'obs': 'testing 123', 'tags': ['tag1', 'tag2'], 'person': { '_id': p1.pk, 'group': g1.pk } }) self.assertEqual(SocialData.objects(person__group=g2).count(), 1) self.assertEqual(SocialData.objects(person__group=g2).first(), s2) def test_cached_reference_field_update_all(self): class Person(Document): TYPES = ( ('pf', "PF"), ('pj', "PJ") ) name = StringField() tp = StringField( choices=TYPES ) father = CachedReferenceField('self', fields=('tp',)) Person.drop_collection() a1 = Person(name="Wilson Father", tp="pj") a1.save() a2 = Person(name='Wilson Junior', tp='pf', father=a1) a2.save() self.assertEqual(dict(a2.to_mongo()), { "_id": a2.pk, "name": u"Wilson Junior", "tp": u"pf", "father": { "_id": a1.pk, "tp": u"pj" } }) self.assertEqual(Person.objects(father=a1)._query, { 'father._id': a1.pk }) self.assertEqual(Person.objects(father=a1).count(), 1) Person.objects.update(set__tp="pf") Person.father.sync_all() a2.reload() self.assertEqual(dict(a2.to_mongo()), { "_id": a2.pk, "name": u"Wilson Junior", "tp": u"pf", "father": { "_id": a1.pk, "tp": u"pf" } }) def test_cached_reference_fields_on_embedded_documents(self): def build(): class Test(Document): name = StringField() type('WrongEmbeddedDocument', ( EmbeddedDocument,), { 'test': CachedReferenceField(Test) }) self.assertRaises(InvalidDocumentError, build) def test_cached_reference_auto_sync(self): class Person(Document): TYPES = ( ('pf', "PF"), ('pj', "PJ") ) name = StringField() tp = StringField( choices=TYPES ) father = CachedReferenceField('self', fields=('tp',)) Person.drop_collection() a1 = Person(name="Wilson Father", tp="pj") a1.save() a2 = Person(name='Wilson Junior', tp='pf', father=a1) a2.save() a1.tp = 'pf' a1.save() a2.reload() self.assertEqual(dict(a2.to_mongo()), { '_id': a2.pk, 'name': 'Wilson Junior', 'tp': 'pf', 'father': { '_id': a1.pk, 'tp': 'pf' } }) def test_cached_reference_auto_sync_disabled(self): class Persone(Document): TYPES = ( ('pf', "PF"), ('pj', "PJ") ) name = StringField() tp = StringField( choices=TYPES ) father = CachedReferenceField( 'self', fields=('tp',), auto_sync=False) Persone.drop_collection() a1 = Persone(name="Wilson Father", tp="pj") a1.save() a2 = Persone(name='Wilson Junior', tp='pf', father=a1) a2.save() a1.tp = 'pf' a1.save() self.assertEqual(Persone.objects._collection.find_one({'_id': a2.pk}), { '_id': a2.pk, 'name': 'Wilson Junior', 'tp': 'pf', 'father': { '_id': a1.pk, 'tp': 'pj' } }) def test_cached_reference_embedded_fields(self): class Owner(EmbeddedDocument): TPS = ( ('n', "Normal"), ('u', "Urgent") ) name = StringField() tp = StringField( verbose_name="Type", db_field="t", choices=TPS) class Animal(Document): name = StringField() tag = StringField() owner = EmbeddedDocumentField(Owner) class Ocorrence(Document): person = StringField() animal = CachedReferenceField( Animal, fields=['tag', 'owner.tp']) Animal.drop_collection() Ocorrence.drop_collection() a = Animal(name="Leopard", tag="heavy", owner=Owner(tp='u', name="Wilson Júnior") ) a.save() o = Ocorrence(person="teste", animal=a) o.save() self.assertEqual(dict(a.to_mongo(fields=['tag', 'owner.tp'])), { '_id': a.pk, 'tag': 'heavy', 'owner': { 't': 'u' } }) self.assertEqual(o.to_mongo()['animal']['tag'], 'heavy') self.assertEqual(o.to_mongo()['animal']['owner']['t'], 'u') # counts Ocorrence(person="teste 2").save() Ocorrence(person="teste 3").save() count = Ocorrence.objects( animal__tag='heavy', animal__owner__tp='u').count() self.assertEqual(count, 1) ocorrence = Ocorrence.objects( animal__tag='heavy', animal__owner__tp='u').first() self.assertEqual(ocorrence.person, "teste") self.assertTrue(isinstance(ocorrence.animal, Animal)) def test_cached_reference_embedded_list_fields(self): class Owner(EmbeddedDocument): name = StringField() tags = ListField(StringField()) class Animal(Document): name = StringField() tag = StringField() owner = EmbeddedDocumentField(Owner) class Ocorrence(Document): person = StringField() animal = CachedReferenceField( Animal, fields=['tag', 'owner.tags']) Animal.drop_collection() Ocorrence.drop_collection() a = Animal(name="Leopard", tag="heavy", owner=Owner(tags=['cool', 'funny'], name="Wilson Júnior") ) a.save() o = Ocorrence(person="teste 2", animal=a) o.save() self.assertEqual(dict(a.to_mongo(fields=['tag', 'owner.tags'])), { '_id': a.pk, 'tag': 'heavy', 'owner': { 'tags': ['cool', 'funny'] } }) self.assertEqual(o.to_mongo()['animal']['tag'], 'heavy') self.assertEqual(o.to_mongo()['animal']['owner']['tags'], ['cool', 'funny']) # counts Ocorrence(person="teste 2").save() Ocorrence(person="teste 3").save() query = Ocorrence.objects( animal__tag='heavy', animal__owner__tags='cool')._query self.assertEqual( query, {'animal.owner.tags': 'cool', 'animal.tag': 'heavy'}) ocorrence = Ocorrence.objects( animal__tag='heavy', animal__owner__tags='cool').first() self.assertEqual(ocorrence.person, "teste 2") self.assertTrue(isinstance(ocorrence.animal, Animal)) def test_objectid_reference_fields(self): class Person(Document): name = StringField() parent = ReferenceField('self', dbref=False) Person.drop_collection() p1 = Person(name="John").save() Person(name="Ross", parent=p1).save() col = Person._get_collection() data = col.find_one({'name': 'Ross'}) self.assertEqual(data['parent'], p1.pk) p = Person.objects.get(name="Ross") self.assertEqual(p.parent, p1) def test_list_item_dereference(self): """Ensure that DBRef items in ListFields are dereferenced. """ class User(Document): name = StringField() class Group(Document): members = ListField(ReferenceField(User)) User.drop_collection() Group.drop_collection() user1 = User(name='user1') user1.save() user2 = User(name='user2') user2.save() group = Group(members=[user1, user2]) group.save() group_obj = Group.objects.first() self.assertEqual(group_obj.members[0].name, user1.name) self.assertEqual(group_obj.members[1].name, user2.name) User.drop_collection() Group.drop_collection() def test_recursive_reference(self): """Ensure that ReferenceFields can reference their own documents. """ class Employee(Document): name = StringField() boss = ReferenceField('self') friends = ListField(ReferenceField('self')) Employee.drop_collection() bill = Employee(name='Bill Lumbergh') bill.save() michael = Employee(name='Michael Bolton') michael.save() samir = Employee(name='Samir Nagheenanajar') samir.save() friends = [michael, samir] peter = Employee(name='Peter Gibbons', boss=bill, friends=friends) peter.save() peter = Employee.objects.with_id(peter.id) self.assertEqual(peter.boss, bill) self.assertEqual(peter.friends, friends) def test_recursive_embedding(self): """Ensure that EmbeddedDocumentFields can contain their own documents. """ class TreeNode(EmbeddedDocument): name = StringField() children = ListField(EmbeddedDocumentField('self')) class Tree(Document): name = StringField() children = ListField(EmbeddedDocumentField('TreeNode')) Tree.drop_collection() tree = Tree(name="Tree") first_child = TreeNode(name="Child 1") tree.children.append(first_child) second_child = TreeNode(name="Child 2") first_child.children.append(second_child) tree.save() tree = Tree.objects.first() self.assertEqual(len(tree.children), 1) self.assertEqual(len(tree.children[0].children), 1) third_child = TreeNode(name="Child 3") tree.children[0].children.append(third_child) tree.save() self.assertEqual(len(tree.children), 1) self.assertEqual(tree.children[0].name, first_child.name) self.assertEqual(tree.children[0].children[0].name, second_child.name) self.assertEqual(tree.children[0].children[1].name, third_child.name) # Test updating tree.children[0].name = 'I am Child 1' tree.children[0].children[0].name = 'I am Child 2' tree.children[0].children[1].name = 'I am Child 3' tree.save() self.assertEqual(tree.children[0].name, 'I am Child 1') self.assertEqual(tree.children[0].children[0].name, 'I am Child 2') self.assertEqual(tree.children[0].children[1].name, 'I am Child 3') # Test removal self.assertEqual(len(tree.children[0].children), 2) del(tree.children[0].children[1]) tree.save() self.assertEqual(len(tree.children[0].children), 1) tree.children[0].children.pop(0) tree.save() self.assertEqual(len(tree.children[0].children), 0) self.assertEqual(tree.children[0].children, []) tree.children[0].children.insert(0, third_child) tree.children[0].children.insert(0, second_child) tree.save() self.assertEqual(len(tree.children[0].children), 2) self.assertEqual(tree.children[0].children[0].name, second_child.name) self.assertEqual(tree.children[0].children[1].name, third_child.name) def test_undefined_reference(self): """Ensure that ReferenceFields may reference undefined Documents. """ class Product(Document): name = StringField() company = ReferenceField('Company') class Company(Document): name = StringField() Product.drop_collection() Company.drop_collection() ten_gen = Company(name='10gen') ten_gen.save() mongodb = Product(name='MongoDB', company=ten_gen) mongodb.save() me = Product(name='MongoEngine') me.save() obj = Product.objects(company=ten_gen).first() self.assertEqual(obj, mongodb) self.assertEqual(obj.company, ten_gen) obj = Product.objects(company=None).first() self.assertEqual(obj, me) obj = Product.objects.get(company=None) self.assertEqual(obj, me) def test_reference_query_conversion(self): """Ensure that ReferenceFields can be queried using objects and values of the type of the primary key of the referenced object. """ class Member(Document): user_num = IntField(primary_key=True) class BlogPost(Document): title = StringField() author = ReferenceField(Member, dbref=False) Member.drop_collection() BlogPost.drop_collection() m1 = Member(user_num=1) m1.save() m2 = Member(user_num=2) m2.save() post1 = BlogPost(title='post 1', author=m1) post1.save() post2 = BlogPost(title='post 2', author=m2) post2.save() post = BlogPost.objects(author=m1).first() self.assertEqual(post.id, post1.id) post = BlogPost.objects(author=m2).first() self.assertEqual(post.id, post2.id) Member.drop_collection() BlogPost.drop_collection() def test_reference_query_conversion_dbref(self): """Ensure that ReferenceFields can be queried using objects and values of the type of the primary key of the referenced object. """ class Member(Document): user_num = IntField(primary_key=True) class BlogPost(Document): title = StringField() author = ReferenceField(Member, dbref=True) Member.drop_collection() BlogPost.drop_collection() m1 = Member(user_num=1) m1.save() m2 = Member(user_num=2) m2.save() post1 = BlogPost(title='post 1', author=m1) post1.save() post2 = BlogPost(title='post 2', author=m2) post2.save() post = BlogPost.objects(author=m1).first() self.assertEqual(post.id, post1.id) post = BlogPost.objects(author=m2).first() self.assertEqual(post.id, post2.id) Member.drop_collection() BlogPost.drop_collection() def test_reference_class_with_abstract_parent(self): """Ensure that a class with an abstract parent can be referenced. """ class Sibling(Document): name = StringField() meta = {"abstract": True} class Sister(Sibling): pass class Brother(Sibling): sibling = ReferenceField(Sibling) Sister.drop_collection() Brother.drop_collection() sister = Sister(name="Alice") sister.save() brother = Brother(name="Bob", sibling=sister) brother.save() self.assertEquals(Brother.objects[0].sibling.name, sister.name) Sister.drop_collection() Brother.drop_collection() def test_reference_abstract_class(self): """Ensure that an abstract class instance cannot be used in the reference of that abstract class. """ class Sibling(Document): name = StringField() meta = {"abstract": True} class Sister(Sibling): pass class Brother(Sibling): sibling = ReferenceField(Sibling) Sister.drop_collection() Brother.drop_collection() sister = Sibling(name="Alice") brother = Brother(name="Bob", sibling=sister) self.assertRaises(ValidationError, brother.save) Sister.drop_collection() Brother.drop_collection() def test_abstract_reference_base_type(self): """Ensure that an an abstract reference fails validation when given a Document that does not inherit from the abstract type. """ class Sibling(Document): name = StringField() meta = {"abstract": True} class Brother(Sibling): sibling = ReferenceField(Sibling) class Mother(Document): name = StringField() Brother.drop_collection() Mother.drop_collection() mother = Mother(name="Carol") mother.save() brother = Brother(name="Bob", sibling=mother) self.assertRaises(ValidationError, brother.save) Brother.drop_collection() Mother.drop_collection() def test_generic_reference(self): """Ensure that a GenericReferenceField properly dereferences items. """ class Link(Document): title = StringField() meta = {'allow_inheritance': False} class Post(Document): title = StringField() class Bookmark(Document): bookmark_object = GenericReferenceField() Link.drop_collection() Post.drop_collection() Bookmark.drop_collection() link_1 = Link(title="Pitchfork") link_1.save() post_1 = Post(title="Behind the Scenes of the Pavement Reunion") post_1.save() bm = Bookmark(bookmark_object=post_1) bm.save() bm = Bookmark.objects(bookmark_object=post_1).first() self.assertEqual(bm.bookmark_object, post_1) self.assertTrue(isinstance(bm.bookmark_object, Post)) bm.bookmark_object = link_1 bm.save() bm = Bookmark.objects(bookmark_object=link_1).first() self.assertEqual(bm.bookmark_object, link_1) self.assertTrue(isinstance(bm.bookmark_object, Link)) Link.drop_collection() Post.drop_collection() Bookmark.drop_collection() def test_generic_reference_list(self): """Ensure that a ListField properly dereferences generic references. """ class Link(Document): title = StringField() class Post(Document): title = StringField() class User(Document): bookmarks = ListField(GenericReferenceField()) Link.drop_collection() Post.drop_collection() User.drop_collection() link_1 = Link(title="Pitchfork") link_1.save() post_1 = Post(title="Behind the Scenes of the Pavement Reunion") post_1.save() user = User(bookmarks=[post_1, link_1]) user.save() user = User.objects(bookmarks__all=[post_1, link_1]).first() self.assertEqual(user.bookmarks[0], post_1) self.assertEqual(user.bookmarks[1], link_1) Link.drop_collection() Post.drop_collection() User.drop_collection() def test_generic_reference_document_not_registered(self): """Ensure dereferencing out of the document registry throws a `NotRegistered` error. """ class Link(Document): title = StringField() class User(Document): bookmarks = ListField(GenericReferenceField()) Link.drop_collection() User.drop_collection() link_1 = Link(title="Pitchfork") link_1.save() user = User(bookmarks=[link_1]) user.save() # Mimic User and Link definitions being in a different file # and the Link model not being imported in the User file. del(_document_registry["Link"]) user = User.objects.first() try: user.bookmarks raise AssertionError("Link was removed from the registry") except NotRegistered: pass Link.drop_collection() User.drop_collection() def test_generic_reference_is_none(self): class Person(Document): name = StringField() city = GenericReferenceField() Person.drop_collection() Person(name="Wilson Jr").save() self.assertEqual(repr(Person.objects(city=None)), "[<Person: Person object>]") def test_generic_reference_choices(self): """Ensure that a GenericReferenceField can handle choices """ class Link(Document): title = StringField() class Post(Document): title = StringField() class Bookmark(Document): bookmark_object = GenericReferenceField(choices=(Post,)) Link.drop_collection() Post.drop_collection() Bookmark.drop_collection() link_1 = Link(title="Pitchfork") link_1.save() post_1 = Post(title="Behind the Scenes of the Pavement Reunion") post_1.save() bm = Bookmark(bookmark_object=link_1) self.assertRaises(ValidationError, bm.validate) bm = Bookmark(bookmark_object=post_1) bm.save() bm = Bookmark.objects.first() self.assertEqual(bm.bookmark_object, post_1) def test_generic_reference_string_choices(self): """Ensure that a GenericReferenceField can handle choices as strings """ class Link(Document): title = StringField() class Post(Document): title = StringField() class Bookmark(Document): bookmark_object = GenericReferenceField(choices=('Post', Link)) Link.drop_collection() Post.drop_collection() Bookmark.drop_collection() link_1 = Link(title="Pitchfork") link_1.save() post_1 = Post(title="Behind the Scenes of the Pavement Reunion") post_1.save() bm = Bookmark(bookmark_object=link_1) bm.save() bm = Bookmark(bookmark_object=post_1) bm.save() bm = Bookmark(bookmark_object=bm) self.assertRaises(ValidationError, bm.validate) def test_generic_reference_choices_no_dereference(self): """Ensure that a GenericReferenceField can handle choices on non-derefenreced (i.e. DBRef) elements """ class Post(Document): title = StringField() class Bookmark(Document): bookmark_object = GenericReferenceField(choices=(Post, )) other_field = StringField() Post.drop_collection() Bookmark.drop_collection() post_1 = Post(title="Behind the Scenes of the Pavement Reunion") post_1.save() bm = Bookmark(bookmark_object=post_1) bm.save() bm = Bookmark.objects.get(id=bm.id) # bookmark_object is now a DBRef bm.other_field = 'dummy_change' bm.save() def test_generic_reference_list_choices(self): """Ensure that a ListField properly dereferences generic references and respects choices. """ class Link(Document): title = StringField() class Post(Document): title = StringField() class User(Document): bookmarks = ListField(GenericReferenceField(choices=(Post,))) Link.drop_collection() Post.drop_collection() User.drop_collection() link_1 = Link(title="Pitchfork") link_1.save() post_1 = Post(title="Behind the Scenes of the Pavement Reunion") post_1.save() user = User(bookmarks=[link_1]) self.assertRaises(ValidationError, user.validate) user = User(bookmarks=[post_1]) user.save() user = User.objects.first() self.assertEqual(user.bookmarks, [post_1]) Link.drop_collection() Post.drop_collection() User.drop_collection() def test_generic_reference_list_item_modification(self): """Ensure that modifications of related documents (through generic reference) don't influence on querying """ class Post(Document): title = StringField() class User(Document): username = StringField() bookmarks = ListField(GenericReferenceField()) Post.drop_collection() User.drop_collection() post_1 = Post(title="Behind the Scenes of the Pavement Reunion") post_1.save() user = User(bookmarks=[post_1]) user.save() post_1.title = "Title was modified" user.username = "New username" user.save() user = User.objects(bookmarks__all=[post_1]).first() self.assertNotEqual(user, None) self.assertEqual(user.bookmarks[0], post_1) Post.drop_collection() User.drop_collection() def test_binary_fields(self): """Ensure that binary fields can be stored and retrieved. """ class Attachment(Document): content_type = StringField() blob = BinaryField() BLOB = b('\xe6\x00\xc4\xff\x07') MIME_TYPE = 'application/octet-stream' Attachment.drop_collection() attachment = Attachment(content_type=MIME_TYPE, blob=BLOB) attachment.save() attachment_1 = Attachment.objects().first() self.assertEqual(MIME_TYPE, attachment_1.content_type) self.assertEqual(BLOB, bin_type(attachment_1.blob)) Attachment.drop_collection() def test_binary_validation(self): """Ensure that invalid values cannot be assigned to binary fields. """ class Attachment(Document): blob = BinaryField() class AttachmentRequired(Document): blob = BinaryField(required=True) class AttachmentSizeLimit(Document): blob = BinaryField(max_bytes=4) Attachment.drop_collection() AttachmentRequired.drop_collection() AttachmentSizeLimit.drop_collection() attachment = Attachment() attachment.validate() attachment.blob = 2 self.assertRaises(ValidationError, attachment.validate) attachment_required = AttachmentRequired() self.assertRaises(ValidationError, attachment_required.validate) attachment_required.blob = Binary(b('\xe6\x00\xc4\xff\x07')) attachment_required.validate() attachment_size_limit = AttachmentSizeLimit( blob=b('\xe6\x00\xc4\xff\x07')) self.assertRaises(ValidationError, attachment_size_limit.validate) attachment_size_limit.blob = b('\xe6\x00\xc4\xff') attachment_size_limit.validate() Attachment.drop_collection() AttachmentRequired.drop_collection() AttachmentSizeLimit.drop_collection() def test_binary_field_primary(self): class Attachment(Document): id = BinaryField(primary_key=True) Attachment.drop_collection() binary_id = uuid.uuid4().bytes att = Attachment(id=binary_id).save() self.assertEqual(1, Attachment.objects.count()) self.assertEqual(1, Attachment.objects.filter(id=att.id).count()) # TODO use assertIsNotNone once Python 2.6 support is dropped self.assertTrue(Attachment.objects.filter(id=att.id).first() is not None) att.delete() self.assertEqual(0, Attachment.objects.count()) def test_binary_field_primary_filter_by_binary_pk_as_str(self): raise SkipTest("Querying by id as string is not currently supported") class Attachment(Document): id = BinaryField(primary_key=True) Attachment.drop_collection() binary_id = uuid.uuid4().bytes att = Attachment(id=binary_id).save() self.assertEqual(1, Attachment.objects.filter(id=binary_id).count()) # TODO use assertIsNotNone once Python 2.6 support is dropped self.assertTrue(Attachment.objects.filter(id=binary_id).first() is not None) att.delete() self.assertEqual(0, Attachment.objects.count()) def test_choices_validation(self): """Ensure that value is in a container of allowed values. """ class Shirt(Document): size = StringField(max_length=3, choices=( ('S', 'Small'), ('M', 'Medium'), ('L', 'Large'), ('XL', 'Extra Large'), ('XXL', 'Extra Extra Large'))) Shirt.drop_collection() shirt = Shirt() shirt.validate() shirt.size = "S" shirt.validate() shirt.size = "XS" self.assertRaises(ValidationError, shirt.validate) Shirt.drop_collection() def test_choices_validation_documents(self): """ Ensure fields with document choices validate given a valid choice. """ class UserComments(EmbeddedDocument): author = StringField() message = StringField() class BlogPost(Document): comments = ListField( GenericEmbeddedDocumentField(choices=(UserComments,)) ) # Ensure Validation Passes BlogPost(comments=[ UserComments(author='user2', message='message2'), ]).save() def test_choices_validation_documents_invalid(self): """ Ensure fields with document choices validate given an invalid choice. This should throw a ValidationError exception. """ class UserComments(EmbeddedDocument): author = StringField() message = StringField() class ModeratorComments(EmbeddedDocument): author = StringField() message = StringField() class BlogPost(Document): comments = ListField( GenericEmbeddedDocumentField(choices=(UserComments,)) ) # Single Entry Failure post = BlogPost(comments=[ ModeratorComments(author='mod1', message='message1'), ]) self.assertRaises(ValidationError, post.save) # Mixed Entry Failure post = BlogPost(comments=[ ModeratorComments(author='mod1', message='message1'), UserComments(author='user2', message='message2'), ]) self.assertRaises(ValidationError, post.save) def test_choices_validation_documents_inheritance(self): """ Ensure fields with document choices validate given subclass of choice. """ class Comments(EmbeddedDocument): meta = { 'abstract': True } author = StringField() message = StringField() class UserComments(Comments): pass class BlogPost(Document): comments = ListField( GenericEmbeddedDocumentField(choices=(Comments,)) ) # Save Valid EmbeddedDocument Type BlogPost(comments=[ UserComments(author='user2', message='message2'), ]).save() def test_choices_get_field_display(self): """Test dynamic helper for returning the display value of a choices field. """ class Shirt(Document): size = StringField(max_length=3, choices=( ('S', 'Small'), ('M', 'Medium'), ('L', 'Large'), ('XL', 'Extra Large'), ('XXL', 'Extra Extra Large'))) style = StringField(max_length=3, choices=( ('S', 'Small'), ('B', 'Baggy'), ('W', 'wide')), default='S') Shirt.drop_collection() shirt = Shirt() self.assertEqual(shirt.get_size_display(), None) self.assertEqual(shirt.get_style_display(), 'Small') shirt.size = "XXL" shirt.style = "B" self.assertEqual(shirt.get_size_display(), 'Extra Extra Large') self.assertEqual(shirt.get_style_display(), 'Baggy') # Set as Z - an invalid choice shirt.size = "Z" shirt.style = "Z" self.assertEqual(shirt.get_size_display(), 'Z') self.assertEqual(shirt.get_style_display(), 'Z') self.assertRaises(ValidationError, shirt.validate) Shirt.drop_collection() def test_simple_choices_validation(self): """Ensure that value is in a container of allowed values. """ class Shirt(Document): size = StringField(max_length=3, choices=('S', 'M', 'L', 'XL', 'XXL')) Shirt.drop_collection() shirt = Shirt() shirt.validate() shirt.size = "S" shirt.validate() shirt.size = "XS" self.assertRaises(ValidationError, shirt.validate) Shirt.drop_collection() def test_simple_choices_get_field_display(self): """Test dynamic helper for returning the display value of a choices field. """ class Shirt(Document): size = StringField(max_length=3, choices=('S', 'M', 'L', 'XL', 'XXL')) style = StringField(max_length=3, choices=('Small', 'Baggy', 'wide'), default='Small') Shirt.drop_collection() shirt = Shirt() self.assertEqual(shirt.get_size_display(), None) self.assertEqual(shirt.get_style_display(), 'Small') shirt.size = "XXL" shirt.style = "Baggy" self.assertEqual(shirt.get_size_display(), 'XXL') self.assertEqual(shirt.get_style_display(), 'Baggy') # Set as Z - an invalid choice shirt.size = "Z" shirt.style = "Z" self.assertEqual(shirt.get_size_display(), 'Z') self.assertEqual(shirt.get_style_display(), 'Z') self.assertRaises(ValidationError, shirt.validate) Shirt.drop_collection() def test_simple_choices_validation_invalid_value(self): """Ensure that error messages are correct. """ SIZES = ('S', 'M', 'L', 'XL', 'XXL') COLORS = (('R', 'Red'), ('B', 'Blue')) SIZE_MESSAGE = u"Value must be one of ('S', 'M', 'L', 'XL', 'XXL')" COLOR_MESSAGE = u"Value must be one of ['R', 'B']" class Shirt(Document): size = StringField(max_length=3, choices=SIZES) color = StringField(max_length=1, choices=COLORS) Shirt.drop_collection() shirt = Shirt() shirt.validate() shirt.size = "S" shirt.color = "R" shirt.validate() shirt.size = "XS" shirt.color = "G" try: shirt.validate() except ValidationError, error: # get the validation rules error_dict = error.to_dict() self.assertEqual(error_dict['size'], SIZE_MESSAGE) self.assertEqual(error_dict['color'], COLOR_MESSAGE) Shirt.drop_collection() def test_ensure_unique_default_instances(self): """Ensure that every field has it's own unique default instance.""" class D(Document): data = DictField() data2 = DictField(default=lambda: {}) d1 = D() d1.data['foo'] = 'bar' d1.data2['foo'] = 'bar' d2 = D() self.assertEqual(d2.data, {}) self.assertEqual(d2.data2, {}) def test_sequence_field(self): class Person(Document): id = SequenceField(primary_key=True) name = StringField() self.db['mongoengine.counters'].drop() Person.drop_collection() for x in xrange(10): Person(name="Person %s" % x).save() c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'}) self.assertEqual(c['next'], 10) ids = [i.id for i in Person.objects] self.assertEqual(ids, range(1, 11)) c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'}) self.assertEqual(c['next'], 10) Person.id.set_next_value(1000) c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'}) self.assertEqual(c['next'], 1000) def test_sequence_field_get_next_value(self): class Person(Document): id = SequenceField(primary_key=True) name = StringField() self.db['mongoengine.counters'].drop() Person.drop_collection() for x in xrange(10): Person(name="Person %s" % x).save() self.assertEqual(Person.id.get_next_value(), 11) self.db['mongoengine.counters'].drop() self.assertEqual(Person.id.get_next_value(), 1) class Person(Document): id = SequenceField(primary_key=True, value_decorator=str) name = StringField() self.db['mongoengine.counters'].drop() Person.drop_collection() for x in xrange(10): Person(name="Person %s" % x).save() self.assertEqual(Person.id.get_next_value(), '11') self.db['mongoengine.counters'].drop() self.assertEqual(Person.id.get_next_value(), '1') def test_sequence_field_sequence_name(self): class Person(Document): id = SequenceField(primary_key=True, sequence_name='jelly') name = StringField() self.db['mongoengine.counters'].drop() Person.drop_collection() for x in xrange(10): Person(name="Person %s" % x).save() c = self.db['mongoengine.counters'].find_one({'_id': 'jelly.id'}) self.assertEqual(c['next'], 10) ids = [i.id for i in Person.objects] self.assertEqual(ids, range(1, 11)) c = self.db['mongoengine.counters'].find_one({'_id': 'jelly.id'}) self.assertEqual(c['next'], 10) Person.id.set_next_value(1000) c = self.db['mongoengine.counters'].find_one({'_id': 'jelly.id'}) self.assertEqual(c['next'], 1000) def test_multiple_sequence_fields(self): class Person(Document): id = SequenceField(primary_key=True) counter = SequenceField() name = StringField() self.db['mongoengine.counters'].drop() Person.drop_collection() for x in xrange(10): Person(name="Person %s" % x).save() c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'}) self.assertEqual(c['next'], 10) ids = [i.id for i in Person.objects] self.assertEqual(ids, range(1, 11)) counters = [i.counter for i in Person.objects] self.assertEqual(counters, range(1, 11)) c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'}) self.assertEqual(c['next'], 10) Person.id.set_next_value(1000) c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'}) self.assertEqual(c['next'], 1000) Person.counter.set_next_value(999) c = self.db['mongoengine.counters'].find_one({'_id': 'person.counter'}) self.assertEqual(c['next'], 999) def test_sequence_fields_reload(self): class Animal(Document): counter = SequenceField() name = StringField() self.db['mongoengine.counters'].drop() Animal.drop_collection() a = Animal(name="Boi").save() self.assertEqual(a.counter, 1) a.reload() self.assertEqual(a.counter, 1) a.counter = None self.assertEqual(a.counter, 2) a.save() self.assertEqual(a.counter, 2) a = Animal.objects.first() self.assertEqual(a.counter, 2) a.reload() self.assertEqual(a.counter, 2) def test_multiple_sequence_fields_on_docs(self): class Animal(Document): id = SequenceField(primary_key=True) name = StringField() class Person(Document): id = SequenceField(primary_key=True) name = StringField() self.db['mongoengine.counters'].drop() Animal.drop_collection() Person.drop_collection() for x in xrange(10): Animal(name="Animal %s" % x).save() Person(name="Person %s" % x).save() c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'}) self.assertEqual(c['next'], 10) c = self.db['mongoengine.counters'].find_one({'_id': 'animal.id'}) self.assertEqual(c['next'], 10) ids = [i.id for i in Person.objects] self.assertEqual(ids, range(1, 11)) id = [i.id for i in Animal.objects] self.assertEqual(id, range(1, 11)) c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'}) self.assertEqual(c['next'], 10) c = self.db['mongoengine.counters'].find_one({'_id': 'animal.id'}) self.assertEqual(c['next'], 10) def test_sequence_field_value_decorator(self): class Person(Document): id = SequenceField(primary_key=True, value_decorator=str) name = StringField() self.db['mongoengine.counters'].drop() Person.drop_collection() for x in xrange(10): p = Person(name="Person %s" % x) p.save() c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'}) self.assertEqual(c['next'], 10) ids = [i.id for i in Person.objects] self.assertEqual(ids, map(str, range(1, 11))) c = self.db['mongoengine.counters'].find_one({'_id': 'person.id'}) self.assertEqual(c['next'], 10) def test_embedded_sequence_field(self): class Comment(EmbeddedDocument): id = SequenceField() content = StringField(required=True) class Post(Document): title = StringField(required=True) comments = ListField(EmbeddedDocumentField(Comment)) self.db['mongoengine.counters'].drop() Post.drop_collection() Post(title="MongoEngine", comments=[Comment(content="NoSQL Rocks"), Comment(content="MongoEngine Rocks")]).save() c = self.db['mongoengine.counters'].find_one({'_id': 'comment.id'}) self.assertEqual(c['next'], 2) post = Post.objects.first() self.assertEqual(1, post.comments[0].id) self.assertEqual(2, post.comments[1].id) def test_inherited_sequencefield(self): class Base(Document): name = StringField() counter = SequenceField() meta = {'abstract': True} class Foo(Base): pass class Bar(Base): pass bar = Bar(name='Bar') bar.save() foo = Foo(name='Foo') foo.save() self.assertTrue('base.counter' in self.db['mongoengine.counters'].find().distinct('_id')) self.assertFalse(('foo.counter' or 'bar.counter') in self.db['mongoengine.counters'].find().distinct('_id')) self.assertNotEqual(foo.counter, bar.counter) self.assertEqual(foo._fields['counter'].owner_document, Base) self.assertEqual(bar._fields['counter'].owner_document, Base) def test_no_inherited_sequencefield(self): class Base(Document): name = StringField() meta = {'abstract': True} class Foo(Base): counter = SequenceField() class Bar(Base): counter = SequenceField() bar = Bar(name='Bar') bar.save() foo = Foo(name='Foo') foo.save() self.assertFalse('base.counter' in self.db['mongoengine.counters'].find().distinct('_id')) self.assertTrue(('foo.counter' and 'bar.counter') in self.db['mongoengine.counters'].find().distinct('_id')) self.assertEqual(foo.counter, bar.counter) self.assertEqual(foo._fields['counter'].owner_document, Foo) self.assertEqual(bar._fields['counter'].owner_document, Bar) def test_generic_embedded_document(self): class Car(EmbeddedDocument): name = StringField() class Dish(EmbeddedDocument): food = StringField(required=True) number = IntField() class Person(Document): name = StringField() like = GenericEmbeddedDocumentField() Person.drop_collection() person = Person(name='Test User') person.like = Car(name='Fiat') person.save() person = Person.objects.first() self.assertTrue(isinstance(person.like, Car)) person.like = Dish(food="arroz", number=15) person.save() person = Person.objects.first() self.assertTrue(isinstance(person.like, Dish)) def test_generic_embedded_document_choices(self): """Ensure you can limit GenericEmbeddedDocument choices """ class Car(EmbeddedDocument): name = StringField() class Dish(EmbeddedDocument): food = StringField(required=True) number = IntField() class Person(Document): name = StringField() like = GenericEmbeddedDocumentField(choices=(Dish,)) Person.drop_collection() person = Person(name='Test User') person.like = Car(name='Fiat') self.assertRaises(ValidationError, person.validate) person.like = Dish(food="arroz", number=15) person.save() person = Person.objects.first() self.assertTrue(isinstance(person.like, Dish)) def test_generic_list_embedded_document_choices(self): """Ensure you can limit GenericEmbeddedDocument choices inside a list field """ class Car(EmbeddedDocument): name = StringField() class Dish(EmbeddedDocument): food = StringField(required=True) number = IntField() class Person(Document): name = StringField() likes = ListField(GenericEmbeddedDocumentField(choices=(Dish,))) Person.drop_collection() person = Person(name='Test User') person.likes = [Car(name='Fiat')] self.assertRaises(ValidationError, person.validate) person.likes = [Dish(food="arroz", number=15)] person.save() person = Person.objects.first() self.assertTrue(isinstance(person.likes[0], Dish)) def test_recursive_validation(self): """Ensure that a validation result to_dict is available. """ class Author(EmbeddedDocument): name = StringField(required=True) class Comment(EmbeddedDocument): author = EmbeddedDocumentField(Author, required=True) content = StringField(required=True) class Post(Document): title = StringField(required=True) comments = ListField(EmbeddedDocumentField(Comment)) bob = Author(name='Bob') post = Post(title='hello world') post.comments.append(Comment(content='hello', author=bob)) post.comments.append(Comment(author=bob)) self.assertRaises(ValidationError, post.validate) try: post.validate() except ValidationError, error: # ValidationError.errors property self.assertTrue(hasattr(error, 'errors')) self.assertTrue(isinstance(error.errors, dict)) self.assertTrue('comments' in error.errors) self.assertTrue(1 in error.errors['comments']) self.assertTrue(isinstance(error.errors['comments'][1]['content'], ValidationError)) # ValidationError.schema property error_dict = error.to_dict() self.assertTrue(isinstance(error_dict, dict)) self.assertTrue('comments' in error_dict) self.assertTrue(1 in error_dict['comments']) self.assertTrue('content' in error_dict['comments'][1]) self.assertEqual(error_dict['comments'][1]['content'], u'Field is required') post.comments[1].content = 'here we go' post.validate() def test_email_field(self): class User(Document): email = EmailField() user = User(email="[email protected]") self.assertTrue(user.validate() is None) user = User(email="[email protected]") self.assertTrue(user.validate() is None) user = User(email=("Kofq@rhom0e4klgauOhpbpNdogawnyIKvQS0wk2mjqrgGQ5S" "aJIazqqWkm7.net")) self.assertTrue(user.validate() is None) user = User(email="[email protected]") self.assertTrue(user.validate() is None) user = User(email='me@localhost') self.assertRaises(ValidationError, user.validate) user = User(email="[email protected].") self.assertRaises(ValidationError, user.validate) def test_email_field_honors_regex(self): class User(Document): email = EmailField(regex=r'\[email protected]') # Fails regex validation user = User(email='[email protected]') self.assertRaises(ValidationError, user.validate) # Passes regex validation user = User(email='[email protected]') self.assertTrue(user.validate() is None) def test_tuples_as_tuples(self): """ Ensure that tuples remain tuples when they are inside a ComplexBaseField """ from mongoengine.base import BaseField class EnumField(BaseField): def __init__(self, **kwargs): super(EnumField, self).__init__(**kwargs) def to_mongo(self, value, **kwargs): return value def to_python(self, value): return tuple(value) class TestDoc(Document): items = ListField(EnumField()) TestDoc.drop_collection() tuples = [(100, 'Testing')] doc = TestDoc() doc.items = tuples doc.save() x = TestDoc.objects().get() self.assertTrue(x is not None) self.assertTrue(len(x.items) == 1) self.assertTrue(tuple(x.items[0]) in tuples) self.assertTrue(x.items[0] in tuples) def test_dynamic_fields_class(self): class Doc2(Document): field_1 = StringField(db_field='f') class Doc(Document): my_id = IntField(required=True, unique=True, primary_key=True) embed_me = DynamicField(db_field='e') field_x = StringField(db_field='x') Doc.drop_collection() Doc2.drop_collection() doc2 = Doc2(field_1="hello") doc = Doc(my_id=1, embed_me=doc2, field_x="x") self.assertRaises(OperationError, doc.save) doc2.save() doc.save() doc = Doc.objects.get() self.assertEqual(doc.embed_me.field_1, "hello") def test_dynamic_fields_embedded_class(self): class Embed(EmbeddedDocument): field_1 = StringField(db_field='f') class Doc(Document): my_id = IntField(required=True, unique=True, primary_key=True) embed_me = DynamicField(db_field='e') field_x = StringField(db_field='x') Doc.drop_collection() Doc(my_id=1, embed_me=Embed(field_1="hello"), field_x="x").save() doc = Doc.objects.get() self.assertEqual(doc.embed_me.field_1, "hello") def test_dynamicfield_dump_document(self): """Ensure a DynamicField can handle another document's dump """ class Doc(Document): field = DynamicField() class ToEmbed(Document): id = IntField(primary_key=True, default=1) recursive = DynamicField() class ToEmbedParent(Document): id = IntField(primary_key=True, default=1) recursive = DynamicField() meta = {'allow_inheritance': True} class ToEmbedChild(ToEmbedParent): pass to_embed_recursive = ToEmbed(id=1).save() to_embed = ToEmbed(id=2, recursive=to_embed_recursive).save() doc = Doc(field=to_embed) doc.save() assert isinstance(doc.field, ToEmbed) assert doc.field == to_embed # Same thing with a Document with a _cls field to_embed_recursive = ToEmbedChild(id=1).save() to_embed_child = ToEmbedChild(id=2, recursive=to_embed_recursive).save() doc = Doc(field=to_embed_child) doc.save() assert isinstance(doc.field, ToEmbedChild) assert doc.field == to_embed_child def test_invalid_dict_value(self): class DictFieldTest(Document): dictionary = DictField(required=True) DictFieldTest.drop_collection() test = DictFieldTest(dictionary=None) test.dictionary # Just access to test getter self.assertRaises(ValidationError, test.validate) test = DictFieldTest(dictionary=False) test.dictionary # Just access to test getter self.assertRaises(ValidationError, test.validate) def test_cls_field(self): class Animal(Document): meta = {'allow_inheritance': True} class Fish(Animal): pass class Mammal(Animal): pass class Dog(Mammal): pass class Human(Mammal): pass Animal.objects.delete() Dog().save() Fish().save() Human().save() self.assertEquals(Animal.objects(_cls__in=["Animal.Mammal.Dog", "Animal.Fish"]).count(), 2) self.assertEquals(Animal.objects(_cls__in=["Animal.Fish.Guppy"]).count(), 0) def test_sparse_field(self): class Doc(Document): name = StringField(required=False, unique=True, sparse=True) try: Doc().save() Doc().save() except Exception: self.fail() def test_undefined_field_exception(self): """Tests if a `FieldDoesNotExist` exception is raised when trying to instanciate a document with a field that's not defined. """ class Doc(Document): foo = StringField(db_field='f') def test(): Doc(bar='test') self.assertRaises(FieldDoesNotExist, test) def test_undefined_field_exception_with_strict(self): """Tests if a `FieldDoesNotExist` exception is raised when trying to instanciate a document with a field that's not defined, even when strict is set to False. """ class Doc(Document): foo = StringField(db_field='f') meta = {'strict': False} def test(): Doc(bar='test') self.assertRaises(FieldDoesNotExist, test) class EmbeddedDocumentListFieldTestCase(unittest.TestCase): @classmethod def setUpClass(cls): cls.db = connect(db='EmbeddedDocumentListFieldTestCase') class Comments(EmbeddedDocument): author = StringField() message = StringField() class BlogPost(Document): comments = EmbeddedDocumentListField(Comments) cls.Comments = Comments cls.BlogPost = BlogPost def setUp(self): """ Create two BlogPost entries in the database, each with several EmbeddedDocuments. """ self.post1 = self.BlogPost(comments=[ self.Comments(author='user1', message='message1'), self.Comments(author='user2', message='message1') ]).save() self.post2 = self.BlogPost(comments=[ self.Comments(author='user2', message='message2'), self.Comments(author='user2', message='message3'), self.Comments(author='user3', message='message1') ]).save() def tearDown(self): self.BlogPost.drop_collection() @classmethod def tearDownClass(cls): cls.db.drop_database('EmbeddedDocumentListFieldTestCase') def test_no_keyword_filter(self): """ Tests the filter method of a List of Embedded Documents with a no keyword. """ filtered = self.post1.comments.filter() # Ensure nothing was changed # < 2.6 Incompatible > # self.assertListEqual(filtered, self.post1.comments) self.assertEqual(filtered, self.post1.comments) def test_single_keyword_filter(self): """ Tests the filter method of a List of Embedded Documents with a single keyword. """ filtered = self.post1.comments.filter(author='user1') # Ensure only 1 entry was returned. self.assertEqual(len(filtered), 1) # Ensure the entry returned is the correct entry. self.assertEqual(filtered[0].author, 'user1') def test_multi_keyword_filter(self): """ Tests the filter method of a List of Embedded Documents with multiple keywords. """ filtered = self.post2.comments.filter( author='user2', message='message2' ) # Ensure only 1 entry was returned. self.assertEqual(len(filtered), 1) # Ensure the entry returned is the correct entry. self.assertEqual(filtered[0].author, 'user2') self.assertEqual(filtered[0].message, 'message2') def test_chained_filter(self): """ Tests chained filter methods of a List of Embedded Documents """ filtered = self.post2.comments.filter(author='user2').filter( message='message2' ) # Ensure only 1 entry was returned. self.assertEqual(len(filtered), 1) # Ensure the entry returned is the correct entry. self.assertEqual(filtered[0].author, 'user2') self.assertEqual(filtered[0].message, 'message2') def test_unknown_keyword_filter(self): """ Tests the filter method of a List of Embedded Documents when the keyword is not a known keyword. """ # < 2.6 Incompatible > # with self.assertRaises(AttributeError): # self.post2.comments.filter(year=2) self.assertRaises(AttributeError, self.post2.comments.filter, year=2) def test_no_keyword_exclude(self): """ Tests the exclude method of a List of Embedded Documents with a no keyword. """ filtered = self.post1.comments.exclude() # Ensure everything was removed # < 2.6 Incompatible > # self.assertListEqual(filtered, []) self.assertEqual(filtered, []) def test_single_keyword_exclude(self): """ Tests the exclude method of a List of Embedded Documents with a single keyword. """ excluded = self.post1.comments.exclude(author='user1') # Ensure only 1 entry was returned. self.assertEqual(len(excluded), 1) # Ensure the entry returned is the correct entry. self.assertEqual(excluded[0].author, 'user2') def test_multi_keyword_exclude(self): """ Tests the exclude method of a List of Embedded Documents with multiple keywords. """ excluded = self.post2.comments.exclude( author='user3', message='message1' ) # Ensure only 2 entries were returned. self.assertEqual(len(excluded), 2) # Ensure the entries returned are the correct entries. self.assertEqual(excluded[0].author, 'user2') self.assertEqual(excluded[1].author, 'user2') def test_non_matching_exclude(self): """ Tests the exclude method of a List of Embedded Documents when the keyword does not match any entries. """ excluded = self.post2.comments.exclude(author='user4') # Ensure the 3 entries still exist. self.assertEqual(len(excluded), 3) def test_unknown_keyword_exclude(self): """ Tests the exclude method of a List of Embedded Documents when the keyword is not a known keyword. """ # < 2.6 Incompatible > # with self.assertRaises(AttributeError): # self.post2.comments.exclude(year=2) self.assertRaises(AttributeError, self.post2.comments.exclude, year=2) def test_chained_filter_exclude(self): """ Tests the exclude method after a filter method of a List of Embedded Documents. """ excluded = self.post2.comments.filter(author='user2').exclude( message='message2' ) # Ensure only 1 entry was returned. self.assertEqual(len(excluded), 1) # Ensure the entry returned is the correct entry. self.assertEqual(excluded[0].author, 'user2') self.assertEqual(excluded[0].message, 'message3') def test_count(self): """ Tests the count method of a List of Embedded Documents. """ self.assertEqual(self.post1.comments.count(), 2) self.assertEqual(self.post1.comments.count(), len(self.post1.comments)) def test_filtered_count(self): """ Tests the filter + count method of a List of Embedded Documents. """ count = self.post1.comments.filter(author='user1').count() self.assertEqual(count, 1) def test_single_keyword_get(self): """ Tests the get method of a List of Embedded Documents using a single keyword. """ comment = self.post1.comments.get(author='user1') # < 2.6 Incompatible > # self.assertIsInstance(comment, self.Comments) self.assertTrue(isinstance(comment, self.Comments)) self.assertEqual(comment.author, 'user1') def test_multi_keyword_get(self): """ Tests the get method of a List of Embedded Documents using multiple keywords. """ comment = self.post2.comments.get(author='user2', message='message2') # < 2.6 Incompatible > # self.assertIsInstance(comment, self.Comments) self.assertTrue(isinstance(comment, self.Comments)) self.assertEqual(comment.author, 'user2') self.assertEqual(comment.message, 'message2') def test_no_keyword_multiple_return_get(self): """ Tests the get method of a List of Embedded Documents without a keyword to return multiple documents. """ # < 2.6 Incompatible > # with self.assertRaises(MultipleObjectsReturned): # self.post1.comments.get() self.assertRaises(MultipleObjectsReturned, self.post1.comments.get) def test_keyword_multiple_return_get(self): """ Tests the get method of a List of Embedded Documents with a keyword to return multiple documents. """ # < 2.6 Incompatible > # with self.assertRaises(MultipleObjectsReturned): # self.post2.comments.get(author='user2') self.assertRaises( MultipleObjectsReturned, self.post2.comments.get, author='user2' ) def test_unknown_keyword_get(self): """ Tests the get method of a List of Embedded Documents with an unknown keyword. """ # < 2.6 Incompatible > # with self.assertRaises(AttributeError): # self.post2.comments.get(year=2020) self.assertRaises(AttributeError, self.post2.comments.get, year=2020) def test_no_result_get(self): """ Tests the get method of a List of Embedded Documents where get returns no results. """ # < 2.6 Incompatible > # with self.assertRaises(DoesNotExist): # self.post1.comments.get(author='user3') self.assertRaises( DoesNotExist, self.post1.comments.get, author='user3' ) def test_first(self): """ Tests the first method of a List of Embedded Documents to ensure it returns the first comment. """ comment = self.post1.comments.first() # Ensure a Comment object was returned. # < 2.6 Incompatible > # self.assertIsInstance(comment, self.Comments) self.assertTrue(isinstance(comment, self.Comments)) self.assertEqual(comment, self.post1.comments[0]) def test_create(self): """ Test the create method of a List of Embedded Documents. """ comment = self.post1.comments.create( author='user4', message='message1' ) self.post1.save() # Ensure the returned value is the comment object. # < 2.6 Incompatible > # self.assertIsInstance(comment, self.Comments) self.assertTrue(isinstance(comment, self.Comments)) self.assertEqual(comment.author, 'user4') self.assertEqual(comment.message, 'message1') # Ensure the new comment was actually saved to the database. # < 2.6 Incompatible > # self.assertIn( # comment, # self.BlogPost.objects(comments__author='user4')[0].comments # ) self.assertTrue( comment in self.BlogPost.objects( comments__author='user4' )[0].comments ) def test_filtered_create(self): """ Test the create method of a List of Embedded Documents chained to a call to the filter method. Filtering should have no effect on creation. """ comment = self.post1.comments.filter(author='user1').create( author='user4', message='message1' ) self.post1.save() # Ensure the returned value is the comment object. # < 2.6 Incompatible > # self.assertIsInstance(comment, self.Comments) self.assertTrue(isinstance(comment, self.Comments)) self.assertEqual(comment.author, 'user4') self.assertEqual(comment.message, 'message1') # Ensure the new comment was actually saved to the database. # < 2.6 Incompatible > # self.assertIn( # comment, # self.BlogPost.objects(comments__author='user4')[0].comments # ) self.assertTrue( comment in self.BlogPost.objects( comments__author='user4' )[0].comments ) def test_no_keyword_update(self): """ Tests the update method of a List of Embedded Documents with no keywords. """ original = list(self.post1.comments) number = self.post1.comments.update() self.post1.save() # Ensure that nothing was altered. # < 2.6 Incompatible > # self.assertIn( # original[0], # self.BlogPost.objects(id=self.post1.id)[0].comments # ) self.assertTrue( original[0] in self.BlogPost.objects(id=self.post1.id)[0].comments ) # < 2.6 Incompatible > # self.assertIn( # original[1], # self.BlogPost.objects(id=self.post1.id)[0].comments # ) self.assertTrue( original[1] in self.BlogPost.objects(id=self.post1.id)[0].comments ) # Ensure the method returned 0 as the number of entries # modified self.assertEqual(number, 0) def test_single_keyword_update(self): """ Tests the update method of a List of Embedded Documents with a single keyword. """ number = self.post1.comments.update(author='user4') self.post1.save() comments = self.BlogPost.objects(id=self.post1.id)[0].comments # Ensure that the database was updated properly. self.assertEqual(comments[0].author, 'user4') self.assertEqual(comments[1].author, 'user4') # Ensure the method returned 2 as the number of entries # modified self.assertEqual(number, 2) def test_save(self): """ Tests the save method of a List of Embedded Documents. """ comments = self.post1.comments new_comment = self.Comments(author='user4') comments.append(new_comment) comments.save() # Ensure that the new comment has been added to the database. # < 2.6 Incompatible > # self.assertIn( # new_comment, # self.BlogPost.objects(id=self.post1.id)[0].comments # ) self.assertTrue( new_comment in self.BlogPost.objects(id=self.post1.id)[0].comments ) def test_delete(self): """ Tests the delete method of a List of Embedded Documents. """ number = self.post1.comments.delete() self.post1.save() # Ensure that all the comments under post1 were deleted in the # database. # < 2.6 Incompatible > # self.assertListEqual( # self.BlogPost.objects(id=self.post1.id)[0].comments, [] # ) self.assertEqual( self.BlogPost.objects(id=self.post1.id)[0].comments, [] ) # Ensure that post1 comments were deleted from the list. # < 2.6 Incompatible > # self.assertListEqual(self.post1.comments, []) self.assertEqual(self.post1.comments, []) # Ensure that comments still returned a EmbeddedDocumentList object. # < 2.6 Incompatible > # self.assertIsInstance(self.post1.comments, EmbeddedDocumentList) self.assertTrue(isinstance(self.post1.comments, EmbeddedDocumentList)) # Ensure that the delete method returned 2 as the number of entries # deleted from the database self.assertEqual(number, 2) def test_empty_list_embedded_documents_with_unique_field(self): """ Tests that only one document with an empty list of embedded documents that have a unique field can be saved, but if the unique field is also sparse than multiple documents with an empty list can be saved. """ class EmbeddedWithUnique(EmbeddedDocument): number = IntField(unique=True) class A(Document): my_list = ListField(EmbeddedDocumentField(EmbeddedWithUnique)) A(my_list=[]).save() self.assertRaises(NotUniqueError, lambda: A(my_list=[]).save()) class EmbeddedWithSparseUnique(EmbeddedDocument): number = IntField(unique=True, sparse=True) class B(Document): my_list = ListField(EmbeddedDocumentField(EmbeddedWithSparseUnique)) B(my_list=[]).save() B(my_list=[]).save() def test_filtered_delete(self): """ Tests the delete method of a List of Embedded Documents after the filter method has been called. """ comment = self.post1.comments[1] number = self.post1.comments.filter(author='user2').delete() self.post1.save() # Ensure that only the user2 comment was deleted. # < 2.6 Incompatible > # self.assertNotIn( # comment, self.BlogPost.objects(id=self.post1.id)[0].comments # ) self.assertTrue( comment not in self.BlogPost.objects(id=self.post1.id)[0].comments ) self.assertEqual( len(self.BlogPost.objects(id=self.post1.id)[0].comments), 1 ) # Ensure that the user2 comment no longer exists in the list. # < 2.6 Incompatible > # self.assertNotIn(comment, self.post1.comments) self.assertTrue(comment not in self.post1.comments) self.assertEqual(len(self.post1.comments), 1) # Ensure that the delete method returned 1 as the number of entries # deleted from the database self.assertEqual(number, 1) def test_custom_data(self): """ Tests that custom data is saved in the field object and doesn't interfere with the rest of field functionalities. """ custom_data = {'a': 'a_value', 'b': [1, 2]} class CustomData(Document): a_field = IntField() c_field = IntField(custom_data=custom_data) a1 = CustomData(a_field=1, c_field=2).save() self.assertEqual(2, a1.c_field) self.assertFalse(hasattr(a1.c_field, 'custom_data')) self.assertTrue(hasattr(CustomData.c_field, 'custom_data')) self.assertEqual(custom_data['a'], CustomData.c_field.custom_data['a']) if __name__ == '__main__': unittest.main()
{ "content_hash": "5b3c68344eddb94a97fca93c583c5e51", "timestamp": "", "source": "github", "line_count": 4060, "max_line_length": 113, "avg_line_length": 32.217487684729065, "alnum_prop": 0.5825325107222311, "repo_name": "larsbutler/mongoengine", "id": "dbba6f9fb505c8380aac0e25e7e7c28cdcc5aa60", "size": "130829", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/fields/fields.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1045433" } ], "symlink_target": "" }
import BoostBuild t = BoostBuild.Tester() # Create the needed files. t.write("jamroot.jam", """ using testing ; lib helper : helper.cpp ; unit-test test : test.cpp : <library>helper ; """) t.write("test.cpp", """ void helper(); int main() { helper(); } """) t.write("helper.cpp", """ void #if defined(_WIN32) __declspec(dllexport) #endif helper() {} """) t.run_build_system("link=static") t.expect_addition("bin/$toolset/debug/link-static/test.passed") t.cleanup()
{ "content_hash": "0c755bd8438c03969365f3d44d4e2090", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 63, "avg_line_length": 16.821428571428573, "alnum_prop": 0.6602972399150743, "repo_name": "goldcoin/gldcoin", "id": "94575ad3190a968bc3280656d227c02d8485e72d", "size": "700", "binary": false, "copies": "20", "ref": "refs/heads/master", "path": "BuildDeps/deps/boost/tools/build/v2/test/unit_test.py", "mode": "33188", "license": "mit", "language": [ { "name": "ASP", "bytes": "21289" }, { "name": "Assembly", "bytes": "1152300" }, { "name": "Awk", "bytes": "54072" }, { "name": "Batchfile", "bytes": "95617" }, { "name": "C", "bytes": "30167843" }, { "name": "C#", "bytes": "1801043" }, { "name": "C++", "bytes": "143719415" }, { "name": "CMake", "bytes": "7934" }, { "name": "CSS", "bytes": "369274" }, { "name": "Cuda", "bytes": "26749" }, { "name": "DIGITAL Command Language", "bytes": "320412" }, { "name": "Emacs Lisp", "bytes": "1639" }, { "name": "FORTRAN", "bytes": "1387" }, { "name": "Groff", "bytes": "29119" }, { "name": "HTML", "bytes": "187929099" }, { "name": "IDL", "bytes": "14" }, { "name": "Java", "bytes": "4131730" }, { "name": "JavaScript", "bytes": "210803" }, { "name": "Lex", "bytes": "1255" }, { "name": "Makefile", "bytes": "1926648" }, { "name": "Max", "bytes": "36857" }, { "name": "NSIS", "bytes": "5910" }, { "name": "Objective-C", "bytes": "88946" }, { "name": "Objective-C++", "bytes": "11420" }, { "name": "OpenEdge ABL", "bytes": "66157" }, { "name": "PHP", "bytes": "60328" }, { "name": "Perl", "bytes": "3895713" }, { "name": "Perl6", "bytes": "29655" }, { "name": "Prolog", "bytes": "42455" }, { "name": "Protocol Buffer", "bytes": "2764" }, { "name": "Python", "bytes": "1785357" }, { "name": "QML", "bytes": "593" }, { "name": "QMake", "bytes": "55372" }, { "name": "R", "bytes": "4009" }, { "name": "Rebol", "bytes": "354" }, { "name": "Scheme", "bytes": "4249" }, { "name": "Shell", "bytes": "1384609" }, { "name": "Tcl", "bytes": "2603631" }, { "name": "TeX", "bytes": "13404" }, { "name": "XS", "bytes": "198495" }, { "name": "XSLT", "bytes": "761090" }, { "name": "Yacc", "bytes": "18910" }, { "name": "eC", "bytes": "5157" } ], "symlink_target": "" }
"""Tests for unix_events.py.""" import collections import gc import errno import io import os import pprint import signal import socket import stat import sys import tempfile import threading import unittest from unittest import mock if sys.platform == 'win32': raise unittest.SkipTest('UNIX only') import asyncio from asyncio import log from asyncio import test_utils from asyncio import unix_events MOCK_ANY = mock.ANY @unittest.skipUnless(signal, 'Signals are not supported') class SelectorEventLoopSignalTests(test_utils.TestCase): def setUp(self): self.loop = asyncio.SelectorEventLoop() self.set_event_loop(self.loop) def test_check_signal(self): self.assertRaises( TypeError, self.loop._check_signal, '1') self.assertRaises( ValueError, self.loop._check_signal, signal.NSIG + 1) def test_handle_signal_no_handler(self): self.loop._handle_signal(signal.NSIG + 1) def test_handle_signal_cancelled_handler(self): h = asyncio.Handle(mock.Mock(), (), loop=mock.Mock()) h.cancel() self.loop._signal_handlers[signal.NSIG + 1] = h self.loop.remove_signal_handler = mock.Mock() self.loop._handle_signal(signal.NSIG + 1) self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1) @mock.patch('asyncio.unix_events.signal') def test_add_signal_handler_setup_error(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.set_wakeup_fd.side_effect = ValueError self.assertRaises( RuntimeError, self.loop.add_signal_handler, signal.SIGINT, lambda: True) @mock.patch('asyncio.unix_events.signal') def test_add_signal_handler(self, m_signal): m_signal.NSIG = signal.NSIG cb = lambda: True self.loop.add_signal_handler(signal.SIGHUP, cb) h = self.loop._signal_handlers.get(signal.SIGHUP) self.assertIsInstance(h, asyncio.Handle) self.assertEqual(h._callback, cb) @mock.patch('asyncio.unix_events.signal') def test_add_signal_handler_install_error(self, m_signal): m_signal.NSIG = signal.NSIG def set_wakeup_fd(fd): if fd == -1: raise ValueError() m_signal.set_wakeup_fd = set_wakeup_fd class Err(OSError): errno = errno.EFAULT m_signal.signal.side_effect = Err self.assertRaises( Err, self.loop.add_signal_handler, signal.SIGINT, lambda: True) @mock.patch('asyncio.unix_events.signal') @mock.patch('asyncio.base_events.logger') def test_add_signal_handler_install_error2(self, m_logging, m_signal): m_signal.NSIG = signal.NSIG class Err(OSError): errno = errno.EINVAL m_signal.signal.side_effect = Err self.loop._signal_handlers[signal.SIGHUP] = lambda: True self.assertRaises( RuntimeError, self.loop.add_signal_handler, signal.SIGINT, lambda: True) self.assertFalse(m_logging.info.called) self.assertEqual(1, m_signal.set_wakeup_fd.call_count) @mock.patch('asyncio.unix_events.signal') @mock.patch('asyncio.base_events.logger') def test_add_signal_handler_install_error3(self, m_logging, m_signal): class Err(OSError): errno = errno.EINVAL m_signal.signal.side_effect = Err m_signal.NSIG = signal.NSIG self.assertRaises( RuntimeError, self.loop.add_signal_handler, signal.SIGINT, lambda: True) self.assertFalse(m_logging.info.called) self.assertEqual(2, m_signal.set_wakeup_fd.call_count) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler(self, m_signal): m_signal.NSIG = signal.NSIG self.loop.add_signal_handler(signal.SIGHUP, lambda: True) self.assertTrue( self.loop.remove_signal_handler(signal.SIGHUP)) self.assertTrue(m_signal.set_wakeup_fd.called) self.assertTrue(m_signal.signal.called) self.assertEqual( (signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0]) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler_2(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.SIGINT = signal.SIGINT self.loop.add_signal_handler(signal.SIGINT, lambda: True) self.loop._signal_handlers[signal.SIGHUP] = object() m_signal.set_wakeup_fd.reset_mock() self.assertTrue( self.loop.remove_signal_handler(signal.SIGINT)) self.assertFalse(m_signal.set_wakeup_fd.called) self.assertTrue(m_signal.signal.called) self.assertEqual( (signal.SIGINT, m_signal.default_int_handler), m_signal.signal.call_args[0]) @mock.patch('asyncio.unix_events.signal') @mock.patch('asyncio.base_events.logger') def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal): m_signal.NSIG = signal.NSIG self.loop.add_signal_handler(signal.SIGHUP, lambda: True) m_signal.set_wakeup_fd.side_effect = ValueError self.loop.remove_signal_handler(signal.SIGHUP) self.assertTrue(m_logging.info) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler_error(self, m_signal): m_signal.NSIG = signal.NSIG self.loop.add_signal_handler(signal.SIGHUP, lambda: True) m_signal.signal.side_effect = OSError self.assertRaises( OSError, self.loop.remove_signal_handler, signal.SIGHUP) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler_error2(self, m_signal): m_signal.NSIG = signal.NSIG self.loop.add_signal_handler(signal.SIGHUP, lambda: True) class Err(OSError): errno = errno.EINVAL m_signal.signal.side_effect = Err self.assertRaises( RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP) @mock.patch('asyncio.unix_events.signal') def test_close(self, m_signal): m_signal.NSIG = signal.NSIG self.loop.add_signal_handler(signal.SIGHUP, lambda: True) self.loop.add_signal_handler(signal.SIGCHLD, lambda: True) self.assertEqual(len(self.loop._signal_handlers), 2) m_signal.set_wakeup_fd.reset_mock() self.loop.close() self.assertEqual(len(self.loop._signal_handlers), 0) m_signal.set_wakeup_fd.assert_called_once_with(-1) @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'UNIX Sockets are not supported') class SelectorEventLoopUnixSocketTests(test_utils.TestCase): def setUp(self): self.loop = asyncio.SelectorEventLoop() self.set_event_loop(self.loop) def test_create_unix_server_existing_path_sock(self): with test_utils.unix_socket_path() as path: sock = socket.socket(socket.AF_UNIX) sock.bind(path) with sock: coro = self.loop.create_unix_server(lambda: None, path) with self.assertRaisesRegex(OSError, 'Address.*is already in use'): self.loop.run_until_complete(coro) def test_create_unix_server_existing_path_nonsock(self): with tempfile.NamedTemporaryFile() as file: coro = self.loop.create_unix_server(lambda: None, file.name) with self.assertRaisesRegex(OSError, 'Address.*is already in use'): self.loop.run_until_complete(coro) def test_create_unix_server_ssl_bool(self): coro = self.loop.create_unix_server(lambda: None, path='spam', ssl=True) with self.assertRaisesRegex(TypeError, 'ssl argument must be an SSLContext'): self.loop.run_until_complete(coro) def test_create_unix_server_nopath_nosock(self): coro = self.loop.create_unix_server(lambda: None, path=None) with self.assertRaisesRegex(ValueError, 'path was not specified, and no sock'): self.loop.run_until_complete(coro) def test_create_unix_server_path_inetsock(self): sock = socket.socket() with sock: coro = self.loop.create_unix_server(lambda: None, path=None, sock=sock) with self.assertRaisesRegex(ValueError, 'A UNIX Domain Socket was expected'): self.loop.run_until_complete(coro) @mock.patch('asyncio.unix_events.socket') def test_create_unix_server_bind_error(self, m_socket): # Ensure that the socket is closed on any bind error sock = mock.Mock() m_socket.socket.return_value = sock sock.bind.side_effect = OSError coro = self.loop.create_unix_server(lambda: None, path="/test") with self.assertRaises(OSError): self.loop.run_until_complete(coro) self.assertTrue(sock.close.called) sock.bind.side_effect = MemoryError coro = self.loop.create_unix_server(lambda: None, path="/test") with self.assertRaises(MemoryError): self.loop.run_until_complete(coro) self.assertTrue(sock.close.called) def test_create_unix_connection_path_sock(self): coro = self.loop.create_unix_connection( lambda: None, '/dev/null', sock=object()) with self.assertRaisesRegex(ValueError, 'path and sock can not be'): self.loop.run_until_complete(coro) def test_create_unix_connection_nopath_nosock(self): coro = self.loop.create_unix_connection( lambda: None, None) with self.assertRaisesRegex(ValueError, 'no path and sock were specified'): self.loop.run_until_complete(coro) def test_create_unix_connection_nossl_serverhost(self): coro = self.loop.create_unix_connection( lambda: None, '/dev/null', server_hostname='spam') with self.assertRaisesRegex(ValueError, 'server_hostname is only meaningful'): self.loop.run_until_complete(coro) def test_create_unix_connection_ssl_noserverhost(self): coro = self.loop.create_unix_connection( lambda: None, '/dev/null', ssl=True) with self.assertRaisesRegex( ValueError, 'you have to pass server_hostname when using ssl'): self.loop.run_until_complete(coro) class UnixReadPipeTransportTests(test_utils.TestCase): def setUp(self): self.loop = self.new_test_loop() self.protocol = test_utils.make_test_protocol(asyncio.Protocol) self.pipe = mock.Mock(spec_set=io.RawIOBase) self.pipe.fileno.return_value = 5 blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking') blocking_patcher.start() self.addCleanup(blocking_patcher.stop) fstat_patcher = mock.patch('os.fstat') m_fstat = fstat_patcher.start() st = mock.Mock() st.st_mode = stat.S_IFIFO m_fstat.return_value = st self.addCleanup(fstat_patcher.stop) def test_ctor(self): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) self.loop.assert_reader(5, tr._read_ready) test_utils.run_briefly(self.loop) self.protocol.connection_made.assert_called_with(tr) def test_ctor_with_waiter(self): fut = asyncio.Future(loop=self.loop) unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol, fut) test_utils.run_briefly(self.loop) self.assertIsNone(fut.result()) @mock.patch('os.read') def test__read_ready(self, m_read): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) m_read.return_value = b'data' tr._read_ready() m_read.assert_called_with(5, tr.max_size) self.protocol.data_received.assert_called_with(b'data') @mock.patch('os.read') def test__read_ready_eof(self, m_read): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) m_read.return_value = b'' tr._read_ready() m_read.assert_called_with(5, tr.max_size) self.assertFalse(self.loop.readers) test_utils.run_briefly(self.loop) self.protocol.eof_received.assert_called_with() self.protocol.connection_lost.assert_called_with(None) @mock.patch('os.read') def test__read_ready_blocked(self, m_read): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) m_read.side_effect = BlockingIOError tr._read_ready() m_read.assert_called_with(5, tr.max_size) test_utils.run_briefly(self.loop) self.assertFalse(self.protocol.data_received.called) @mock.patch('asyncio.log.logger.error') @mock.patch('os.read') def test__read_ready_error(self, m_read, m_logexc): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) err = OSError() m_read.side_effect = err tr._close = mock.Mock() tr._read_ready() m_read.assert_called_with(5, tr.max_size) tr._close.assert_called_with(err) m_logexc.assert_called_with( test_utils.MockPattern( 'Fatal read error on pipe transport' '\nprotocol:.*\ntransport:.*'), exc_info=(OSError, MOCK_ANY, MOCK_ANY)) @mock.patch('os.read') def test_pause_reading(self, m_read): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) m = mock.Mock() self.loop.add_reader(5, m) tr.pause_reading() self.assertFalse(self.loop.readers) @mock.patch('os.read') def test_resume_reading(self, m_read): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) tr.resume_reading() self.loop.assert_reader(5, tr._read_ready) @mock.patch('os.read') def test_close(self, m_read): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) tr._close = mock.Mock() tr.close() tr._close.assert_called_with(None) @mock.patch('os.read') def test_close_already_closing(self, m_read): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) tr._closing = True tr._close = mock.Mock() tr.close() self.assertFalse(tr._close.called) @mock.patch('os.read') def test__close(self, m_read): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) err = object() tr._close(err) self.assertTrue(tr._closing) self.assertFalse(self.loop.readers) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(err) def test__call_connection_lost(self): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) self.assertIsNotNone(tr._protocol) self.assertIsNotNone(tr._loop) err = None tr._call_connection_lost(err) self.protocol.connection_lost.assert_called_with(err) self.pipe.close.assert_called_with() self.assertIsNone(tr._protocol) self.assertIsNone(tr._loop) def test__call_connection_lost_with_err(self): tr = unix_events._UnixReadPipeTransport( self.loop, self.pipe, self.protocol) self.assertIsNotNone(tr._protocol) self.assertIsNotNone(tr._loop) err = OSError() tr._call_connection_lost(err) self.protocol.connection_lost.assert_called_with(err) self.pipe.close.assert_called_with() self.assertIsNone(tr._protocol) self.assertIsNone(tr._loop) class UnixWritePipeTransportTests(test_utils.TestCase): def setUp(self): self.loop = self.new_test_loop() self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol) self.pipe = mock.Mock(spec_set=io.RawIOBase) self.pipe.fileno.return_value = 5 blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking') blocking_patcher.start() self.addCleanup(blocking_patcher.stop) fstat_patcher = mock.patch('os.fstat') m_fstat = fstat_patcher.start() st = mock.Mock() st.st_mode = stat.S_IFSOCK m_fstat.return_value = st self.addCleanup(fstat_patcher.stop) def test_ctor(self): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.loop.assert_reader(5, tr._read_ready) test_utils.run_briefly(self.loop) self.protocol.connection_made.assert_called_with(tr) def test_ctor_with_waiter(self): fut = asyncio.Future(loop=self.loop) tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol, fut) self.loop.assert_reader(5, tr._read_ready) test_utils.run_briefly(self.loop) self.assertEqual(None, fut.result()) def test_can_write_eof(self): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.assertTrue(tr.can_write_eof()) @mock.patch('os.write') def test_write(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) m_write.return_value = 4 tr.write(b'data') m_write.assert_called_with(5, b'data') self.assertFalse(self.loop.writers) self.assertEqual([], tr._buffer) @mock.patch('os.write') def test_write_no_data(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) tr.write(b'') self.assertFalse(m_write.called) self.assertFalse(self.loop.writers) self.assertEqual([], tr._buffer) @mock.patch('os.write') def test_write_partial(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) m_write.return_value = 2 tr.write(b'data') m_write.assert_called_with(5, b'data') self.loop.assert_writer(5, tr._write_ready) self.assertEqual([b'ta'], tr._buffer) @mock.patch('os.write') def test_write_buffer(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.loop.add_writer(5, tr._write_ready) tr._buffer = [b'previous'] tr.write(b'data') self.assertFalse(m_write.called) self.loop.assert_writer(5, tr._write_ready) self.assertEqual([b'previous', b'data'], tr._buffer) @mock.patch('os.write') def test_write_again(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) m_write.side_effect = BlockingIOError() tr.write(b'data') m_write.assert_called_with(5, b'data') self.loop.assert_writer(5, tr._write_ready) self.assertEqual([b'data'], tr._buffer) @mock.patch('asyncio.unix_events.logger') @mock.patch('os.write') def test_write_err(self, m_write, m_log): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) err = OSError() m_write.side_effect = err tr._fatal_error = mock.Mock() tr.write(b'data') m_write.assert_called_with(5, b'data') self.assertFalse(self.loop.writers) self.assertEqual([], tr._buffer) tr._fatal_error.assert_called_with( err, 'Fatal write error on pipe transport') self.assertEqual(1, tr._conn_lost) tr.write(b'data') self.assertEqual(2, tr._conn_lost) tr.write(b'data') tr.write(b'data') tr.write(b'data') tr.write(b'data') # This is a bit overspecified. :-( m_log.warning.assert_called_with( 'pipe closed by peer or os.write(pipe, data) raised exception.') @mock.patch('os.write') def test_write_close(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) tr._read_ready() # pipe was closed by peer tr.write(b'data') self.assertEqual(tr._conn_lost, 1) tr.write(b'data') self.assertEqual(tr._conn_lost, 2) def test__read_ready(self): tr = unix_events._UnixWritePipeTransport(self.loop, self.pipe, self.protocol) tr._read_ready() self.assertFalse(self.loop.readers) self.assertFalse(self.loop.writers) self.assertTrue(tr._closing) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(None) @mock.patch('os.write') def test__write_ready(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.loop.add_writer(5, tr._write_ready) tr._buffer = [b'da', b'ta'] m_write.return_value = 4 tr._write_ready() m_write.assert_called_with(5, b'data') self.assertFalse(self.loop.writers) self.assertEqual([], tr._buffer) @mock.patch('os.write') def test__write_ready_partial(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.loop.add_writer(5, tr._write_ready) tr._buffer = [b'da', b'ta'] m_write.return_value = 3 tr._write_ready() m_write.assert_called_with(5, b'data') self.loop.assert_writer(5, tr._write_ready) self.assertEqual([b'a'], tr._buffer) @mock.patch('os.write') def test__write_ready_again(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.loop.add_writer(5, tr._write_ready) tr._buffer = [b'da', b'ta'] m_write.side_effect = BlockingIOError() tr._write_ready() m_write.assert_called_with(5, b'data') self.loop.assert_writer(5, tr._write_ready) self.assertEqual([b'data'], tr._buffer) @mock.patch('os.write') def test__write_ready_empty(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.loop.add_writer(5, tr._write_ready) tr._buffer = [b'da', b'ta'] m_write.return_value = 0 tr._write_ready() m_write.assert_called_with(5, b'data') self.loop.assert_writer(5, tr._write_ready) self.assertEqual([b'data'], tr._buffer) @mock.patch('asyncio.log.logger.error') @mock.patch('os.write') def test__write_ready_err(self, m_write, m_logexc): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.loop.add_writer(5, tr._write_ready) tr._buffer = [b'da', b'ta'] m_write.side_effect = err = OSError() tr._write_ready() m_write.assert_called_with(5, b'data') self.assertFalse(self.loop.writers) self.assertFalse(self.loop.readers) self.assertEqual([], tr._buffer) self.assertTrue(tr._closing) m_logexc.assert_called_with( test_utils.MockPattern( 'Fatal write error on pipe transport' '\nprotocol:.*\ntransport:.*'), exc_info=(OSError, MOCK_ANY, MOCK_ANY)) self.assertEqual(1, tr._conn_lost) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(err) @mock.patch('os.write') def test__write_ready_closing(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.loop.add_writer(5, tr._write_ready) tr._closing = True tr._buffer = [b'da', b'ta'] m_write.return_value = 4 tr._write_ready() m_write.assert_called_with(5, b'data') self.assertFalse(self.loop.writers) self.assertFalse(self.loop.readers) self.assertEqual([], tr._buffer) self.protocol.connection_lost.assert_called_with(None) self.pipe.close.assert_called_with() @mock.patch('os.write') def test_abort(self, m_write): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.loop.add_writer(5, tr._write_ready) self.loop.add_reader(5, tr._read_ready) tr._buffer = [b'da', b'ta'] tr.abort() self.assertFalse(m_write.called) self.assertFalse(self.loop.readers) self.assertFalse(self.loop.writers) self.assertEqual([], tr._buffer) self.assertTrue(tr._closing) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(None) def test__call_connection_lost(self): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.assertIsNotNone(tr._protocol) self.assertIsNotNone(tr._loop) err = None tr._call_connection_lost(err) self.protocol.connection_lost.assert_called_with(err) self.pipe.close.assert_called_with() self.assertIsNone(tr._protocol) self.assertIsNone(tr._loop) def test__call_connection_lost_with_err(self): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) self.assertIsNotNone(tr._protocol) self.assertIsNotNone(tr._loop) err = OSError() tr._call_connection_lost(err) self.protocol.connection_lost.assert_called_with(err) self.pipe.close.assert_called_with() self.assertIsNone(tr._protocol) self.assertIsNone(tr._loop) def test_close(self): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) tr.write_eof = mock.Mock() tr.close() tr.write_eof.assert_called_with() def test_close_closing(self): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) tr.write_eof = mock.Mock() tr._closing = True tr.close() self.assertFalse(tr.write_eof.called) def test_write_eof(self): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) tr.write_eof() self.assertTrue(tr._closing) self.assertFalse(self.loop.readers) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(None) def test_write_eof_pending(self): tr = unix_events._UnixWritePipeTransport( self.loop, self.pipe, self.protocol) tr._buffer = [b'data'] tr.write_eof() self.assertTrue(tr._closing) self.assertFalse(self.protocol.connection_lost.called) class AbstractChildWatcherTests(unittest.TestCase): def test_not_implemented(self): f = mock.Mock() watcher = asyncio.AbstractChildWatcher() self.assertRaises( NotImplementedError, watcher.add_child_handler, f, f) self.assertRaises( NotImplementedError, watcher.remove_child_handler, f) self.assertRaises( NotImplementedError, watcher.attach_loop, f) self.assertRaises( NotImplementedError, watcher.close) self.assertRaises( NotImplementedError, watcher.__enter__) self.assertRaises( NotImplementedError, watcher.__exit__, f, f, f) class BaseChildWatcherTests(unittest.TestCase): def test_not_implemented(self): f = mock.Mock() watcher = unix_events.BaseChildWatcher() self.assertRaises( NotImplementedError, watcher._do_waitpid, f) WaitPidMocks = collections.namedtuple("WaitPidMocks", ("waitpid", "WIFEXITED", "WIFSIGNALED", "WEXITSTATUS", "WTERMSIG", )) class ChildWatcherTestsMixin: ignore_warnings = mock.patch.object(log.logger, "warning") def setUp(self): self.loop = self.new_test_loop() self.running = False self.zombies = {} with mock.patch.object( self.loop, "add_signal_handler") as self.m_add_signal_handler: self.watcher = self.create_watcher() self.watcher.attach_loop(self.loop) def waitpid(self, pid, flags): if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1: self.assertGreater(pid, 0) try: if pid < 0: return self.zombies.popitem() else: return pid, self.zombies.pop(pid) except KeyError: pass if self.running: return 0, 0 else: raise ChildProcessError() def add_zombie(self, pid, returncode): self.zombies[pid] = returncode + 32768 def WIFEXITED(self, status): return status >= 32768 def WIFSIGNALED(self, status): return 32700 < status < 32768 def WEXITSTATUS(self, status): self.assertTrue(self.WIFEXITED(status)) return status - 32768 def WTERMSIG(self, status): self.assertTrue(self.WIFSIGNALED(status)) return 32768 - status def test_create_watcher(self): self.m_add_signal_handler.assert_called_once_with( signal.SIGCHLD, self.watcher._sig_chld) def waitpid_mocks(func): def wrapped_func(self): def patch(target, wrapper): return mock.patch(target, wraps=wrapper, new_callable=mock.Mock) with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \ patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \ patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \ patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \ patch('os.waitpid', self.waitpid) as m_waitpid: func(self, WaitPidMocks(m_waitpid, m_WIFEXITED, m_WIFSIGNALED, m_WEXITSTATUS, m_WTERMSIG, )) return wrapped_func @waitpid_mocks def test_sigchld(self, m): # register a child callback = mock.Mock() with self.watcher: self.running = True self.watcher.add_child_handler(42, callback, 9, 10, 14) self.assertFalse(callback.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # child is running self.watcher._sig_chld() self.assertFalse(callback.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # child terminates (returncode 12) self.running = False self.add_zombie(42, 12) self.watcher._sig_chld() self.assertTrue(m.WIFEXITED.called) self.assertTrue(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) callback.assert_called_once_with(42, 12, 9, 10, 14) m.WIFSIGNALED.reset_mock() m.WIFEXITED.reset_mock() m.WEXITSTATUS.reset_mock() callback.reset_mock() # ensure that the child is effectively reaped self.add_zombie(42, 13) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback.called) self.assertFalse(m.WTERMSIG.called) m.WIFSIGNALED.reset_mock() m.WIFEXITED.reset_mock() m.WEXITSTATUS.reset_mock() # sigchld called again self.zombies.clear() self.watcher._sig_chld() self.assertFalse(callback.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) @waitpid_mocks def test_sigchld_two_children(self, m): callback1 = mock.Mock() callback2 = mock.Mock() # register child 1 with self.watcher: self.running = True self.watcher.add_child_handler(43, callback1, 7, 8) self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # register child 2 with self.watcher: self.watcher.add_child_handler(44, callback2, 147, 18) self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # children are running self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # child 1 terminates (signal 3) self.add_zombie(43, -3) self.watcher._sig_chld() callback1.assert_called_once_with(43, -3, 7, 8) self.assertFalse(callback2.called) self.assertTrue(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertTrue(m.WTERMSIG.called) m.WIFSIGNALED.reset_mock() m.WIFEXITED.reset_mock() m.WTERMSIG.reset_mock() callback1.reset_mock() # child 2 still running self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # child 2 terminates (code 108) self.add_zombie(44, 108) self.running = False self.watcher._sig_chld() callback2.assert_called_once_with(44, 108, 147, 18) self.assertFalse(callback1.called) self.assertTrue(m.WIFEXITED.called) self.assertTrue(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) m.WIFSIGNALED.reset_mock() m.WIFEXITED.reset_mock() m.WEXITSTATUS.reset_mock() callback2.reset_mock() # ensure that the children are effectively reaped self.add_zombie(43, 14) self.add_zombie(44, 15) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WTERMSIG.called) m.WIFSIGNALED.reset_mock() m.WIFEXITED.reset_mock() m.WEXITSTATUS.reset_mock() # sigchld called again self.zombies.clear() self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) @waitpid_mocks def test_sigchld_two_children_terminating_together(self, m): callback1 = mock.Mock() callback2 = mock.Mock() # register child 1 with self.watcher: self.running = True self.watcher.add_child_handler(45, callback1, 17, 8) self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # register child 2 with self.watcher: self.watcher.add_child_handler(46, callback2, 1147, 18) self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # children are running self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # child 1 terminates (code 78) # child 2 terminates (signal 5) self.add_zombie(45, 78) self.add_zombie(46, -5) self.running = False self.watcher._sig_chld() callback1.assert_called_once_with(45, 78, 17, 8) callback2.assert_called_once_with(46, -5, 1147, 18) self.assertTrue(m.WIFSIGNALED.called) self.assertTrue(m.WIFEXITED.called) self.assertTrue(m.WEXITSTATUS.called) self.assertTrue(m.WTERMSIG.called) m.WIFSIGNALED.reset_mock() m.WIFEXITED.reset_mock() m.WTERMSIG.reset_mock() m.WEXITSTATUS.reset_mock() callback1.reset_mock() callback2.reset_mock() # ensure that the children are effectively reaped self.add_zombie(45, 14) self.add_zombie(46, 15) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WTERMSIG.called) @waitpid_mocks def test_sigchld_race_condition(self, m): # register a child callback = mock.Mock() with self.watcher: # child terminates before being registered self.add_zombie(50, 4) self.watcher._sig_chld() self.watcher.add_child_handler(50, callback, 1, 12) callback.assert_called_once_with(50, 4, 1, 12) callback.reset_mock() # ensure that the child is effectively reaped self.add_zombie(50, -1) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback.called) @waitpid_mocks def test_sigchld_replace_handler(self, m): callback1 = mock.Mock() callback2 = mock.Mock() # register a child with self.watcher: self.running = True self.watcher.add_child_handler(51, callback1, 19) self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # register the same child again with self.watcher: self.watcher.add_child_handler(51, callback2, 21) self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # child terminates (signal 8) self.running = False self.add_zombie(51, -8) self.watcher._sig_chld() callback2.assert_called_once_with(51, -8, 21) self.assertFalse(callback1.called) self.assertTrue(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertTrue(m.WTERMSIG.called) m.WIFSIGNALED.reset_mock() m.WIFEXITED.reset_mock() m.WTERMSIG.reset_mock() callback2.reset_mock() # ensure that the child is effectively reaped self.add_zombie(51, 13) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(m.WTERMSIG.called) @waitpid_mocks def test_sigchld_remove_handler(self, m): callback = mock.Mock() # register a child with self.watcher: self.running = True self.watcher.add_child_handler(52, callback, 1984) self.assertFalse(callback.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # unregister the child self.watcher.remove_child_handler(52) self.assertFalse(callback.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # child terminates (code 99) self.running = False self.add_zombie(52, 99) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback.called) @waitpid_mocks def test_sigchld_unknown_status(self, m): callback = mock.Mock() # register a child with self.watcher: self.running = True self.watcher.add_child_handler(53, callback, -19) self.assertFalse(callback.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # terminate with unknown status self.zombies[53] = 1178 self.running = False self.watcher._sig_chld() callback.assert_called_once_with(53, 1178, -19) self.assertTrue(m.WIFEXITED.called) self.assertTrue(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) callback.reset_mock() m.WIFEXITED.reset_mock() m.WIFSIGNALED.reset_mock() # ensure that the child is effectively reaped self.add_zombie(53, 101) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback.called) @waitpid_mocks def test_remove_child_handler(self, m): callback1 = mock.Mock() callback2 = mock.Mock() callback3 = mock.Mock() # register children with self.watcher: self.running = True self.watcher.add_child_handler(54, callback1, 1) self.watcher.add_child_handler(55, callback2, 2) self.watcher.add_child_handler(56, callback3, 3) # remove child handler 1 self.assertTrue(self.watcher.remove_child_handler(54)) # remove child handler 2 multiple times self.assertTrue(self.watcher.remove_child_handler(55)) self.assertFalse(self.watcher.remove_child_handler(55)) self.assertFalse(self.watcher.remove_child_handler(55)) # all children terminate self.add_zombie(54, 0) self.add_zombie(55, 1) self.add_zombie(56, 2) self.running = False with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) callback3.assert_called_once_with(56, 2, 3) @waitpid_mocks def test_sigchld_unhandled_exception(self, m): callback = mock.Mock() # register a child with self.watcher: self.running = True self.watcher.add_child_handler(57, callback) # raise an exception m.waitpid.side_effect = ValueError with mock.patch.object(log.logger, 'error') as m_error: self.assertEqual(self.watcher._sig_chld(), None) self.assertTrue(m_error.called) @waitpid_mocks def test_sigchld_child_reaped_elsewhere(self, m): # register a child callback = mock.Mock() with self.watcher: self.running = True self.watcher.add_child_handler(58, callback) self.assertFalse(callback.called) self.assertFalse(m.WIFEXITED.called) self.assertFalse(m.WIFSIGNALED.called) self.assertFalse(m.WEXITSTATUS.called) self.assertFalse(m.WTERMSIG.called) # child terminates self.running = False self.add_zombie(58, 4) # waitpid is called elsewhere os.waitpid(58, os.WNOHANG) m.waitpid.reset_mock() # sigchld with self.ignore_warnings: self.watcher._sig_chld() if isinstance(self.watcher, asyncio.FastChildWatcher): # here the FastChildWatche enters a deadlock # (there is no way to prevent it) self.assertFalse(callback.called) else: callback.assert_called_once_with(58, 255) @waitpid_mocks def test_sigchld_unknown_pid_during_registration(self, m): # register two children callback1 = mock.Mock() callback2 = mock.Mock() with self.ignore_warnings, self.watcher: self.running = True # child 1 terminates self.add_zombie(591, 7) # an unknown child terminates self.add_zombie(593, 17) self.watcher._sig_chld() self.watcher.add_child_handler(591, callback1) self.watcher.add_child_handler(592, callback2) callback1.assert_called_once_with(591, 7) self.assertFalse(callback2.called) @waitpid_mocks def test_set_loop(self, m): # register a child callback = mock.Mock() with self.watcher: self.running = True self.watcher.add_child_handler(60, callback) # attach a new loop old_loop = self.loop self.loop = self.new_test_loop() patch = mock.patch.object with patch(old_loop, "remove_signal_handler") as m_old_remove, \ patch(self.loop, "add_signal_handler") as m_new_add: self.watcher.attach_loop(self.loop) m_old_remove.assert_called_once_with( signal.SIGCHLD) m_new_add.assert_called_once_with( signal.SIGCHLD, self.watcher._sig_chld) # child terminates self.running = False self.add_zombie(60, 9) self.watcher._sig_chld() callback.assert_called_once_with(60, 9) @waitpid_mocks def test_set_loop_race_condition(self, m): # register 3 children callback1 = mock.Mock() callback2 = mock.Mock() callback3 = mock.Mock() with self.watcher: self.running = True self.watcher.add_child_handler(61, callback1) self.watcher.add_child_handler(62, callback2) self.watcher.add_child_handler(622, callback3) # detach the loop old_loop = self.loop self.loop = None with mock.patch.object( old_loop, "remove_signal_handler") as m_remove_signal_handler: self.watcher.attach_loop(None) m_remove_signal_handler.assert_called_once_with( signal.SIGCHLD) # child 1 & 2 terminate self.add_zombie(61, 11) self.add_zombie(62, -5) # SIGCHLD was not caught self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(callback3.called) # attach a new loop self.loop = self.new_test_loop() with mock.patch.object( self.loop, "add_signal_handler") as m_add_signal_handler: self.watcher.attach_loop(self.loop) m_add_signal_handler.assert_called_once_with( signal.SIGCHLD, self.watcher._sig_chld) callback1.assert_called_once_with(61, 11) # race condition! callback2.assert_called_once_with(62, -5) # race condition! self.assertFalse(callback3.called) callback1.reset_mock() callback2.reset_mock() # child 3 terminates self.running = False self.add_zombie(622, 19) self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) callback3.assert_called_once_with(622, 19) @waitpid_mocks def test_close(self, m): # register two children callback1 = mock.Mock() with self.watcher: self.running = True # child 1 terminates self.add_zombie(63, 9) # other child terminates self.add_zombie(65, 18) self.watcher._sig_chld() self.watcher.add_child_handler(63, callback1) self.watcher.add_child_handler(64, callback1) self.assertEqual(len(self.watcher._callbacks), 1) if isinstance(self.watcher, asyncio.FastChildWatcher): self.assertEqual(len(self.watcher._zombies), 1) with mock.patch.object( self.loop, "remove_signal_handler") as m_remove_signal_handler: self.watcher.close() m_remove_signal_handler.assert_called_once_with( signal.SIGCHLD) self.assertFalse(self.watcher._callbacks) if isinstance(self.watcher, asyncio.FastChildWatcher): self.assertFalse(self.watcher._zombies) class SafeChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase): def create_watcher(self): return asyncio.SafeChildWatcher() class FastChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase): def create_watcher(self): return asyncio.FastChildWatcher() class PolicyTests(unittest.TestCase): def create_policy(self): return asyncio.DefaultEventLoopPolicy() def test_get_child_watcher(self): policy = self.create_policy() self.assertIsNone(policy._watcher) watcher = policy.get_child_watcher() self.assertIsInstance(watcher, asyncio.SafeChildWatcher) self.assertIs(policy._watcher, watcher) self.assertIs(watcher, policy.get_child_watcher()) self.assertIsNone(watcher._loop) def test_get_child_watcher_after_set(self): policy = self.create_policy() watcher = asyncio.FastChildWatcher() policy.set_child_watcher(watcher) self.assertIs(policy._watcher, watcher) self.assertIs(watcher, policy.get_child_watcher()) def test_get_child_watcher_with_mainloop_existing(self): policy = self.create_policy() loop = policy.get_event_loop() self.assertIsNone(policy._watcher) watcher = policy.get_child_watcher() self.assertIsInstance(watcher, asyncio.SafeChildWatcher) self.assertIs(watcher._loop, loop) loop.close() def test_get_child_watcher_thread(self): def f(): policy.set_event_loop(policy.new_event_loop()) self.assertIsInstance(policy.get_event_loop(), asyncio.AbstractEventLoop) watcher = policy.get_child_watcher() self.assertIsInstance(watcher, asyncio.SafeChildWatcher) self.assertIsNone(watcher._loop) policy.get_event_loop().close() policy = self.create_policy() th = threading.Thread(target=f) th.start() th.join() def test_child_watcher_replace_mainloop_existing(self): policy = self.create_policy() loop = policy.get_event_loop() watcher = policy.get_child_watcher() self.assertIs(watcher._loop, loop) new_loop = policy.new_event_loop() policy.set_event_loop(new_loop) self.assertIs(watcher._loop, new_loop) policy.set_event_loop(None) self.assertIs(watcher._loop, None) loop.close() new_loop.close() if __name__ == '__main__': unittest.main()
{ "content_hash": "784c1fdb938f9d62a4cc3bf2f2436d02", "timestamp": "", "source": "github", "line_count": 1582, "max_line_length": 78, "avg_line_length": 33.554993678887485, "alnum_prop": 0.6104475924949138, "repo_name": "OptimusGitEtna/RestSymf", "id": "e397598222d8867af8d79011206a9af529605075", "size": "53084", "binary": false, "copies": "58", "ref": "refs/heads/master", "path": "Python-3.4.2/Lib/test/test_asyncio/test_unix_events.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "594205" }, { "name": "C", "bytes": "15348597" }, { "name": "C++", "bytes": "65109" }, { "name": "CSS", "bytes": "12039" }, { "name": "Common Lisp", "bytes": "24481" }, { "name": "JavaScript", "bytes": "10597" }, { "name": "Makefile", "bytes": "9444" }, { "name": "Objective-C", "bytes": "1390141" }, { "name": "PHP", "bytes": "93070" }, { "name": "PowerShell", "bytes": "1420" }, { "name": "Prolog", "bytes": "557" }, { "name": "Python", "bytes": "24018306" }, { "name": "Shell", "bytes": "440753" }, { "name": "TeX", "bytes": "323102" }, { "name": "Visual Basic", "bytes": "481" } ], "symlink_target": "" }
from __future__ import print_function import unittest import numpy as np from op_test import OpTest class TestFusionSquaredMatSubOp(OpTest): def setUp(self): self.op_type = 'fusion_squared_mat_sub' self.m = 11 self.n = 12 self.k = 4 self.scalar = 0.5 self.set_conf() matx = np.random.random((self.m, self.k)).astype("float32") maty = np.random.random((self.k, self.n)).astype("float32") self.inputs = {'X': matx, 'Y': maty} self.outputs = { 'Out': (np.dot(matx, maty)**2 - np.dot(matx**2, maty**2)) * self.scalar } self.attrs = {'scalar': self.scalar, } def set_conf(self): pass def test_check_output(self): self.check_output() class TestFusionSquaredMatSubOpCase1(TestFusionSquaredMatSubOp): def set_conf(self): self.scalar = -0.3 if __name__ == '__main__': unittest.main()
{ "content_hash": "b17f2f8ebd5c54f3a2962b1326f08dd4", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 76, "avg_line_length": 24.333333333333332, "alnum_prop": 0.5721812434141201, "repo_name": "baidu/Paddle", "id": "a097d3d9a20f0b4b5dddf286f064d5698de35b5f", "size": "1562", "binary": false, "copies": "3", "ref": "refs/heads/develop", "path": "python/paddle/fluid/tests/unittests/test_fusion_squared_mat_sub_op.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "217842" }, { "name": "C++", "bytes": "2771237" }, { "name": "CMake", "bytes": "113670" }, { "name": "Cuda", "bytes": "424141" }, { "name": "M4", "bytes": "40913" }, { "name": "Perl", "bytes": "11412" }, { "name": "Python", "bytes": "892636" }, { "name": "Shell", "bytes": "64351" } ], "symlink_target": "" }
from sys import version_info if version_info >= (2,6,0): def swig_import_helper(): from os.path import dirname import imp fp = None try: fp, pathname, description = imp.find_module('_mesos', [dirname(__file__)]) except ImportError: import _mesos return _mesos if fp is not None: try: _mod = imp.load_module('_mesos', fp, pathname, description) finally: fp.close() return _mod _mesos = swig_import_helper() del swig_import_helper else: import _mesos del version_info try: _swig_property = property except NameError: pass # Python < 2.2 doesn't have 'property'. def _swig_setattr_nondynamic(self,class_type,name,value,static=1): if (name == "thisown"): return self.this.own(value) if (name == "this"): if type(value).__name__ == 'SwigPyObject': self.__dict__[name] = value return method = class_type.__swig_setmethods__.get(name,None) if method: return method(self,value) if (not static) or hasattr(self,name): self.__dict__[name] = value else: raise AttributeError("You cannot add attributes to %s" % self) def _swig_setattr(self,class_type,name,value): return _swig_setattr_nondynamic(self,class_type,name,value,0) def _swig_getattr(self,class_type,name): if (name == "thisown"): return self.this.own() method = class_type.__swig_getmethods__.get(name,None) if method: return method(self) raise AttributeError(name) def _swig_repr(self): try: strthis = "proxy of " + self.this.__repr__() except: strthis = "" return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) try: _object = object _newclass = 1 except AttributeError: class _object : pass _newclass = 0 try: import weakref weakref_proxy = weakref.proxy except: weakref_proxy = lambda x: x class SwigPyIterator(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name) def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract") __repr__ = _swig_repr __swig_destroy__ = _mesos.delete_SwigPyIterator __del__ = lambda self : None; def value(self): return _mesos.SwigPyIterator_value(self) def incr(self, n = 1): return _mesos.SwigPyIterator_incr(self, n) def decr(self, n = 1): return _mesos.SwigPyIterator_decr(self, n) def distance(self, *args): return _mesos.SwigPyIterator_distance(self, *args) def equal(self, *args): return _mesos.SwigPyIterator_equal(self, *args) def copy(self): return _mesos.SwigPyIterator_copy(self) def next(self): return _mesos.SwigPyIterator_next(self) def __next__(self): return _mesos.SwigPyIterator___next__(self) def previous(self): return _mesos.SwigPyIterator_previous(self) def advance(self, *args): return _mesos.SwigPyIterator_advance(self, *args) def __eq__(self, *args): return _mesos.SwigPyIterator___eq__(self, *args) def __ne__(self, *args): return _mesos.SwigPyIterator___ne__(self, *args) def __iadd__(self, *args): return _mesos.SwigPyIterator___iadd__(self, *args) def __isub__(self, *args): return _mesos.SwigPyIterator___isub__(self, *args) def __add__(self, *args): return _mesos.SwigPyIterator___add__(self, *args) def __sub__(self, *args): return _mesos.SwigPyIterator___sub__(self, *args) def __iter__(self): return self SwigPyIterator_swigregister = _mesos.SwigPyIterator_swigregister SwigPyIterator_swigregister(SwigPyIterator) class SlaveOfferVector(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, SlaveOfferVector, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, SlaveOfferVector, name) __repr__ = _swig_repr def __nonzero__(self): return _mesos.SlaveOfferVector___nonzero__(self) def __bool__(self): return _mesos.SlaveOfferVector___bool__(self) def __len__(self): return _mesos.SlaveOfferVector___len__(self) def pop(self): return _mesos.SlaveOfferVector_pop(self) def __getslice__(self, *args): return _mesos.SlaveOfferVector___getslice__(self, *args) def __setslice__(self, *args): return _mesos.SlaveOfferVector___setslice__(self, *args) def __delslice__(self, *args): return _mesos.SlaveOfferVector___delslice__(self, *args) def __delitem__(self, *args): return _mesos.SlaveOfferVector___delitem__(self, *args) def __getitem__(self, *args): return _mesos.SlaveOfferVector___getitem__(self, *args) def __setitem__(self, *args): return _mesos.SlaveOfferVector___setitem__(self, *args) def append(self, *args): return _mesos.SlaveOfferVector_append(self, *args) def empty(self): return _mesos.SlaveOfferVector_empty(self) def size(self): return _mesos.SlaveOfferVector_size(self) def clear(self): return _mesos.SlaveOfferVector_clear(self) def swap(self, *args): return _mesos.SlaveOfferVector_swap(self, *args) def get_allocator(self): return _mesos.SlaveOfferVector_get_allocator(self) def pop_back(self): return _mesos.SlaveOfferVector_pop_back(self) def __init__(self, *args): this = _mesos.new_SlaveOfferVector(*args) try: self.this.append(this) except: self.this = this def push_back(self, *args): return _mesos.SlaveOfferVector_push_back(self, *args) def front(self): return _mesos.SlaveOfferVector_front(self) def back(self): return _mesos.SlaveOfferVector_back(self) def assign(self, *args): return _mesos.SlaveOfferVector_assign(self, *args) def resize(self, *args): return _mesos.SlaveOfferVector_resize(self, *args) def reserve(self, *args): return _mesos.SlaveOfferVector_reserve(self, *args) def capacity(self): return _mesos.SlaveOfferVector_capacity(self) __swig_destroy__ = _mesos.delete_SlaveOfferVector __del__ = lambda self : None; SlaveOfferVector_swigregister = _mesos.SlaveOfferVector_swigregister SlaveOfferVector_swigregister(SlaveOfferVector) class TaskDescriptionVector(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, TaskDescriptionVector, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, TaskDescriptionVector, name) __repr__ = _swig_repr def __nonzero__(self): return _mesos.TaskDescriptionVector___nonzero__(self) def __bool__(self): return _mesos.TaskDescriptionVector___bool__(self) def __len__(self): return _mesos.TaskDescriptionVector___len__(self) def pop(self): return _mesos.TaskDescriptionVector_pop(self) def __getslice__(self, *args): return _mesos.TaskDescriptionVector___getslice__(self, *args) def __setslice__(self, *args): return _mesos.TaskDescriptionVector___setslice__(self, *args) def __delslice__(self, *args): return _mesos.TaskDescriptionVector___delslice__(self, *args) def __delitem__(self, *args): return _mesos.TaskDescriptionVector___delitem__(self, *args) def __getitem__(self, *args): return _mesos.TaskDescriptionVector___getitem__(self, *args) def __setitem__(self, *args): return _mesos.TaskDescriptionVector___setitem__(self, *args) def append(self, *args): return _mesos.TaskDescriptionVector_append(self, *args) def empty(self): return _mesos.TaskDescriptionVector_empty(self) def size(self): return _mesos.TaskDescriptionVector_size(self) def clear(self): return _mesos.TaskDescriptionVector_clear(self) def swap(self, *args): return _mesos.TaskDescriptionVector_swap(self, *args) def get_allocator(self): return _mesos.TaskDescriptionVector_get_allocator(self) def pop_back(self): return _mesos.TaskDescriptionVector_pop_back(self) def __init__(self, *args): this = _mesos.new_TaskDescriptionVector(*args) try: self.this.append(this) except: self.this = this def push_back(self, *args): return _mesos.TaskDescriptionVector_push_back(self, *args) def front(self): return _mesos.TaskDescriptionVector_front(self) def back(self): return _mesos.TaskDescriptionVector_back(self) def assign(self, *args): return _mesos.TaskDescriptionVector_assign(self, *args) def resize(self, *args): return _mesos.TaskDescriptionVector_resize(self, *args) def reserve(self, *args): return _mesos.TaskDescriptionVector_reserve(self, *args) def capacity(self): return _mesos.TaskDescriptionVector_capacity(self) __swig_destroy__ = _mesos.delete_TaskDescriptionVector __del__ = lambda self : None; TaskDescriptionVector_swigregister = _mesos.TaskDescriptionVector_swigregister TaskDescriptionVector_swigregister(TaskDescriptionVector) class StringMap(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, StringMap, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, StringMap, name) __repr__ = _swig_repr def __nonzero__(self): return _mesos.StringMap___nonzero__(self) def __bool__(self): return _mesos.StringMap___bool__(self) def __len__(self): return _mesos.StringMap___len__(self) def __getitem__(self, *args): return _mesos.StringMap___getitem__(self, *args) def __delitem__(self, *args): return _mesos.StringMap___delitem__(self, *args) def has_key(self, *args): return _mesos.StringMap_has_key(self, *args) def keys(self): return _mesos.StringMap_keys(self) def values(self): return _mesos.StringMap_values(self) def items(self): return _mesos.StringMap_items(self) def __contains__(self, *args): return _mesos.StringMap___contains__(self, *args) def key_iterator(self): return _mesos.StringMap_key_iterator(self) def value_iterator(self): return _mesos.StringMap_value_iterator(self) def __iter__(self): return self.key_iterator() def iterkeys(self): return self.key_iterator() def itervalues(self): return self.value_iterator() def iteritems(self): return self.iterator() def __setitem__(self, *args): return _mesos.StringMap___setitem__(self, *args) def __init__(self, *args): this = _mesos.new_StringMap(*args) try: self.this.append(this) except: self.this = this def empty(self): return _mesos.StringMap_empty(self) def size(self): return _mesos.StringMap_size(self) def clear(self): return _mesos.StringMap_clear(self) def swap(self, *args): return _mesos.StringMap_swap(self, *args) def get_allocator(self): return _mesos.StringMap_get_allocator(self) def erase(self, *args): return _mesos.StringMap_erase(self, *args) def count(self, *args): return _mesos.StringMap_count(self, *args) __swig_destroy__ = _mesos.delete_StringMap __del__ = lambda self : None; StringMap_swigregister = _mesos.StringMap_swigregister StringMap_swigregister(StringMap) TASK_STARTING = _mesos.TASK_STARTING TASK_RUNNING = _mesos.TASK_RUNNING TASK_FINISHED = _mesos.TASK_FINISHED TASK_FAILED = _mesos.TASK_FAILED TASK_KILLED = _mesos.TASK_KILLED TASK_LOST = _mesos.TASK_LOST class TaskDescription(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, TaskDescription, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, TaskDescription, name) __repr__ = _swig_repr def __init__(self, *args): this = _mesos.new_TaskDescription(*args) try: self.this.append(this) except: self.this = this __swig_setmethods__["taskId"] = _mesos.TaskDescription_taskId_set __swig_getmethods__["taskId"] = _mesos.TaskDescription_taskId_get if _newclass:taskId = _swig_property(_mesos.TaskDescription_taskId_get, _mesos.TaskDescription_taskId_set) __swig_setmethods__["slaveId"] = _mesos.TaskDescription_slaveId_set __swig_getmethods__["slaveId"] = _mesos.TaskDescription_slaveId_get if _newclass:slaveId = _swig_property(_mesos.TaskDescription_slaveId_get, _mesos.TaskDescription_slaveId_set) __swig_setmethods__["name"] = _mesos.TaskDescription_name_set __swig_getmethods__["name"] = _mesos.TaskDescription_name_get if _newclass:name = _swig_property(_mesos.TaskDescription_name_get, _mesos.TaskDescription_name_set) __swig_setmethods__["params"] = _mesos.TaskDescription_params_set __swig_getmethods__["params"] = _mesos.TaskDescription_params_get if _newclass:params = _swig_property(_mesos.TaskDescription_params_get, _mesos.TaskDescription_params_set) __swig_setmethods__["arg"] = _mesos.TaskDescription_arg_set __swig_getmethods__["arg"] = _mesos.TaskDescription_arg_get if _newclass:arg = _swig_property(_mesos.TaskDescription_arg_get, _mesos.TaskDescription_arg_set) __swig_destroy__ = _mesos.delete_TaskDescription __del__ = lambda self : None; TaskDescription_swigregister = _mesos.TaskDescription_swigregister TaskDescription_swigregister(TaskDescription) class TaskStatus(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, TaskStatus, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, TaskStatus, name) __repr__ = _swig_repr def __init__(self, *args): this = _mesos.new_TaskStatus(*args) try: self.this.append(this) except: self.this = this __swig_setmethods__["taskId"] = _mesos.TaskStatus_taskId_set __swig_getmethods__["taskId"] = _mesos.TaskStatus_taskId_get if _newclass:taskId = _swig_property(_mesos.TaskStatus_taskId_get, _mesos.TaskStatus_taskId_set) __swig_setmethods__["state"] = _mesos.TaskStatus_state_set __swig_getmethods__["state"] = _mesos.TaskStatus_state_get if _newclass:state = _swig_property(_mesos.TaskStatus_state_get, _mesos.TaskStatus_state_set) __swig_setmethods__["data"] = _mesos.TaskStatus_data_set __swig_getmethods__["data"] = _mesos.TaskStatus_data_get if _newclass:data = _swig_property(_mesos.TaskStatus_data_get, _mesos.TaskStatus_data_set) __swig_destroy__ = _mesos.delete_TaskStatus __del__ = lambda self : None; TaskStatus_swigregister = _mesos.TaskStatus_swigregister TaskStatus_swigregister(TaskStatus) class SlaveOffer(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, SlaveOffer, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, SlaveOffer, name) __repr__ = _swig_repr def __init__(self, *args): this = _mesos.new_SlaveOffer(*args) try: self.this.append(this) except: self.this = this __swig_setmethods__["slaveId"] = _mesos.SlaveOffer_slaveId_set __swig_getmethods__["slaveId"] = _mesos.SlaveOffer_slaveId_get if _newclass:slaveId = _swig_property(_mesos.SlaveOffer_slaveId_get, _mesos.SlaveOffer_slaveId_set) __swig_setmethods__["host"] = _mesos.SlaveOffer_host_set __swig_getmethods__["host"] = _mesos.SlaveOffer_host_get if _newclass:host = _swig_property(_mesos.SlaveOffer_host_get, _mesos.SlaveOffer_host_set) __swig_setmethods__["params"] = _mesos.SlaveOffer_params_set __swig_getmethods__["params"] = _mesos.SlaveOffer_params_get if _newclass:params = _swig_property(_mesos.SlaveOffer_params_get, _mesos.SlaveOffer_params_set) __swig_destroy__ = _mesos.delete_SlaveOffer __del__ = lambda self : None; SlaveOffer_swigregister = _mesos.SlaveOffer_swigregister SlaveOffer_swigregister(SlaveOffer) class FrameworkMessage(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, FrameworkMessage, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, FrameworkMessage, name) __repr__ = _swig_repr def __init__(self, *args): this = _mesos.new_FrameworkMessage(*args) try: self.this.append(this) except: self.this = this __swig_setmethods__["slaveId"] = _mesos.FrameworkMessage_slaveId_set __swig_getmethods__["slaveId"] = _mesos.FrameworkMessage_slaveId_get if _newclass:slaveId = _swig_property(_mesos.FrameworkMessage_slaveId_get, _mesos.FrameworkMessage_slaveId_set) __swig_setmethods__["taskId"] = _mesos.FrameworkMessage_taskId_set __swig_getmethods__["taskId"] = _mesos.FrameworkMessage_taskId_get if _newclass:taskId = _swig_property(_mesos.FrameworkMessage_taskId_get, _mesos.FrameworkMessage_taskId_set) __swig_setmethods__["data"] = _mesos.FrameworkMessage_data_set __swig_getmethods__["data"] = _mesos.FrameworkMessage_data_get if _newclass:data = _swig_property(_mesos.FrameworkMessage_data_get, _mesos.FrameworkMessage_data_set) __swig_destroy__ = _mesos.delete_FrameworkMessage __del__ = lambda self : None; FrameworkMessage_swigregister = _mesos.FrameworkMessage_swigregister FrameworkMessage_swigregister(FrameworkMessage) class ExecutorInfo(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, ExecutorInfo, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, ExecutorInfo, name) __repr__ = _swig_repr def __init__(self, *args): this = _mesos.new_ExecutorInfo(*args) try: self.this.append(this) except: self.this = this __swig_setmethods__["uri"] = _mesos.ExecutorInfo_uri_set __swig_getmethods__["uri"] = _mesos.ExecutorInfo_uri_get if _newclass:uri = _swig_property(_mesos.ExecutorInfo_uri_get, _mesos.ExecutorInfo_uri_set) __swig_setmethods__["initArg"] = _mesos.ExecutorInfo_initArg_set __swig_getmethods__["initArg"] = _mesos.ExecutorInfo_initArg_get if _newclass:initArg = _swig_property(_mesos.ExecutorInfo_initArg_get, _mesos.ExecutorInfo_initArg_set) __swig_setmethods__["params"] = _mesos.ExecutorInfo_params_set __swig_getmethods__["params"] = _mesos.ExecutorInfo_params_get if _newclass:params = _swig_property(_mesos.ExecutorInfo_params_get, _mesos.ExecutorInfo_params_set) __swig_destroy__ = _mesos.delete_ExecutorInfo __del__ = lambda self : None; ExecutorInfo_swigregister = _mesos.ExecutorInfo_swigregister ExecutorInfo_swigregister(ExecutorInfo) class Scheduler(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, Scheduler, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, Scheduler, name) __repr__ = _swig_repr __swig_destroy__ = _mesos.delete_Scheduler __del__ = lambda self : None; def getFrameworkName(self, *args): return _mesos.Scheduler_getFrameworkName(self, *args) def getExecutorInfo(self, *args): return _mesos.Scheduler_getExecutorInfo(self, *args) def registered(self, *args): return _mesos.Scheduler_registered(self, *args) def resourceOffer(self, *args): return _mesos.Scheduler_resourceOffer(self, *args) def offerRescinded(self, *args): return _mesos.Scheduler_offerRescinded(self, *args) def statusUpdate(self, *args): return _mesos.Scheduler_statusUpdate(self, *args) def frameworkMessage(self, *args): return _mesos.Scheduler_frameworkMessage(self, *args) def slaveLost(self, *args): return _mesos.Scheduler_slaveLost(self, *args) def error(self, *args): return _mesos.Scheduler_error(self, *args) def __init__(self): if self.__class__ == Scheduler: _self = None else: _self = self this = _mesos.new_Scheduler(_self, ) try: self.this.append(this) except: self.this = this def __disown__(self): self.this.disown() _mesos.disown_Scheduler(self) return weakref_proxy(self) Scheduler_swigregister = _mesos.Scheduler_swigregister Scheduler_swigregister(Scheduler) class SchedulerDriver(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, SchedulerDriver, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, SchedulerDriver, name) __repr__ = _swig_repr __swig_destroy__ = _mesos.delete_SchedulerDriver __del__ = lambda self : None; def start(self): return _mesos.SchedulerDriver_start(self) def stop(self): return _mesos.SchedulerDriver_stop(self) def join(self): return _mesos.SchedulerDriver_join(self) def run(self): return _mesos.SchedulerDriver_run(self) def sendFrameworkMessage(self, *args): return _mesos.SchedulerDriver_sendFrameworkMessage(self, *args) def killTask(self, *args): return _mesos.SchedulerDriver_killTask(self, *args) def replyToOffer(self, *args): return _mesos.SchedulerDriver_replyToOffer(self, *args) def reviveOffers(self): return _mesos.SchedulerDriver_reviveOffers(self) def sendHints(self, *args): return _mesos.SchedulerDriver_sendHints(self, *args) def __init__(self): this = _mesos.new_SchedulerDriver() try: self.this.append(this) except: self.this = this SchedulerDriver_swigregister = _mesos.SchedulerDriver_swigregister SchedulerDriver_swigregister(SchedulerDriver) class MesosSchedulerDriver(SchedulerDriver): __swig_setmethods__ = {} for _s in [SchedulerDriver]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{})) __setattr__ = lambda self, name, value: _swig_setattr(self, MesosSchedulerDriver, name, value) __swig_getmethods__ = {} for _s in [SchedulerDriver]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{})) __getattr__ = lambda self, name: _swig_getattr(self, MesosSchedulerDriver, name) __repr__ = _swig_repr def __init__(self, *args): this = _mesos.new_MesosSchedulerDriver(*args) try: self.this.append(this) except: self.this = this self.scheduler = args[0] __swig_destroy__ = _mesos.delete_MesosSchedulerDriver __del__ = lambda self : None; def start(self): return _mesos.MesosSchedulerDriver_start(self) def stop(self): return _mesos.MesosSchedulerDriver_stop(self) def join(self): return _mesos.MesosSchedulerDriver_join(self) def run(self): return _mesos.MesosSchedulerDriver_run(self) def sendFrameworkMessage(self, *args): return _mesos.MesosSchedulerDriver_sendFrameworkMessage(self, *args) def killTask(self, *args): return _mesos.MesosSchedulerDriver_killTask(self, *args) def replyToOffer(self, *args): return _mesos.MesosSchedulerDriver_replyToOffer(self, *args) def reviveOffers(self): return _mesos.MesosSchedulerDriver_reviveOffers(self) def sendHints(self, *args): return _mesos.MesosSchedulerDriver_sendHints(self, *args) def getScheduler(self): return _mesos.MesosSchedulerDriver_getScheduler(self) MesosSchedulerDriver_swigregister = _mesos.MesosSchedulerDriver_swigregister MesosSchedulerDriver_swigregister(MesosSchedulerDriver) class ExecutorArgs(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, ExecutorArgs, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, ExecutorArgs, name) __repr__ = _swig_repr def __init__(self, *args): this = _mesos.new_ExecutorArgs(*args) try: self.this.append(this) except: self.this = this __swig_setmethods__["slaveId"] = _mesos.ExecutorArgs_slaveId_set __swig_getmethods__["slaveId"] = _mesos.ExecutorArgs_slaveId_get if _newclass:slaveId = _swig_property(_mesos.ExecutorArgs_slaveId_get, _mesos.ExecutorArgs_slaveId_set) __swig_setmethods__["host"] = _mesos.ExecutorArgs_host_set __swig_getmethods__["host"] = _mesos.ExecutorArgs_host_get if _newclass:host = _swig_property(_mesos.ExecutorArgs_host_get, _mesos.ExecutorArgs_host_set) __swig_setmethods__["frameworkId"] = _mesos.ExecutorArgs_frameworkId_set __swig_getmethods__["frameworkId"] = _mesos.ExecutorArgs_frameworkId_get if _newclass:frameworkId = _swig_property(_mesos.ExecutorArgs_frameworkId_get, _mesos.ExecutorArgs_frameworkId_set) __swig_setmethods__["frameworkName"] = _mesos.ExecutorArgs_frameworkName_set __swig_getmethods__["frameworkName"] = _mesos.ExecutorArgs_frameworkName_get if _newclass:frameworkName = _swig_property(_mesos.ExecutorArgs_frameworkName_get, _mesos.ExecutorArgs_frameworkName_set) __swig_setmethods__["data"] = _mesos.ExecutorArgs_data_set __swig_getmethods__["data"] = _mesos.ExecutorArgs_data_get if _newclass:data = _swig_property(_mesos.ExecutorArgs_data_get, _mesos.ExecutorArgs_data_set) __swig_destroy__ = _mesos.delete_ExecutorArgs __del__ = lambda self : None; ExecutorArgs_swigregister = _mesos.ExecutorArgs_swigregister ExecutorArgs_swigregister(ExecutorArgs) class Executor(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, Executor, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, Executor, name) __repr__ = _swig_repr __swig_destroy__ = _mesos.delete_Executor __del__ = lambda self : None; def init(self, *args): return _mesos.Executor_init(self, *args) def launchTask(self, *args): return _mesos.Executor_launchTask(self, *args) def killTask(self, *args): return _mesos.Executor_killTask(self, *args) def frameworkMessage(self, *args): return _mesos.Executor_frameworkMessage(self, *args) def shutdown(self, *args): return _mesos.Executor_shutdown(self, *args) def error(self, *args): return _mesos.Executor_error(self, *args) def __init__(self): if self.__class__ == Executor: _self = None else: _self = self this = _mesos.new_Executor(_self, ) try: self.this.append(this) except: self.this = this def __disown__(self): self.this.disown() _mesos.disown_Executor(self) return weakref_proxy(self) Executor_swigregister = _mesos.Executor_swigregister Executor_swigregister(Executor) class ExecutorDriver(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, ExecutorDriver, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, ExecutorDriver, name) __repr__ = _swig_repr __swig_destroy__ = _mesos.delete_ExecutorDriver __del__ = lambda self : None; def start(self): return _mesos.ExecutorDriver_start(self) def stop(self): return _mesos.ExecutorDriver_stop(self) def join(self): return _mesos.ExecutorDriver_join(self) def run(self): return _mesos.ExecutorDriver_run(self) def sendStatusUpdate(self, *args): return _mesos.ExecutorDriver_sendStatusUpdate(self, *args) def sendFrameworkMessage(self, *args): return _mesos.ExecutorDriver_sendFrameworkMessage(self, *args) def __init__(self): this = _mesos.new_ExecutorDriver() try: self.this.append(this) except: self.this = this ExecutorDriver_swigregister = _mesos.ExecutorDriver_swigregister ExecutorDriver_swigregister(ExecutorDriver) class MesosExecutorDriver(ExecutorDriver): __swig_setmethods__ = {} for _s in [ExecutorDriver]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{})) __setattr__ = lambda self, name, value: _swig_setattr(self, MesosExecutorDriver, name, value) __swig_getmethods__ = {} for _s in [ExecutorDriver]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{})) __getattr__ = lambda self, name: _swig_getattr(self, MesosExecutorDriver, name) __repr__ = _swig_repr def __init__(self, *args): this = _mesos.new_MesosExecutorDriver(*args) try: self.this.append(this) except: self.this = this self.executor = args[0] __swig_destroy__ = _mesos.delete_MesosExecutorDriver __del__ = lambda self : None; def start(self): return _mesos.MesosExecutorDriver_start(self) def stop(self): return _mesos.MesosExecutorDriver_stop(self) def join(self): return _mesos.MesosExecutorDriver_join(self) def run(self): return _mesos.MesosExecutorDriver_run(self) def sendStatusUpdate(self, *args): return _mesos.MesosExecutorDriver_sendStatusUpdate(self, *args) def sendFrameworkMessage(self, *args): return _mesos.MesosExecutorDriver_sendFrameworkMessage(self, *args) def getExecutor(self): return _mesos.MesosExecutorDriver_getExecutor(self) MesosExecutorDriver_swigregister = _mesos.MesosExecutorDriver_swigregister MesosExecutorDriver_swigregister(MesosExecutorDriver)
{ "content_hash": "a808ce890d67e94f0b19ec3fa1a59e68", "timestamp": "", "source": "github", "line_count": 543, "max_line_length": 125, "avg_line_length": 52.95580110497237, "alnum_prop": 0.6868718483741958, "repo_name": "charlescearl/VirtualMesos", "id": "fb4ef7ecb1497f2b3a9433781d22e598f59d3fa6", "size": "29026", "binary": false, "copies": "1", "ref": "refs/heads/mesos-vm", "path": "src/swig/python/mesos.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "2047732" }, { "name": "C++", "bytes": "44919805" }, { "name": "D", "bytes": "3341703" }, { "name": "Emacs Lisp", "bytes": "7798" }, { "name": "Java", "bytes": "14984708" }, { "name": "JavaScript", "bytes": "39087" }, { "name": "Objective-C", "bytes": "118273" }, { "name": "PHP", "bytes": "152555" }, { "name": "Perl", "bytes": "623347" }, { "name": "Python", "bytes": "3910489" }, { "name": "Ruby", "bytes": "67470" }, { "name": "Shell", "bytes": "15673503" }, { "name": "Smalltalk", "bytes": "56562" }, { "name": "VimL", "bytes": "3774" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('account', '0004_auto_20170511_2207'), ] operations = [ migrations.AddField( model_name='accountuser', name='account', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='account.Account'), preserve_default=False, ), ]
{ "content_hash": "640936dd8449877b3d5f47699834d931", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 114, "avg_line_length": 25.75, "alnum_prop": 0.6330097087378641, "repo_name": "fxer/cujo", "id": "adedb185672d278cddfc0c0a30b5ea44f5773cde", "size": "588", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "apps/account/migrations/0005_accountuser_account.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "2042" }, { "name": "HTML", "bytes": "9342" }, { "name": "JavaScript", "bytes": "1624" }, { "name": "Python", "bytes": "37652" } ], "symlink_target": "" }
import os import sys import Image, ImageDraw, ImageStat import numpy as np import matplotlib.pyplot as plt from matplotlib.offsetbox import OffsetImage, AnnotationBbox imagepath = sys.argv[1] # The image to build the histogram of histHeight = 120 # Height of the histogram histWidth = 256 # Width of the histogram multiplerValue = 1 # The multiplier value basically increases # the histogram height so that love values # are easier to see, this in effect chops off # the top of the histogram. showFstopLines = True # True/False to hide outline fStopLines = 5 # Colours to be used backgroundColor = (51,51,51) # Background color lineColor = (102,102,102) # Line color of fStop Markers red = (255,60,60) # Color for the red lines green = (51,204,51) # Color for the green lines blue = (0,102,255) # Color for the blue lines ################################################################################## img = Image.open(imagepath) if len(sys.argv) > 2 and sys.argv[2] == "brightness_hist": backgroundColor = (255,255,255) # Background color red = (51,51,51) # Background color green = (51,51,51) # Background color blue = (51,51,51) # Background color width, height = img.size pix = img.load() for x in range(width): for y in range(height): brightness = max(pix[x, y]) pix[x, y] = (brightness, brightness, brightness) hist = img.histogram() histMax = max(hist) # comon color #average = sum(hist)/len(hist) #histMax = 0 #for i in hist: # if int(i) > 3*average: pass # elif int(i) > histMax: # histMax = int(i) #histHeight = histMax xScale = float(histWidth)/len(hist) # xScaling yScale = float((histHeight)*multiplerValue)/histMax # yScaling im = Image.new("RGBA", (histWidth, histHeight), backgroundColor) draw = ImageDraw.Draw(im) # Draw Outline is required if showFstopLines: xmarker = histWidth/fStopLines x =0 for i in range(1,fStopLines+1): draw.line((x, 0, x, histHeight), fill=lineColor) x+=xmarker draw.line((histWidth-1, 0, histWidth-1, 200), fill=lineColor) draw.line((0, 0, 0, histHeight), fill=lineColor) # Draw the RGB histogram lines x=0; c=0; for i in hist: if int(i)==0: pass else: color = red if c>255: color = green if c>511: color = blue draw.line((x, histHeight, x, histHeight-i*yScale), fill=color) if x>255: x=0 else: x+=1 c+=1 #resize the image #resize_factor = 1 #im = im.resize((histWidth*resize_factor, histHeight*resize_factor), Image.NEAREST) x,y = np.random.rand(2,10) fig = plt.figure() ax = fig.add_subplot(111) im = ax.imshow(im,extent=[0,histWidth, 0,histHeight]) # Now save and show the histogram output_file_name = os.path.splitext(imagepath)[0] + "_histogram.png" plt.savefig(output_file_name)
{ "content_hash": "a46c54f240fe3add69c64380ec7866f8", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 83, "avg_line_length": 28.058252427184467, "alnum_prop": 0.6391003460207613, "repo_name": "amirchohan/HDR", "id": "d2035d37f5656ac9a4380b87954374e7365889e1", "size": "3159", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "img_hist.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "161195" }, { "name": "C++", "bytes": "88036" }, { "name": "Java", "bytes": "22900" }, { "name": "Python", "bytes": "3159" }, { "name": "Shell", "bytes": "2472" } ], "symlink_target": "" }
QUASIMODE_START_KEY = "KEYCODE_RCONTROL" QUASIMODE_END_KEY = "KEYCODE_RETURN" QUASIMODE_CANCEL_KEY1 = "KEYCODE_ESCAPE" QUASIMODE_CANCEL_KEY2 = "KEYCODE_RCONTROL" # Whether the Quasimode is actually modal ("sticky"). IS_QUASIMODE_MODAL = True # Amount of time, in seconds (float), to wait from the time # that the quasimode begins drawing to the time that the # suggestion list begins to be displayed. Setting this to a # value greater than 0 will effectively create a # "spring-loaded suggestion list" behavior. QUASIMODE_SUGGESTION_DELAY = 0.2 # The maximum number of suggestions to display in the quasimode. QUASIMODE_MAX_SUGGESTIONS = 6 # The minimum number of characters the user must type before the # auto-completion mechanism engages. QUASIMODE_MIN_AUTOCOMPLETE_CHARS = 2 # The message displayed when the user types some text that is not a command. BAD_COMMAND_MSG = "<p><command>%s</command> is not a command.</p>"\ "%s" # Minimum number of characters that should have been typed into the # quasimode for a bad command message to be shown. BAD_COMMAND_MSG_MIN_CHARS = 2 # The captions for the above message, indicating commands that are related # to the command the user typed. ONE_SUGG_CAPTION = "<caption>Did you mean <command>%s</command>?</caption>" # The string that is displayed in the quasimode window when the user # first enters the quasimode. QUASIMODE_DEFAULT_HELP = u"Welcome to Enso! Enter a command, " \ u"or type \u201chelp\u201d for assistance." # The string displayed when the user has typed some characters but there # is no matching command. QUASIMODE_NO_COMMAND_HELP = "There is no matching command. "\ "Use backspace to delete characters." # Message XML for the Splash message shown when Enso first loads. OPENING_MSG_XML = "<p>Welcome to <command>Enso</command>!</p>" + \ "<caption>Copyright &#169; 2008 Humanized, Inc.</caption>" # Message XML displayed when the mouse hovers over a mini message. MINI_MSG_HELP_XML = "<p>The <command>hide mini messages</command>" \ " and <command>put</command> commands control" \ " these mini-messages.</p>" ABOUT_BOX_XML = u"<p><command>Enso</command> Community Edition</p>" \ "<caption> </caption>" \ "<p>Copyright &#169; 2008 <command>Humanized, Inc.</command></p>" \ "<p>Copyright &#169; 2008-2009 <command>Enso Community</command></p>" \ "<p>Version 1.0</p>" # List of default platforms supported by Enso; platforms are specific # types of providers that provide a suite of platform-specific # functionality. DEFAULT_PLATFORMS = ["enso.platform.win32"] # List of modules/packages that support the provider interface to # provide required platform-specific functionality to Enso. PROVIDERS = [] PROVIDERS.extend(DEFAULT_PLATFORMS) # List of modules/packages that support the plugin interface to # extend Enso. The plugins are loaded in the order that they # are specified in this list. PLUGINS = ["enso.contrib.scriptotron", "enso.contrib.help", "enso.contrib.google", "enso.contrib.evaluate"] FONT_NAME = {"normal" : "Gentium (Humanized)", "italic" : "Gentium Italic"}
{ "content_hash": "082b3f509177fd56bb1b1d6ba6907076", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 76, "avg_line_length": 39.822784810126585, "alnum_prop": 0.7256834075015893, "repo_name": "tartakynov/enso", "id": "1eeaa4d171fbc904502608212faf6eb42b9941f5", "size": "3462", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "enso/config.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "9734342" }, { "name": "C++", "bytes": "403798" }, { "name": "JavaScript", "bytes": "3338" }, { "name": "Objective-C", "bytes": "15094" }, { "name": "Python", "bytes": "765642" }, { "name": "Shell", "bytes": "235" } ], "symlink_target": "" }
""" Define various entities and populate them """ import re import sys import numpy as np from collections import defaultdict, OrderedDict from datetime import datetime from ngi_reports.utils import statusdb class Sample: """Sample class """ def __init__(self): self.customer_name = '' self.ngi_id = '' self.preps = {} self.qscore = '' self.total_reads = 0.0 self.initial_qc = { 'initial_qc_status' : '', 'concentration': '', 'conc_units':'', 'volume_(ul)': '', 'amount_(ng)': '', 'rin': '' } self.well_location = '' class Prep: """Prep class """ def __init__(self): self.avg_size = 'NA' self.barcode = 'NA' self.label = '' self.qc_status = 'NA' self.seq_fc = 'NA' class Flowcell: """Flowcell class """ def __init__(self): self.date = '' self.lanes = OrderedDict() self.name = '' self.run_name = '' self.run_setup = [] self.seq_meth = '' self.type = '' self.run_params = {} self.chemistry = {} self.casava = None self.seq_software = {} class Lane: """Lane class """ def __init__(self): self.avg_qval = '' self.cluster = '' self.id = '' self.phix = '' def set_lane_info(self, to_set, key, lane_info, reads, as_million=False): """Set the average value of gives key from given lane info :param str to_set: class parameter to be set :param str key: key to be fetched :param dict lane_info: a dictionary with required lane info :param str reads: number of reads for keys to be fetched """ try: v = np.mean([float(lane_info.get('{} R{}'.format(key, str(r)))) for r in range(1,int(reads)+1)]) val = '{:.2f}'.format(round(v/1000000, 2)) if as_million else '{:.2f}'.format(round(v, 2)) except TypeError: val = None if to_set == 'cluster': self.cluster = val elif to_set == 'avg_qval': self.avg_qval = val elif to_set == 'fc_phix': self.phix = val class AbortedSampleInfo: """Aborted Sample info class """ def __init__(self, user_id, status): self.status = status self.user_id = user_id class Project: """Project class """ def __init__(self): self.aborted_samples = OrderedDict() self.samples = OrderedDict() self.flowcells = {} self.accredited = { 'library_preparation': 'N/A', 'data_processing': 'N/A', 'sequencing': 'N/A', 'data_analysis':'N/A' } self.application = '' self.best_practice = False self.cluster = '' self.contact = '' self.dates = { 'order_received': None, 'open_date': None, 'first_initial_qc_start_date': None, 'contract_received': None, 'samples_received': None, 'queue_date': None, 'all_samples_sequenced': None } self.is_finished_lib = False self.is_hiseqx = False self.library_construction = '' self.missing_fc = False self.ngi_facility = '' self.ngi_name = '' self.samples_unit = '#reads' self.num_samples = 0 self.num_lanes = 0 self.ngi_id = '' self.reference = { 'genome': None, 'organism': None } self.report_date = '' self.sequencing_setup = '' self.skip_fastq = False self.user_ID = '' def populate(self, log, organism_names, **kwargs): project = kwargs.get('project', '') if not project: log.error('A project must be provided, so not proceeding.') sys.exit('A project was not provided, stopping execution...') self.skip_fastq = kwargs.get('skip_fastq') self.cluster = kwargs.get('cluster') pcon = statusdb.ProjectSummaryConnection() assert pcon, 'Could not connect to {} database in StatusDB'.format('project') if re.match('^P\d+$', project): self.ngi_id = project id_view, pid_as_uppmax_dest = (True, True) else: self.ngi_name = project id_view, pid_as_uppmax_dest = (False, False) proj = pcon.get_entry(project, use_id_view=id_view) if not proj: log.error('No such project name/id "{}", check if provided information is right'.format(project)) sys.exit('Project not found in statusdb, stopping execution...') self.ngi_name = proj.get('project_name') if proj.get('source') != 'lims': log.error('The source for data for project {} is not LIMS.'.format(project)) raise BaseException proj_details = proj.get('details',{}) if 'aborted' in proj_details: log.warn('Project {} was aborted, so not proceeding.'.format(project)) sys.exit('Project {} was aborted, stopping execution...'.format(project)) if not id_view: self.ngi_id = proj.get('project_id') for date in self.dates: self.dates[date] = proj_details.get(date, None) if proj.get('project_summary',{}).get('all_samples_sequenced'): self.dates['all_samples_sequenced'] = proj.get('project_summary',{}).get('all_samples_sequenced') self.contact = proj.get('contact') self.application = proj.get('application') self.num_samples = proj.get('no_of_samples') self.ngi_facility = 'Genomics {} Stockholm'.format(proj_details.get('type')) if proj_details.get('type') else None self.reference['genome'] = None if proj.get('reference_genome') == 'other' else proj.get('reference_genome') self.reference['organism'] = organism_names.get(self.reference['genome'], None) self.user_ID = proj_details.get('customer_project_reference','') self.num_lanes = proj_details.get('sequence_units_ordered_(lanes)') self.library_construction_method = proj_details.get('library_construction_method') self.library_prep_option = proj_details.get('library_prep_option', '') if 'dds' in proj.get('delivery_type','').lower(): self.cluster = 'dds' elif 'grus' in proj.get('delivery_type','').lower(): self.cluster = 'grus' elif 'hdd' in proj.get('delivery_type','').lower(): self.cluster = 'hdd' else: self.cluster = 'unknown' self.best_practice = False if proj_details.get('best_practice_bioinformatics','No') == 'No' else True self.library_construction = self.get_library_method(self.ngi_name, self.application, self.library_construction_method, self.library_prep_option) self.is_finished_lib = True if 'by user' in self.library_construction.lower() else False for key in self.accredited: self.accredited[key] = proj_details.get('accredited_({})'.format(key)) if 'hiseqx' in proj_details.get('sequencing_platform', ''): self.is_hiseqx = True self.sequencing_setup = proj_details.get('sequencing_setup') for sample_id, sample in sorted(proj.get('samples', {}).items()): if kwargs.get('samples', []) and sample_id not in kwargs.get('samples', []): log.info('Will not include sample {} as it is not in given list'.format(sample_id)) continue customer_name = sample.get('customer_name','NA') #Get once for a project if self.dates['first_initial_qc_start_date'] is not None: self.dates['first_initial_qc_start_date'] = sample.get('first_initial_qc_start_date') log.info('Processing sample {}'.format(sample_id)) ## Check if the sample is aborted before processing if sample.get('details',{}).get('status_(manual)') == 'Aborted': log.info('Sample {} is aborted, so skipping it'.format(sample_id)) self.aborted_samples[sample_id] = AbortedSampleInfo(customer_name, 'Aborted') continue samObj = Sample() samObj.ngi_id = sample_id samObj.customer_name = customer_name samObj.well_location = sample.get('well_location') ## Basic fields from Project database # Initial qc if sample.get('initial_qc'): for item in samObj.initial_qc: samObj.initial_qc[item] = sample['initial_qc'].get(item) #Library prep ## get total reads if available or mark sample as not sequenced try: #check if sample was sequenced. More accurate value will be calculated from flowcell yield total_reads = float(sample['details']['total_reads_(m)']) except KeyError: log.warn('Sample {} doesnt have total reads, so adding it to NOT sequenced samples list.'.format(sample_id)) self.aborted_samples[sample_id] = AbortedSampleInfo(customer_name, 'Not sequenced') ## dont gather unnecessary information if not going to be looked up if not kwargs.get('yield_from_fc'): continue ## Go through each prep for each sample in the Projects database for prep_id, prep in list(sample.get('library_prep', {}).items()): prepObj = Prep() prepObj.label = prep_id if prep.get('reagent_label') and prep.get('prep_status'): prepObj.barcode = prep.get('reagent_label', 'NA') prepObj.qc_status = prep.get('prep_status', 'NA') ## get flow cell information for each prep from project database (only if -b flag is set) if kwargs.get('barcode_from_fc'): prepObj.seq_fc = [] for fc in sample.get('library_prep').get(prep_id).get('sequenced_fc'): prepObj.seq_fc.append(fc.split('_')[-1]) else: log.warn('Could not fetch barcode/prep status for sample {} in prep {}'.format(sample_id, prep_id)) if 'pcr-free' not in self.library_construction.lower(): if prep.get('library_validation'): lib_valids = prep['library_validation'] keys = sorted([k for k in list(lib_valids.keys()) if re.match('^[\d\-]*$',k)], key=lambda k: datetime.strptime(lib_valids[k]['start_date'], '%Y-%m-%d'), reverse=True) try: prepObj.avg_size = re.sub(r'(\.[0-9]{,2}).*$', r'\1', str(lib_valids[keys[0]]['average_size_bp'])) except: log.warn('Insufficient info "{}" for sample {}'.format('average_size_bp', sample_id)) else: log.warn('No library validation step found {}'.format(sample_id)) samObj.preps[prep_id] = prepObj # exception for case of multi-barcoded sample from different preps run on the same fc (only if -b flag is set) if kwargs.get('barcode_from_fc'): list_of_barcodes = sum([[all_barcodes.barcode for all_barcodes in list(samObj.preps.values())]], []) if len(list(dict.fromkeys(list_of_barcodes))) >= 1: list_of_flowcells = sum([all_flowcells.seq_fc for all_flowcells in list(samObj.preps.values())], []) if len(list_of_flowcells) != len(list(dict.fromkeys(list_of_flowcells))): #the sample was run twice on the same flowcell, only possible with different barcodes for the same sample log.error('Ambiguous preps for barcodes on flowcell. Please run ngi_pipelines without the -b flag and amend the report manually') sys.exit('Stopping execution...') if not samObj.preps: log.warn('No library prep information was available for sample {}'.format(sample_id)) self.samples[sample_id] = samObj #Get Flowcell data fcon = statusdb.FlowcellRunMetricsConnection() assert fcon, 'Could not connect to {} database in StatusDB'.format('flowcell') xcon = statusdb.X_FlowcellRunMetricsConnection() assert xcon, 'Could not connect to {} database in StatusDB'.format('x_flowcells') flowcell_info = fcon.get_project_flowcell(self.ngi_id, self.dates['open_date']) flowcell_info.update(xcon.get_project_flowcell(self.ngi_id, self.dates['open_date'])) sample_qval = defaultdict(dict) for fc in list(flowcell_info.values()): if fc['name'] in kwargs.get('exclude_fc'): continue fcObj = Flowcell() fcObj.name = fc['name'] fcObj.run_name = fc['run_name'] fcObj.date = fc['date'] # get database document from appropriate database if fc['db'] == 'x_flowcells': fc_details = xcon.get_entry(fc['run_name']) else: fc_details = fcon.get_entry(fc['run_name']) # set the fc type fc_inst = fc_details.get('RunInfo', {}).get('Instrument','') if fc_inst.startswith('ST-'): fcObj.type = 'HiSeqX' self.is_hiseqx = True fc_runp = fc_details.get('RunParameters',{}).get('Setup',{}) elif '-' in fcObj.name : fcObj.type = 'MiSeq' fc_runp = fc_details.get('RunParameters',{}) elif fc_inst.startswith('A'): fcObj.type = 'NovaSeq6000' fc_runp = fc_details.get('RunParameters',{}) elif fc_inst.startswith('NS'): fcObj.type = 'NextSeq500' fc_runp = fc_details.get('RunParameters',{}) elif fc_inst.startswith('VH'): fcObj.type = 'NextSeq2000' fc_runp = fc_details.get('RunParameters',{}) else: fcObj.type = 'HiSeq2500' fc_runp = fc_details.get('RunParameters',{}).get('Setup',{}) ## Fetch run setup for the flowcell fcObj.run_setup = fc_details.get('RunInfo').get('Reads') if fcObj.type == 'NovaSeq6000': fcObj.chemistry = {'WorkflowType' : fc_runp.get('WorkflowType'), 'FlowCellMode' : fc_runp.get('RfidsInfo', {}).get('FlowCellMode')} elif fcObj.type == 'NextSeq500': fcObj.chemistry = {'Chemistry': fc_runp.get('Chemistry').replace('NextSeq ', '')} elif fcObj.type == 'NextSeq2000': NS2000_FC_PAT = re.compile("P[1,2,3]") fcObj.chemistry = {'Chemistry': NS2000_FC_PAT.findall(fc_runp.get('FlowCellMode'))[0]} else: fcObj.chemistry = {'Chemistry' : fc_runp.get('ReagentKitVersion', fc_runp.get('Sbs'))} try: fcObj.casava = list(fc_details['DemultiplexConfig'].values())[0]['Software']['Version'] except (KeyError, IndexError): continue if fcObj.type == 'MiSeq': fcObj.seq_software = {'RTAVersion': fc_runp.get('RTAVersion'), 'ApplicationVersion': fc_runp.get('MCSVersion') } elif fcObj.type == 'NextSeq500' or fcObj.type == 'NextSeq2000': fcObj.seq_software = {'RTAVersion': fc_runp.get('RTAVersion', fc_runp.get('RtaVersion')), 'ApplicationName': fc_runp.get('ApplicationName') if fc_runp.get('ApplicationName') else fc_runp.get('Setup').get('ApplicationName'), 'ApplicationVersion': fc_runp.get('ApplicationVersion') if fc_runp.get('ApplicationVersion') else fc_runp.get('Setup').get('ApplicationVersion') } else: fcObj.seq_software = {'RTAVersion': fc_runp.get('RTAVersion', fc_runp.get('RtaVersion')), 'ApplicationName': fc_runp.get('ApplicationName', fc_runp.get('Application')), 'ApplicationVersion': fc_runp.get('ApplicationVersion') } ## collect info of samples and their library prep / LIMS indexes on the FC (only if -b option is set) if kwargs.get('barcode_from_fc'): log.info('\'barcodes_from_fc\' option was given so index sequences for the report will be taken from the flowcell instead of LIMS') preps_samples_on_fc = [] list_additional_samples = [] ## get all samples from flow cell that belong to the project fc_samples = [] for fc_sample in fc_details.get('samplesheet_csv'): if fc_sample.get('Sample_Name').split('_')[0] == self.ngi_id: fc_samples.append(fc_sample.get('Sample_Name')) ## iterate through all samples in project to identify their prep_ID (only if they are on the flowcell) for sample_ID in list(self.samples): for prep_ID in list(self.samples.get(sample_ID).preps): sample_preps = self.samples.get(sample_ID).preps if fcObj.name in sample_preps.get(prep_ID).seq_fc: preps_samples_on_fc.append([sample_ID, prep_ID]) else: continue ## get (if any) samples that are on the fc, but are not recorded in LIMS (i.e. added bc from undet reads) if len(set(list(self.samples))) != len(set(fc_samples)): list_additional_samples = list(set(fc_samples) - set(self.samples)) list_additional_samples.sort() # generate a list of all additional samples log.info('The flowcell {} contains {} sample(s) ({}) that has/have not been defined in LIMS. They will be added to the report.'.format(fc_details.get('RunInfo').get('Id'), len(list_additional_samples), ', '.join(list_additional_samples))) undet_iteration = 1 # creating additional sample and prep Objects for additional_sample in list_additional_samples: AsamObj = Sample() AsamObj.ngi_id = additional_sample AsamObj.customer_name = 'unknown' + str(undet_iteration) # additional samples will be named "unknown[number]" in the report AsamObj.well_location = 'NA' AsamObj.preps['NA'] = Prep() AsamObj.preps['NA'].label = 'NA' self.samples[additional_sample] = AsamObj preps_samples_on_fc.append([additional_sample, 'NA']) undet_iteration+=1 ## Collect quality info for samples and collect lanes of interest for stat in fc_details.get('illumina',{}).get('Demultiplex_Stats',{}).get('Barcode_lane_statistics',[]): if re.sub('_+','.',stat['Project'],1) != self.ngi_name and stat['Project'] != self.ngi_name: continue lane = stat.get('Lane') if fc['db'] == 'x_flowcells': sample = stat.get('Sample') barcode = stat.get('Barcode sequence') qval_key, base_key = ('% >= Q30bases', 'PF Clusters') else: sample = stat.get('Sample ID') barcode = stat.get('Index') qval_key, base_key = ('% of >= Q30 Bases (PF)', '# Reads') ## if '-b' flag is set, we override the barcodes from LIMS with the barcodes from the flowcell for all samples if kwargs.get('barcode_from_fc'): new_barcode = '-'.join(barcode.split('+')) # change the barcode layout to match the one used for the report lib_prep = [] # adding the now required library prep, set to NA for all non-LIMS samples if sample in list_additional_samples: lib_prep.append('NA') else: # adding library prep for LIMS samples, we identified them earlier for sub_prep_sample in preps_samples_on_fc: if sub_prep_sample[0] == sample: lib_prep.append(sub_prep_sample[1]) for prep_o_samples in lib_prep: # changing the barcode happens here! self.samples.get(sample).preps.get(prep_o_samples).barcode = new_barcode #skip if there are no lanes or samples if not lane or not sample or not barcode: log.warn('Insufficient info/malformed data in Barcode_lane_statistics in FC {}, skipping...'.format(fcObj.name)) continue if kwargs.get('samples', []) and sample not in kwargs.get('samples', []): continue try: r_idx = '{}_{}_{}'.format(lane, fcObj.name, barcode) r_len_list = [x['NumCycles'] for x in fcObj.run_setup if x['IsIndexedRead'] == 'N'] r_len_list = [int(x) for x in r_len_list] r_num = len(r_len_list) qval = float(stat.get(qval_key)) pfrd = int(stat.get(base_key).replace(',','')) pfrd = pfrd/2 if fc['db'] == 'flowcell' else pfrd base = pfrd * sum(r_len_list) sample_qval[sample][r_idx] = {'qval': qval, 'reads': pfrd, 'bases': base} except (TypeError, ValueError, AttributeError) as e: log.warn('Something went wrong while fetching Q30 for sample {} with barcode {} in FC {} at lane {}'.format(sample, barcode, fcObj.name, lane)) pass ## collect lanes of interest to proceed later fc_lane_summary = fc_details.get('lims_data', {}).get('run_summary', {}) if lane not in fcObj.lanes: laneObj = Lane() lane_sum = fc_lane_summary.get(lane, fc_lane_summary.get('A',{})) laneObj.id = lane laneObj.set_lane_info('cluster', 'Reads PF (M)' if 'NovaSeq' in fcObj.type or 'NextSeq' in fcObj.type else 'Clusters PF', lane_sum, str(r_num), False if 'NovaSeq' in fcObj.type or 'NextSeq' in fcObj.type else True) laneObj.set_lane_info('avg_qval', '% Bases >=Q30', lane_sum, str(r_num)) laneObj.set_lane_info('fc_phix', '% Error Rate', lane_sum, str(r_num)) if kwargs.get('fc_phix',{}).get(fcObj.name, {}): laneObj.phix = kwargs.get('fc_phix').get(fcObj.name).get(lane) fcObj.lanes[lane] = laneObj ## Check if the above created lane object has all needed info for k,v in vars(laneObj).items(): if not v: log.warn('Could not fetch {} for FC {} at lane {}'.format(k, fcObj.name, lane)) self.flowcells[fcObj.name] = fcObj if not self.flowcells: log.warn('There is no flowcell to process for project {}'.format(self.ngi_name)) self.missing_fc = True if sample_qval and kwargs.get('yield_from_fc'): log.info('\'yield_from_fc\' option was given so will compute the yield from collected flowcells') for sample in list(self.samples.keys()): if sample not in list(sample_qval.keys()): del self.samples[sample] ## calculate average Q30 over all lanes and flowcell max_total_reads = 0 for sample in sorted(sample_qval.keys()): try: qinfo = sample_qval[sample] total_qvalsbp, total_bases, total_reads = (0, 0, 0) for k in qinfo: total_qvalsbp += qinfo[k]['qval'] * qinfo[k]['bases'] total_bases += qinfo[k]['bases'] total_reads += qinfo[k]['reads'] avg_qval = float(total_qvalsbp)/total_bases if total_bases else float(total_qvalsbp) self.samples[sample].qscore = '{:.2f}'.format(round(avg_qval, 2)) # Sample has been sequenced and should be removed from the aborted/not sequenced list if sample in self.aborted_samples: log.info('Sample {} was sequenced, so removing it from NOT sequenced samples list'.format(sample)) del self.aborted_samples[sample] ## Get/overwrite yield from the FCs computed instead of statusDB value if total_reads: self.samples[sample].total_reads = total_reads if total_reads > max_total_reads: max_total_reads = total_reads except (TypeError, KeyError): log.error('Could not calcluate average Q30 for sample {}'.format(sample)) #Cut down total reads to bite sized numbers samples_divisor = 1 if max_total_reads > 1000: if max_total_reads > 1000000: self.samples_unit = 'Mreads' samples_divisor = 1000000 else: self.samples_unit = 'Kreads' samples_divisor = 1000 for sample in self.samples: self.samples[sample].total_reads = '{:.2f}'.format(self.samples[sample].total_reads/float(samples_divisor)) def get_library_method(self, project_name, application, library_construction_method, library_prep_option): """Get the library construction method and return as formatted string """ if application == 'Finished library': return 'Library was prepared by user.' try: lib_meth_pat = r'^(.*?),(.*?),(.*?),(.*?)[\[,](.*)$' #Input, Type, Option, Category -/, doucment number lib_head = ['Input', 'Type', 'Option', 'Category'] lib_meth = re.search(lib_meth_pat, library_construction_method) if lib_meth: lib_meth_list = lib_meth.groups()[:4] #not interested in the document number lib_list = [] for name,value in zip(lib_head, lib_meth_list): value = value.strip() #remove empty space(s) at the ends if value == 'By user': return 'Library was prepared by user.' if value and value != '-': lib_list.append('* {}: {}'.format(name, value)) return ('\n'.join(lib_list)) else: if library_prep_option: return '* Method: {}\n* Option: {}'.format(library_construction_method, library_prep_option) else: return '* Method: {}'.format(library_construction_method) except KeyError: log.error('Could not find library construction method for project {} in statusDB'.format(project_name)) return None
{ "content_hash": "3de321b65e6fd40bb21c0cb548004746", "timestamp": "", "source": "github", "line_count": 560, "max_line_length": 258, "avg_line_length": 50.72678571428571, "alnum_prop": 0.5255394797057064, "repo_name": "NationalGenomicsInfrastructure/ngi_reports", "id": "b5de82f183062da6bc5f66e92f3f7d5f9ec9b16e", "size": "28407", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ngi_reports/utils/entities.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "87274" }, { "name": "Python", "bytes": "58486" }, { "name": "TeX", "bytes": "12112" } ], "symlink_target": "" }
from __future__ import absolute_import import re from collections import deque import six class Token: def __init__(self, **kwds): self.buffer = None self.__dict__.update(kwds) def __str__(self): return self.__dict__.__str__() def regexec (regex, input): matches = regex.match(input) if matches: return (input[matches.start():matches.end()],)+matches.groups() return None class Lexer(object): RE_INPUT = re.compile(r'\r\n|\r') RE_COMMENT = re.compile(r'^ *\/\/(-)?([^\n]*)') RE_TAG = re.compile(r'^(\w[-:\w]*)') RE_FILTER = re.compile(r'^:(\w+)') RE_DOCTYPE = re.compile(r'^(?:!!!|doctype) *([^\n]+)?') RE_ID = re.compile(r'^#([\w-]+)') RE_CLASS = re.compile(r'^\.([\w-]+)') RE_TEXT = re.compile(r'^(?:\| ?)?([^\n]+)') RE_EXTENDS = re.compile(r'^extends? +([^\n]+)') RE_PREPEND = re.compile(r'^prepend +([^\n]+)') RE_APPEND = re.compile(r'^append +([^\n]+)') RE_BLOCK = re.compile(r'''^block(( +(?:(prepend|append) +)?([^\n]*))|\n)''') RE_YIELD = re.compile(r'^yield *') RE_INCLUDE = re.compile(r'^include +([^\n]+)') RE_ASSIGNMENT = re.compile(r'^(-\s+var\s+)?(\w+) += *([^;\n]+)( *;? *)') RE_MIXIN = re.compile(r'^mixin +([-\w]+)(?: *\((.*)\))?') RE_CALL = re.compile(r'^\+([-\w]+)(?: *\((.*)\))?') RE_CONDITIONAL = re.compile(r'^(?:- *)?(if|unless|else if|elif|else)\b([^\n]*)') RE_BLANK = re.compile(r'^\n *\n') # RE_WHILE = re.compile(r'^while +([^\n]+)') RE_EACH = re.compile(r'^(?:- *)?(?:each|for) +([\w, ]+) +in +([^\n]+)') RE_CODE = re.compile(r'^(!?=|-)([^\n]+)') RE_ATTR_INTERPOLATE = re.compile(r'#\{([^}]+)\}') RE_ATTR_PARSE = re.compile(r'''^['"]|['"]$''') RE_INDENT_TABS = re.compile(r'^\n(\t*) *') RE_INDENT_SPACES = re.compile(r'^\n( *)') RE_COLON = re.compile(r'^: *') # RE_ = re.compile(r'') def __init__(self,string,**options): if isinstance(string, six.binary_type): string = six.text_type(string, 'utf8') self.options = options self.input = self.RE_INPUT.sub('\n',string) self.colons = self.options.get('colons',False) self.deferredTokens = deque() self.lastIndents = 0 self.lineno = 1 self.stash = deque() self.indentStack = deque() self.indentRe = None self.pipeless = False def tok(self,type,val=None): return Token(type=type,line=self.lineno,val=val) def consume(self,len): self.input = self.input[len:] def scan(self,regexp,type): captures = regexec(regexp,self.input) # print regexp,type, self.input, captures if captures: # print captures self.consume(len(captures[0])) # print 'a',self.input if len(captures)==1: return self.tok(type,None) return self.tok(type,captures[1]) def defer(self,tok): self.deferredTokens.append(tok) def lookahead(self,n): # print self.stash fetch = n-len(self.stash) while True: fetch -=1 if not fetch>=0: break self.stash.append(self.next()) return self.stash[n-1] def indexOfDelimiters(self,start,end): str,nstart,nend,pos = self.input,0,0,0 for i,s in enumerate(str): if start == s: nstart +=1 elif end == s: nend +=1 if nend==nstart: pos = i break return pos def stashed (self): # print self.stash return len(self.stash) and self.stash.popleft() def deferred (self): return len(self.deferredTokens) and self.deferredTokens.popleft() def eos (self): # print 'eos',bool(self.input) if self.input: return if self.indentStack: self.indentStack.popleft() return self.tok('outdent') else: return self.tok('eos') def blank(self): if self.pipeless: return captures = regexec(self.RE_BLANK,self.input) if captures: self.consume(len(captures[0])-1) return self.next() def comment(self): captures = regexec(self.RE_COMMENT,self.input) if captures: self.consume(len(captures[0])) tok = self.tok('comment',captures[2]) tok.buffer = '-'!=captures[1] return tok def tag(self): captures = regexec(self.RE_TAG,self.input) # print self.input,captures,re.match('^(\w[-:\w]*)',self.input) if captures: self.consume(len(captures[0])) name = captures[1] if name.endswith(':'): name = name[:-1] tok = self.tok('tag',name) self.defer(self.tok(':')) while self.input[0]== ' ': self.input = self.input[1:] else: tok = self.tok('tag',name) return tok def filter(self): return self.scan(self.RE_FILTER, 'filter') def doctype(self): # print self.scan(self.RE_DOCTYPE, 'doctype') return self.scan(self.RE_DOCTYPE, 'doctype') def id(self): return self.scan(self.RE_ID, 'id') def className(self): return self.scan(self.RE_CLASS, 'class') def text(self): return self.scan(self.RE_TEXT, 'text') def extends(self): return self.scan(self.RE_EXTENDS, 'extends') def prepend(self): captures = regexec(self.RE_PREPEND,self.input) if captures: self.consume(len(captures[0])) mode,name = 'prepend',captures[1] tok = self.tok('block',name) tok.mode = mode return tok def append(self): captures = regexec(self.RE_APPEND,self.input) if captures: self.consume(len(captures[0])) mode,name = 'append',captures[1] tok = self.tok('block',name) tok.mode = mode return tok def block(self): captures = regexec(self.RE_BLOCK,self.input) if captures: self.consume(len(captures[0])) mode = captures[3] or 'replace' name = captures[4] or '' tok = self.tok('block',name) tok.mode = mode return tok def _yield(self): return self.scan(self.RE_YIELD, 'yield') def include(self): return self.scan(self.RE_INCLUDE, 'include') def assignment(self): captures = regexec(self.RE_ASSIGNMENT,self.input) if captures: self.consume(len(captures[0])) name,val = captures[2:4] tok = self.tok('assignment') tok.name = name tok.val = val return tok def mixin(self): captures = regexec(self.RE_MIXIN,self.input) if captures: self.consume(len(captures[0])) tok = self.tok('mixin',captures[1]) tok.args = captures[2] return tok def call(self): captures = regexec(self.RE_CALL,self.input) if captures: self.consume(len(captures[0])) tok = self.tok('call',captures[1]) tok.args = captures[2] return tok def conditional(self): captures = regexec(self.RE_CONDITIONAL,self.input) if captures: self.consume(len(captures[0])) type,sentence = captures[1:] tok = self.tok('conditional',type) tok.sentence = sentence return tok # def _while(self): # captures = regexec(self.RE_WHILE,self.input) # if captures: # self.consume(len(captures[0])) # return self.tok('code','while(%s)'%captures[1]) def each(self): captures = regexec(self.RE_EACH,self.input) if captures: self.consume(len(captures[0])) tok = self.tok('each',None) tok.keys = [x.strip() for x in captures[1].split(',')] tok.code = captures[2] return tok def code(self): captures = regexec(self.RE_CODE,self.input) if captures: self.consume(len(captures[0])) flags, name = captures[1:] tok = self.tok('code',name) tok.escape = flags.startswith('=') #print captures tok.buffer = '=' in flags # print tok.buffer return tok def attrs(self): if '(' == self.input[0]: index = self.indexOfDelimiters('(',')') string = self.input[1:index] tok = self.tok('attrs') l = len(string) colons = self.colons states = ['key'] class Namespace: key = u'' val = u'' quote = u'' literal = True def reset(self): self.key = self.val = self.quote = u'' self.literal = True def __str__(self): return dict(key=self.key,val=self.val,quote=self.quote,literal=self.literal).__str__() ns = Namespace() def state(): return states[-1] def interpolate(attr): attr, num = self.RE_ATTR_INTERPOLATE.subn(lambda matchobj:'%s+%s+%s'%(ns.quote,matchobj.group(1),ns.quote),attr) return attr, (num>0) self.consume(index+1) from .utils import odict tok.attrs = odict() tok.static_attrs = set() str_nums = list(map(str, range(10))) def parse(c): real = c if colons and ':'==c: c = '=' ns.literal = ns.literal and (state() not in ('object','array','expr')) if c in (',','\n'): s = state() if s in ('expr','array','string','object'): ns.val += c else: states.append('key') ns.val = ns.val.strip() ns.key = ns.key.strip() if not ns.key: return # ns.literal = ns.quote if not ns.literal: if '!'==ns.key[-1]: ns.literal = True ns.key = ns.key[:-1] ns.key = ns.key.strip("'\"") if not ns.val: tok.attrs[ns.key] = True else: tok.attrs[ns.key], is_interpolated = interpolate(ns.val) ns.literal = ns.literal and not is_interpolated if ns.literal: tok.static_attrs.add(ns.key) ns.reset() elif '=' == c: s = state() if s == 'key char': ns.key += real elif s in ('val','expr','array','string','object'): ns.val+= real else: states.append('val') elif '(' == c: if state() in ('val','expr'): states.append('expr') ns.val+=c elif ')' == c: if state() in ('val','expr'): states.pop() ns.val+=c elif '{' == c: if 'val'==state(): states.append('object') ns.val+=c elif '}' == c: if 'object'==state(): states.pop() ns.val+=c elif '[' == c: if 'val'==state(): states.append('array') ns.val+=c elif ']' == c: if 'array'==state(): states.pop() ns.val+=c elif c in ('"',"'"): s = state() if 'key'==s: states.append('key char') elif 'key char'==s: states.pop() elif 'string'==s: if c==ns.quote: states.pop() ns.val +=c else: states.append('string') ns.val +=c ns.quote = c elif ''== c: pass else: s = state() ns.literal = ns.literal and (s in ('key','string') or c in str_nums) # print c, s, ns.literal if s in ('key','key char'): ns.key += c else: ns.val += c for char in string: parse(char) parse(',') return tok def indent(self): if self.indentRe: captures = regexec(self.indentRe,self.input) else: regex = self.RE_INDENT_TABS captures = regexec(regex,self.input) if captures and not captures[1]: regex = self.RE_INDENT_SPACES captures = regexec(regex,self.input) if captures and captures[1]: self.indentRe = regex if captures: indents = len(captures[1]) self.lineno += 1 self.consume(indents+1) if not self.input: return self.tok('newline') if self.input[0] in (' ','\t'): raise Exception('Invalid indentation, you can use tabs or spaces but not both') if '\n' == self.input[0]: return self.tok('newline') if self.indentStack and indents< self.indentStack[0]: while self.indentStack and self.indentStack[0]>indents: self.stash.append(self.tok('outdent')) self.indentStack.popleft() tok = self.stash.pop() elif indents and (not self.indentStack or indents != self.indentStack[0]): self.indentStack.appendleft(indents) tok = self.tok('indent',indents) else: tok = self.tok('newline') return tok def pipelessText(self): if self.pipeless: if '\n' == self.input[0]: return i = self.input.find('\n') if -1 == i: i = len(self.input) str = self.input[:i] self.consume(len(str)) return self.tok('text',str) def colon(self): return self.scan(self.RE_COLON,':') def advance(self): return self.stashed() or self.next() def next(self): return self.deferred() \ or self.blank() \ or self.eos() \ or self.pipelessText() \ or self._yield() \ or self.doctype() \ or self.extends() \ or self.append() \ or self.prepend() \ or self.block() \ or self.include() \ or self.mixin() \ or self.call() \ or self.conditional() \ or self.each() \ or self.assignment() \ or self.tag() \ or self.filter() \ or self.code() \ or self.id() \ or self.className() \ or self.attrs() \ or self.indent() \ or self.comment() \ or self.colon() \ or self.text() ##or self._while() \
{ "content_hash": "fe7c39c7075294bca0104e28f6d51e66", "timestamp": "", "source": "github", "line_count": 451, "max_line_length": 128, "avg_line_length": 34.310421286031044, "alnum_prop": 0.46807548145275946, "repo_name": "glennyonemitsu/muhJade", "id": "e5d766737521880e10b77c4689fce60cb19f7689", "size": "15474", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyjade/lexer.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "100445" }, { "name": "Shell", "bytes": "132" } ], "symlink_target": "" }
import click """ Command Line Interface for the cage package. """ # This is only used to make '-h' a shorter way to access the CLI help CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) @click.group(context_settings=CONTEXT_SETTINGS) def main(): """ Python tools for studying cage-like anions for solid electrolytes in batteries. """ pass # region * Setup @main.group(context_settings=CONTEXT_SETTINGS, short_help="Set up Nwchem calculations for the anion.") def setup(): """ Tools for setting up calculations for investigating the local cation-anion interaction around cage-like molecules. """ pass @setup.command(context_settings=CONTEXT_SETTINGS, short_help="Initial anion geometric optimization.") @click.argument('filename') @click.option('--charge', '-c', default=0) def optimize(filename, charge): """ Set up the initial anion optimization. """ from cage.cli.commands.setup import optimize optimize(filename, charge) @setup.command(context_settings=CONTEXT_SETTINGS, short_help="Set up docking sites.") @click.argument('filename') @click.option('--cation', '-C', default='Li', help="The cation to be placed on the dock, provided as a string " "of the chemical symbol, e.g. 'Li' or 'Na'.") @click.option('--distance', '-d', default=2.0) @click.option('--facets', '-f', type=str, default='tuple') @click.option('--verbose', '-v', is_flag=True) def dock(filename, cation, distance, facets, verbose): """ Set up a geometry optimization of a cation docked on a facet in NwChem for all non-equivalent facets of the anion or a list of chosen facets. It is recommended to use an anion which has first been optimized using 'cage setup optimize'. """ from cage.cli.commands.setup import docksetup if facets == 'tuple': facets = tuple else: facets = [int(number) for number in facets.split()] docksetup(filename, cation, distance, facets, verbose) @setup.command(context_settings=CONTEXT_SETTINGS) @click.argument('filename') @click.option('--cation', '-C', default='Li') @click.option('--facets', '-f', type=str, default='tuple') @click.option('--operation', '-O', default='energy') @click.option('--end_radii', '-R', default=(3.0, 6.0)) @click.option('--nradii', default=30) @click.option('--adensity', default=50) def chain(filename, cation, facets, operation, end_radii, nradii, adensity): """ Set up a 2D landscape along a chain of facets. """ from cage.cli.commands.setup import chainsetup if facets == 'tuple': facets = tuple else: facets = [int(number) for number in facets.split()] chainsetup(filename=filename, facets=facets, cation=cation, operation=operation, end_radii=end_radii, nradii=nradii, adensity=adensity) @setup.command(context_settings=CONTEXT_SETTINGS) @click.argument('filename') @click.option('--cation', '-C', default='Li') @click.option('--distance', '-d', default=2) @click.option("--facets", "-f", type=str, default="tuple") @click.option('--edges', is_flag=True) def path(filename, cation, distance, facets, edges): """ Set up the paths between facets that share a vertex. """ from cage.cli.commands.setup import pathsetup if facets == 'tuple': facets = tuple else: facets = [int(number) for number in facets.split()] pathsetup(filename, cation, distance, facets, edges) @setup.command(context_settings=CONTEXT_SETTINGS) @click.argument('paths_dir') @click.option('--nimages', '-n', default=10) def neb(paths_dir, nimages): """ Set up the nudged elastic band calculation. """ from cage.cli.commands.setup import nebsetup nebsetup(paths_dir, nimages) @setup.command(context_settings=CONTEXT_SETTINGS) @click.argument("facet_index") @click.argument("filename") @click.option("--cation", "-C", default="Li", help="Cation for which to calculate the reference energy.") @click.option("--distance", "-d", default=8.0, help="Maximum distance at which to place the cation for the " "reference energy point.") @click.option("--verbose", "-v", is_flag=True) def reference(facet_index, filename, cation, distance, verbose): """Set up a calculation to determine a reference energy.""" from cage.cli.commands.setup import reference # Convert string input to integer facet_index = int(facet_index) reference(facet_index=facet_index, filename=filename, cation=cation, end_radius=distance, verbose=verbose) @setup.command(context_settings=CONTEXT_SETTINGS) @click.argument('filename') @click.option('--cation', '-C', default='Li') @click.option('--radius', '-R', default=6.0) @click.option('--density', default=20) def sphere(filename, cation, radius, density): """ Set up a spherical landscape calculation. """ from cage.cli.commands.setup import spheresetup spheresetup(filename=filename, cation=cation, radius=radius, density=density) # endregion # region * Setup - Twocat @setup.group(context_settings=CONTEXT_SETTINGS) def twocat(): """ Set up calculations for anions with two cations. These require the results from docking calculations for single cations on the anion. """ pass @twocat.command(context_settings=CONTEXT_SETTINGS) @click.argument('dock_dir') @click.option('--cation', '-C', default='Li') @click.option('--operation', '-O', default='energy') @click.option('--endradii', '-R', default=(3, 6)) @click.option('--nradii', default=30) @click.option('--adensity', default=50) @click.option('--tolerance', default=1e-2) @click.option('--verbose', '-v', is_flag=True) def chain(dock_dir, cation, operation, endradii, nradii, adensity, tolerance, verbose): """ Similar to the single cation case, this command sets up a 2D landscape between the normals of a chain of non-equivalent facets. """ from cage.cli.commands.setup import twocat_chainsetup twocat_chainsetup(dock_dir, cation, operation, endradii, nradii, adensity, tolerance, verbose) # endregion # region * Analyze @main.group(context_settings=CONTEXT_SETTINGS, short_help="Analyze the results from NwChem calculations.") def analyze(): """ Scripts to help analyze the output of several calculations to quickly visualize results. """ pass @analyze.command(context_settings=CONTEXT_SETTINGS) @click.argument('lands_dir') @click.option('--cation', '-C', default='Li') @click.option('--energy_range', '-E', default=(0.0, 0.0)) @click.option('--interp_mesh', '-I', default=(0.03, 0.01)) @click.option('--end_radii', '-R', default=(0.0, 0.0)) @click.option('--contour_levels', '-l', default=0.1) @click.option('--verbose', '-v', is_flag=True) @click.option("--coulomb_charge", "-c", default=0) @click.option("--reference_energy", "-r", default=0.0) @click.option("--interp_method", "-m", default="griddata") def landscape(lands_dir, cation, energy_range, interp_mesh, end_radii, contour_levels, verbose, coulomb_charge, reference_energy, interp_method): """ Analyze the landscape data. """ from cage.cli.commands.analyze import landscape_analysis if reference_energy == 0.0: reference_energy = None landscape_analysis(directory=lands_dir, cation=cation, energy_range=energy_range, interp_mesh=interp_mesh, end_radii=end_radii, contour_levels=contour_levels, verbose=verbose, coulomb_charge=coulomb_charge, reference_energy=reference_energy, interp_method=interp_method, set_contour_levels_manually=True) @analyze.command(context_settings=CONTEXT_SETTINGS) @click.argument('lands_dir') @click.option('--cation', '-C', default='Li') @click.option('--interp_mesh', '-I', default=(0.03, 0.01)) @click.option('--end_radii', '-R', default=(0.0, 0.0)) @click.option('--verbose', '-v', is_flag=True) @click.option("--coulomb_charge", "-c", default=0) @click.option("--reference_energy", "-r", default=0.0) def barrier(lands_dir, cation, interp_mesh, end_radii, verbose, coulomb_charge, reference_energy): """ Analyze the barriers in landscape data. """ from cage.cli.commands.analyze import barrier_analysis if reference_energy == 0.0: reference_energy = None barrier_analysis(lands_dir=lands_dir, cation=cation, interp_mesh=interp_mesh, end_radii=end_radii, verbose=verbose, coulomb=coulomb_charge, reference_energy=reference_energy) @analyze.command(context_settings=CONTEXT_SETTINGS) @click.argument("reference_dir") @click.option("--coulomb_charge", "-c", default=0) def reference(reference_dir, coulomb_charge): from cage.cli.commands.analyze import reference reference(reference_dir, coulomb_charge) @analyze.command(context_settings=CONTEXT_SETTINGS) @click.argument('lands_dir') @click.option('--cation', '-C', default='Li') @click.option('--energy_range', '-E', default=(0.0, 0.0)) @click.option('--interp_mesh', '-I', default=(0.01, 0.01)) @click.option('--contour_levels', '-l', default=0.1) @click.option("--reference_energy", "-r", default=0.0) @click.option("--interp_method", "-m", default="griddata") def sphere(lands_dir, cation, energy_range, interp_mesh, contour_levels, reference_energy, interp_method): """ Analyze the landscape data. """ from cage.cli.commands.analyze import sphere_analysis if reference_energy == 0.0: reference_energy = None sphere_analysis(directory=lands_dir, cation=cation, interp_mesh=interp_mesh, energy_range=energy_range, contour_levels=contour_levels, reference_energy=reference_energy, interp_method=interp_method) # endregion # region * Util @main.group(context_settings=CONTEXT_SETTINGS) def util(): """ A set of utility scripts for the cage package. """ pass @util.command(context_settings=CONTEXT_SETTINGS) @click.argument('output_file') def geo(output_file): """ Write the initial and final geometry of a nwchem optimization. """ from cage.cli.commands.util import geo geo(output_file=output_file) @util.command(context_settings=CONTEXT_SETTINGS) @click.argument('output_file') def energy(output_file): from cage.cli.commands.util import energy energy(output_file=output_file) @util.command(context_settings=CONTEXT_SETTINGS) @click.argument('output_file') def check(output_file): """ Check the output of calculations. """ from cage.cli.commands.util import check_calculation check_calculation(output=output_file) @util.command(context_settings=CONTEXT_SETTINGS) @click.argument('output_file') def process(output_file): """ Process the output of calculations. """ from cage.cli.commands.util import process_output process_output(output=output_file) @util.command(context_settings=CONTEXT_SETTINGS) @click.argument('directory') def gather(directory): """ Gather the results of a landscape calculation. """ from cage.cli.commands.util import gather_landscape gather_landscape(directory=directory) @util.command(context_settings=CONTEXT_SETTINGS) @click.argument('filename') def visualize(filename): """ Visualize the facets of a molecule. """ from cage.cli.commands.util import visualize_facets visualize_facets(filename=filename) # endregion # region * Workflow @main.group(context_settings=CONTEXT_SETTINGS) def workflow(): """ Workflow setup scripts. """ pass @workflow.command(context_settings=CONTEXT_SETTINGS) @click.argument('filename') @click.option('--cation', '-C', default='Li') @click.option('--facets', '-f', type=str, default='tuple') @click.option('--operation', '-O', default='energy') @click.option('--end_radii', '-R', default=(3.0, 6.0)) @click.option('--nradii', default=30) @click.option('--adensity', default=50) def landscape(filename, cation, facets, operation, end_radii, nradii, adensity): """ Set up a 2D landscape along a chain of facets. """ from cage.workflow import landscape_workflow if facets == 'tuple': facets = tuple else: facets = [int(number) for number in facets.split()] landscape_workflow(filename=filename, facets=facets, cation=cation, operation=operation, end_radii=end_radii, nradii=nradii, adensity=adensity) @workflow.command(context_settings=CONTEXT_SETTINGS) @click.argument('filename') @click.option('--cation', '-C', default='Li') @click.option("--radius", '-R', default=6.0) @click.option('--density', default=20) def sphere(filename, cation, radius, density): """ Set up a 2D spherical landscape. """ from cage.workflow import sphere_workflow sphere_workflow(filename=filename, cation=cation, radius=radius, density=density) @workflow.command(context_settings=CONTEXT_SETTINGS, short_help="Anion geometric optimization.") @click.argument('filename') @click.option('--charge', '-c', default=0) def optimize(filename, charge): """ Set up the initial anion optimization. """ from cage.workflow import optimize_workflow optimize_workflow(filename, charge) @workflow.command(context_settings=CONTEXT_SETTINGS) def test(): """ Testing if I can get workflows to work on quartz... Returns: """ from cage.workflow import test_workflow test_workflow() # endregion workflow
{ "content_hash": "b83e0190874b3156b368a8877fe1e3a6", "timestamp": "", "source": "github", "line_count": 448, "max_line_length": 79, "avg_line_length": 31.747767857142858, "alnum_prop": 0.6491598115728046, "repo_name": "mbercx/cage", "id": "54b5e391e11c167e4d5377ebf2dfb4ec33bdb5ce", "size": "14223", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cage/cli/cli.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "242454" } ], "symlink_target": "" }
import os import django # Django settings for conf project. settings_dir = os.path.dirname(__file__) PROJECT_ROOT = os.path.abspath(os.path.dirname(settings_dir)) DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', '[email protected]'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': 'test.sqlite', # Or path to database file if using sqlite3. } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/New_York' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = '7@m$nx@q%-$la^fy_(-rhxtvoxk118hrprg=q86f(@k*6^^vf8' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'conf.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'conf.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(PROJECT_ROOT, 'templates/'), ) TEMPLATE_CONTEXT_PROCESSORS = ( # default template context processors 'django.core.context_processors.debug', 'django.core.context_processors.request', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.contrib.auth.context_processors.auth', ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', ) if django.VERSION[1] < 7: INSTALLED_APPS += ( 'south', ) INSTALLED_APPS += ( 'addendum', ) #MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',) #INSTALLED_APPS += ('debug_toolbar',) INTERNAL_IPS = ('127.0.0.1',) DEBUG_TOOLBAR_CONFIG = { 'INTERCEPT_REDIRECTS': False, 'TAG': 'body', } DEBUG_TOOLBAR_PANELS = ( 'debug_toolbar.panels.version.VersionDebugPanel', 'debug_toolbar.panels.timer.TimerDebugPanel', 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel', 'debug_toolbar.panels.headers.HeaderDebugPanel', 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel', 'debug_toolbar.panels.template.TemplateDebugPanel', 'debug_toolbar.panels.sql.SQLDebugPanel', 'debug_toolbar.panels.signals.SignalDebugPanel', 'debug_toolbar.panels.logger.LoggingPanel', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
{ "content_hash": "dbbf35a7a5b63f25d2878673c590dd52", "timestamp": "", "source": "github", "line_count": 196, "max_line_length": 108, "avg_line_length": 32.015306122448976, "alnum_prop": 0.7024701195219123, "repo_name": "adw0rd/django-addendum-inline", "id": "fe406d63546f099a9e917adc074ef2cacf8aecff", "size": "6275", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "example/conf/settings.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "HTML", "bytes": "5523" }, { "name": "Python", "bytes": "36743" } ], "symlink_target": "" }
from django.conf import settings from django.core import exceptions from django.utils.importlib import import_module CLASS_PATH_ERROR = 'django-shop is unable to interpret settings value for %s. '\ '%s should be in the form of a tupple: '\ '(\'path.to.models.Class\', \'app_label\').' def load_class(class_path, setting_name=None): """ Loads a class given a class_path. The setting value may be a string or a tuple. The setting_name parameter is only there for pretty error output, and therefore is optional """ if not isinstance(class_path, basestring): try: class_path, app_label = class_path except: if setting_name: raise exceptions.ImproperlyConfigured(CLASS_PATH_ERROR % ( setting_name, setting_name)) else: raise exceptions.ImproperlyConfigured(CLASS_PATH_ERROR % ( 'this setting', 'It')) try: class_module, class_name = class_path.rsplit('.', 1) except ValueError: if setting_name: txt = '%s isn\'t a valid module. Check your %s setting' % ( class_path, setting_name) else: txt = '%s isn\'t a valid module.' % class_path raise exceptions.ImproperlyConfigured(txt) try: mod = import_module(class_module) except ImportError, e: if setting_name: txt = 'Error importing backend %s: "%s". Check your %s setting' % ( class_module, e, setting_name) else: txt = 'Error importing backend %s: "%s".' % (class_module, e) raise exceptions.ImproperlyConfigured(txt) try: clazz = getattr(mod, class_name) except AttributeError: if setting_name: txt = ('Backend module "%s" does not define a "%s" class. Check' ' your %s setting' % (class_module, class_name, setting_name)) else: txt = 'Backend module "%s" does not define a "%s" class.' % ( class_module, class_name) raise exceptions.ImproperlyConfigured(txt) return clazz def get_model_string(model_name): """ Returns the model string notation Django uses for lazily loaded ForeignKeys (eg 'auth.User') to prevent circular imports. This is needed to allow our crazy custom model usage. """ setting_name = 'SHOP_%s_MODEL' % model_name.upper().replace('_', '') class_path = getattr(settings, setting_name, None) if not class_path: return 'shop.%s' % model_name elif isinstance(class_path, basestring): parts = class_path.split('.') try: index = parts.index('models') - 1 except ValueError, e: raise exceptions.ImproperlyConfigured(CLASS_PATH_ERROR % ( setting_name, setting_name)) app_label, model_name = parts[index], parts[-1] else: try: class_path, app_label = class_path model_name = class_path.split('.')[-1] except: raise exceptions.ImproperlyConfigured(CLASS_PATH_ERROR % ( setting_name, setting_name)) return '%s.%s' % (app_label, model_name)
{ "content_hash": "301709dbca78193c02c4dc86187584f6", "timestamp": "", "source": "github", "line_count": 92, "max_line_length": 80, "avg_line_length": 35.59782608695652, "alnum_prop": 0.5804580152671756, "repo_name": "thenewguy/django-shop", "id": "2b6358e9fe0cea95ce9169a714f8c79225b3a6ad", "size": "3298", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "shop/util/loader.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "362160" }, { "name": "Shell", "bytes": "916" } ], "symlink_target": "" }
from gevent import monkey monkey.patch_all() import sys import click from bottle import static_file, Bottle, run, view from bottle import TEMPLATE_PATH as T from bottle.ext.websocket import GeventWebSocketServer from bottle.ext.auth.decorator import login from gevent.pywsgi import WSGIServer from geventwebsocket.handler import WebSocketHandler from beaker.middleware import SessionMiddleware from mining.controllers.api import api_app from mining.controllers.data import data_app from mining.utils import conf from mining.auth import auth from mining.settings import TEMPLATE_PATH, STATIC_PATH from mining.celeryc import celery_app from mining.bin.scheduler import scheduler_app from mining.bin.demo.build_admin import build reload(sys) sys.setdefaultencoding('utf-8') T.insert(0, TEMPLATE_PATH) session_opts = { 'session.type': 'file', 'session.data_dir': '/tmp/openmining.data', 'session.lock_dir': '/tmp/openmining.lock', 'session.cookie_expires': 50000, 'session.auto': True } app = SessionMiddleware(Bottle(), session_opts) app.wrap_app.mount('/api', api_app) app.wrap_app.mount('/data', data_app) app.wrap_app.install(auth) @app.wrap_app.route('/assets/<path:path>', name='assets') def static(path): yield static_file(path, root=STATIC_PATH) @app.wrap_app.route('/') @login() @view('index.html') def index(): return {'get_url': app.wrap_app.get_url, 'protocol': conf('openmining')['protocol'], 'lang': conf('openmining')['lang']} @app.wrap_app.route('/login') @view('login.html') def login(): return {'get_url': app.wrap_app.get_url, 'lang': conf('openmining')['lang']} @click.group() def cmds(): pass @cmds.command() @click.option('--port', type=int, help=u'Set application server port!') @click.option('--ip', type=str, help=u'Set application server ip!') @click.option('--debug', default=False, help=u'Set application server debug!') def runserver(port, ip, debug): if debug is None: server = WSGIServer((ip, port), app, handler_class=WebSocketHandler) server.serve_forever() click.echo(u'OpenMining start server at: {}:{}'.format(ip, port)) run(app=app, host=ip, port=port, debug=debug, reloader=True, server=GeventWebSocketServer) @cmds.command() @click.option('--concurrency', type=int, default=4, help="""Number of child processes processing the queue. The default is the number of CPUs available on your system.""") def celery(concurrency): click.echo(u'OpenMining start tasks') args = ["celery", "worker", "--concurrency={}".format(concurrency)] celery_app.start(args) @cmds.command() def scheduler(): click.echo(u'OpenMining start scheduler') scheduler_app() @cmds.command() @click.option('--level', type=int, default=0, help="What level of data volume?") def build_demo(level): click.echo(u'OpenMining load demo system') build(level) if __name__ == "__main__": default_map = {"runserver": conf('openmining')} default_map["runserver"]["debug"] = False cmds(default_map=default_map)
{ "content_hash": "c8337d4700eda385c5d33ba9ff3db78a", "timestamp": "", "source": "github", "line_count": 116, "max_line_length": 76, "avg_line_length": 27.025862068965516, "alnum_prop": 0.6883572567783094, "repo_name": "AndrzejR/mining", "id": "5dd64ba67de1b1ede48710cf7fafc04d5665b225", "size": "3181", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "manage.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "723" }, { "name": "Python", "bytes": "85228" }, { "name": "Shell", "bytes": "809" } ], "symlink_target": "" }
import xyz class Gettext(xyz.Package): pkg_name = 'gettext' configure = xyz.Package.host_lib_configure rules = Gettext
{ "content_hash": "07d8283ca7f1b7905c418cc18e1976d1", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 46, "avg_line_length": 18.428571428571427, "alnum_prop": 0.7131782945736435, "repo_name": "BreakawayConsulting/xyz", "id": "aca89f87b5073edc45e1185d5d8f37c51b580380", "size": "129", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rules/gettext.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "58183" } ], "symlink_target": "" }
import asyncio import diffusion from diffusion.features.control.metrics.topic_metrics import TopicMetricCollectorBuilder server_url = "ws://localhost:8080" principal = "admin" credentials = diffusion.Credentials("password") # Because Python SDK for Diffusion is async, all the code needs to be # wrapped inside a coroutine function, and executed using asyncio.run. async def main(): # Runs the session metric collector example. # creating the session async with diffusion.Session( url=server_url, principal=principal, credentials=credentials ) as session: metrics = session.metrics topic_selector = "selector" try: print( f"""\ Adding the topic metric collector 'Test' with topic selector '{topic_selector}'. """ ) collector = ( TopicMetricCollectorBuilder() .group_by_topic_type(True) .maximum_groups(10) .group_by_path_prefix_parts(55) .create("Test", topic_selector) ) await metrics.put_topic_metric_collector(collector) print(f"Topic metric collector '{collector.name}' added.") except Exception as ex: print(f"Failed to add topic metric collector : {ex}.") return try: print("The following topic metric collectors exist:") list_topic_metric_collectors = await metrics.list_topic_metric_collectors() for topic_metric_collector in list_topic_metric_collectors: print( f"Name: '{topic_metric_collector.name}', " f"Maximum Groups: {topic_metric_collector.maximum_groups}, " f"Topic selector: '{topic_metric_collector.topic_selector}', " f"Group By Path Prefix Parts: {topic_metric_collector.group_by_path_prefix_parts}, " f"Exports to Prometheus: '{topic_metric_collector.exports_to_prometheus}', " f"Groups by topic type: '{topic_metric_collector.groups_by_topic_type}'" ) except Exception as ex: print(f"Failed to list topic metric collectors : {ex}.") return try: await metrics.remove_topic_metric_collector(collector.name) print(f"Collector '{collector.name}' removed.") except Exception as ex: print(f"Failed to remove topic metric collector : {ex}.") if __name__ == "__main__": asyncio.run(main())
{ "content_hash": "59315eff64c6d740d4fb91688fc1d367", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 104, "avg_line_length": 36.67142857142857, "alnum_prop": 0.5987534086482275, "repo_name": "pushtechnology/diffusion-examples", "id": "77edaa0187d8fce93164a1b8a9000958337da7de", "size": "3063", "binary": false, "copies": "1", "ref": "refs/heads/6.8", "path": "python/examples/metrics/topic_metrics.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "300206" }, { "name": "C#", "bytes": "316143" }, { "name": "HTML", "bytes": "3414" }, { "name": "Java", "bytes": "294854" }, { "name": "JavaScript", "bytes": "74740" }, { "name": "Makefile", "bytes": "15236" }, { "name": "Objective-C", "bytes": "180172" }, { "name": "Python", "bytes": "26889" }, { "name": "Swift", "bytes": "17428" }, { "name": "TypeScript", "bytes": "75546" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['deprecated'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: katello short_description: Manage Katello Resources deprecated: removed_in: "2.12" why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules" alternative: https://github.com/theforeman/foreman-ansible-modules description: - Allows the management of Katello resources inside your Foreman server. version_added: "2.3" author: - Eric D Helms (@ehelms) requirements: - nailgun >= 0.28.0 - python >= 2.6 - datetime options: server_url: description: - URL of Foreman server. required: true username: description: - Username on Foreman server. required: true password: description: - Password for user accessing Foreman server. required: true entity: description: - The Foreman resource that the action will be performed on (e.g. organization, host). choices: - repository - manifest - repository_set - sync_plan - content_view - lifecycle_environment - activation_key - product required: true action: description: - action associated to the entity resource to set or edit in dictionary format. - Possible Action in relation to Entitys. - "sync (available when entity=product or entity=repository)" - "publish (available when entity=content_view)" - "promote (available when entity=content_view)" choices: - sync - publish - promote required: false params: description: - Parameters associated to the entity resource and action, to set or edit in dictionary format. - Each choice may be only available with specific entitys and actions. - "Possible Choices are in the format of param_name ([entry,action,action,...],[entity,..],...)." - The action "None" means no action specified. - Possible Params in relation to entity and action. - "name ([product,sync,None], [repository,sync], [repository_set,None], [sync_plan,None]," - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])" - "organization ([product,sync,None] ,[repository,sync,None], [repository_set,None], [sync_plan,None], " - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])" - "content ([manifest,None])" - "product ([repository,sync,None], [repository_set,None], [sync_plan,None])" - "basearch ([repository_set,None])" - "releaserver ([repository_set,None])" - "sync_date ([sync_plan,None])" - "interval ([sync_plan,None])" - "repositories ([content_view,None])" - "from_environment ([content_view,promote])" - "to_environment([content_view,promote])" - "prior ([lifecycle_environment,None])" - "content_view ([activation_key,None])" - "lifecycle_environment ([activation_key,None])" required: true task_timeout: description: - The timeout in seconds to wait for the started Foreman action to finish. - If the timeout is reached and the Foreman action did not complete, the ansible task fails. However the foreman action does not get canceled. default: 1000 version_added: "2.7" required: false verify_ssl: description: - verify the ssl/https connection (e.g for a valid certificate) default: false type: bool required: false ''' EXAMPLES = ''' --- # Simple Example: - name: Create Product katello: username: admin password: admin server_url: https://fakeserver.com entity: product params: name: Centos 7 delegate_to: localhost # Abstraction Example: # katello.yml --- - name: "{{ name }}" katello: username: admin password: admin server_url: https://fakeserver.com entity: "{{ entity }}" params: "{{ params }}" delegate_to: localhost # tasks.yml --- - include: katello.yml vars: name: Create Dev Environment entity: lifecycle_environment params: name: Dev prior: Library organization: Default Organization - include: katello.yml vars: name: Create Centos Product entity: product params: name: Centos 7 organization: Default Organization - include: katello.yml vars: name: Create 7.2 Repository entity: repository params: name: Centos 7.2 product: Centos 7 organization: Default Organization content_type: yum url: http://mirror.centos.org/centos/7/os/x86_64/ - include: katello.yml vars: name: Create Centos 7 View entity: content_view params: name: Centos 7 View organization: Default Organization repositories: - name: Centos 7.2 product: Centos 7 - include: katello.yml vars: name: Enable RHEL Product entity: repository_set params: name: Red Hat Enterprise Linux 7 Server (RPMs) product: Red Hat Enterprise Linux Server organization: Default Organization basearch: x86_64 releasever: 7 - include: katello.yml vars: name: Promote Contentview Environment with longer timeout task_timeout: 10800 entity: content_view action: promote params: name: MyContentView organization: MyOrganisation from_environment: Testing to_environment: Production # Best Practices # In Foreman, things can be done in parallel. # When a conflicting action is already running, # the task will fail instantly instead of waiting for the already running action to complete. # So you should use a "until success" loop to catch this. - name: Promote Contentview Environment with increased Timeout katello: username: ansibleuser password: supersecret task_timeout: 10800 entity: content_view action: promote params: name: MyContentView organization: MyOrganisation from_environment: Testing to_environment: Production register: task_result until: task_result is success retries: 9 delay: 120 ''' RETURN = '''# ''' import datetime import os import traceback try: from nailgun import entities, entity_fields, entity_mixins from nailgun.config import ServerConfig HAS_NAILGUN_PACKAGE = True except Exception: HAS_NAILGUN_PACKAGE = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native class NailGun(object): def __init__(self, server, entities, module, task_timeout): self._server = server self._entities = entities self._module = module entity_mixins.TASK_TIMEOUT = task_timeout def find_organization(self, name, **params): org = self._entities.Organization(self._server, name=name, **params) response = org.search(set(), {'search': 'name={0}'.format(name)}) if len(response) == 1: return response[0] else: self._module.fail_json(msg="No organization found for %s" % name) def find_lifecycle_environment(self, name, organization): org = self.find_organization(organization) lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org) response = lifecycle_env.search() if len(response) == 1: return response[0] else: self._module.fail_json(msg="No Lifecycle Found found for %s" % name) def find_product(self, name, organization): org = self.find_organization(organization) product = self._entities.Product(self._server, name=name, organization=org) response = product.search() if len(response) == 1: return response[0] else: self._module.fail_json(msg="No Product found for %s" % name) def find_repository(self, name, product, organization): product = self.find_product(product, organization) repository = self._entities.Repository(self._server, name=name, product=product) repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization) repository.organization = product.organization response = repository.search() if len(response) == 1: return response[0] else: self._module.fail_json(msg="No Repository found for %s" % name) def find_content_view(self, name, organization): org = self.find_organization(organization) content_view = self._entities.ContentView(self._server, name=name, organization=org) response = content_view.search() if len(response) == 1: return response[0] else: self._module.fail_json(msg="No Content View found for %s" % name) def organization(self, params): name = params['name'] del params['name'] org = self.find_organization(name, **params) if org: org = self._entities.Organization(self._server, name=name, id=org.id, **params) org.update() else: org = self._entities.Organization(self._server, name=name, **params) org.create() return True def manifest(self, params): org = self.find_organization(params['organization']) params['organization'] = org.id try: file = open(os.getcwd() + params['content'], 'r') content = file.read() finally: file.close() manifest = self._entities.Subscription(self._server) try: manifest.upload( data={'organization_id': org.id}, files={'content': content} ) return True except Exception as e: if "Import is the same as existing data" in e.message: return False else: self._module.fail_json(msg="Manifest import failed with %s" % to_native(e), exception=traceback.format_exc()) def product(self, params): org = self.find_organization(params['organization']) params['organization'] = org.id product = self._entities.Product(self._server, **params) response = product.search() if len(response) == 1: product.id = response[0].id product.update() else: product.create() return True def sync_product(self, params): org = self.find_organization(params['organization']) product = self.find_product(params['name'], org.name) return product.sync() def repository(self, params): product = self.find_product(params['product'], params['organization']) params['product'] = product.id del params['organization'] repository = self._entities.Repository(self._server, **params) repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization) repository.organization = product.organization response = repository.search() if len(response) == 1: repository.id = response[0].id repository.update() else: repository.create() return True def sync_repository(self, params): org = self.find_organization(params['organization']) repository = self.find_repository(params['name'], params['product'], org.name) return repository.sync() def repository_set(self, params): product = self.find_product(params['product'], params['organization']) del params['product'] del params['organization'] if not product: return False else: reposet = self._entities.RepositorySet(self._server, product=product, name=params['name']) reposet = reposet.search()[0] formatted_name = [params['name'].replace('(', '').replace(')', '')] formatted_name.append(params['basearch']) if 'releasever' in params: formatted_name.append(params['releasever']) formatted_name = ' '.join(formatted_name) repository = self._entities.Repository(self._server, product=product, name=formatted_name) repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization) repository.organization = product.organization repository = repository.search() if len(repository) == 0: if 'releasever' in params: reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']}) else: reposet.enable(data={'basearch': params['basearch']}) return True def sync_plan(self, params): org = self.find_organization(params['organization']) params['organization'] = org.id params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M") products = params['products'] del params['products'] sync_plan = self._entities.SyncPlan( self._server, name=params['name'], organization=org ) response = sync_plan.search() sync_plan.sync_date = params['sync_date'] sync_plan.interval = params['interval'] if len(response) == 1: sync_plan.id = response[0].id sync_plan.update() else: response = sync_plan.create() sync_plan.id = response[0].id if products: ids = [] for name in products: product = self.find_product(name, org.name) ids.append(product.id) sync_plan.add_products(data={'product_ids': ids}) return True def content_view(self, params): org = self.find_organization(params['organization']) content_view = self._entities.ContentView(self._server, name=params['name'], organization=org) response = content_view.search() if len(response) == 1: content_view.id = response[0].id content_view.update() else: content_view = content_view.create() if params['repositories']: repos = [] for repository in params['repositories']: repository = self.find_repository(repository['name'], repository['product'], org.name) repos.append(repository) content_view.repository = repos content_view.update(['repository']) def find_content_view_version(self, name, organization, environment): env = self.find_lifecycle_environment(environment, organization) content_view = self.find_content_view(name, organization) content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view) response = content_view_version.search(['content_view'], {'environment_id': env.id}) if len(response) == 1: return response[0] else: self._module.fail_json(msg="No Content View version found for %s" % response) def publish(self, params): content_view = self.find_content_view(params['name'], params['organization']) return content_view.publish() def promote(self, params): to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization']) version = self.find_content_view_version(params['name'], params['organization'], params['from_environment']) data = {'environment_id': to_environment.id} return version.promote(data=data) def lifecycle_environment(self, params): org = self.find_organization(params['organization']) prior_env = self.find_lifecycle_environment(params['prior'], params['organization']) lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env) response = lifecycle_env.search() if len(response) == 1: lifecycle_env.id = response[0].id lifecycle_env.update() else: lifecycle_env.create() return True def activation_key(self, params): org = self.find_organization(params['organization']) activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org) response = activation_key.search() if len(response) == 1: activation_key.id = response[0].id activation_key.update() else: activation_key.create() if params['content_view']: content_view = self.find_content_view(params['content_view'], params['organization']) lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization']) activation_key.content_view = content_view activation_key.environment = lifecycle_environment activation_key.update() return True def main(): module = AnsibleModule( argument_spec=dict( server_url=dict(type='str', required=True), username=dict(type='str', required=True, no_log=True), password=dict(type='str', required=True, no_log=True), entity=dict(type='str', required=True, choices=['repository', 'manifest', 'repository_set', 'sync_plan', 'content_view', 'lifecycle_environment', 'activation_key', 'product']), action=dict(type='str', choices=['sync', 'publish', 'promote']), verify_ssl=dict(type='bool', default=False), task_timeout=dict(type='int', default=1000), params=dict(type='dict', required=True, no_log=True), ), supports_check_mode=True, ) if not HAS_NAILGUN_PACKAGE: module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun") server_url = module.params['server_url'] username = module.params['username'] password = module.params['password'] entity = module.params['entity'] action = module.params['action'] params = module.params['params'] verify_ssl = module.params['verify_ssl'] task_timeout = module.params['task_timeout'] server = ServerConfig( url=server_url, auth=(username, password), verify=verify_ssl ) ng = NailGun(server, entities, module, task_timeout) # Lets make an connection to the server with username and password try: org = entities.Organization(server) org.search() except Exception as e: module.fail_json(msg="Failed to connect to Foreman server: %s " % e) result = False if entity == 'product': if action == 'sync': result = ng.sync_product(params) else: result = ng.product(params) elif entity == 'repository': if action == 'sync': result = ng.sync_repository(params) else: result = ng.repository(params) elif entity == 'manifest': result = ng.manifest(params) elif entity == 'repository_set': result = ng.repository_set(params) elif entity == 'sync_plan': result = ng.sync_plan(params) elif entity == 'content_view': if action == 'publish': result = ng.publish(params) elif action == 'promote': result = ng.promote(params) else: result = ng.content_view(params) elif entity == 'lifecycle_environment': result = ng.lifecycle_environment(params) elif entity == 'activation_key': result = ng.activation_key(params) else: module.fail_json(changed=False, result="Unsupported entity supplied") module.exit_json(changed=result, result="%s updated" % entity) if __name__ == '__main__': main()
{ "content_hash": "9e8b35209ac923c0655acd018d7055f3", "timestamp": "", "source": "github", "line_count": 615, "max_line_length": 154, "avg_line_length": 33.458536585365856, "alnum_prop": 0.6076687563784808, "repo_name": "thaim/ansible", "id": "e0974201884eb0ba5e6f7e27cc4af869e1dd002f", "size": "20773", "binary": false, "copies": "19", "ref": "refs/heads/fix-broken-link", "path": "lib/ansible/modules/remote_management/foreman/_katello.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7" }, { "name": "Shell", "bytes": "246" } ], "symlink_target": "" }
from fudge.inspector import * def test_import_all(): assert "arg" in globals()
{ "content_hash": "de9433820dd7b84cbf5e985b23853253", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 29, "avg_line_length": 20.75, "alnum_prop": 0.6987951807228916, "repo_name": "fudge-py/fudge", "id": "6df21b4ae236be341d588e64fb64a96d6c6786a3", "size": "84", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "fudge/tests/test_inspector_import_all.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "2170" }, { "name": "HTML", "bytes": "514" }, { "name": "JavaScript", "bytes": "25377" }, { "name": "Python", "bytes": "149202" }, { "name": "Shell", "bytes": "1910" } ], "symlink_target": "" }
ENRICH_METADATA_OPTION = 'enrich_metadata' BASE_METADATA_QUERY_FILENAME = 'base_metadata_query_filename' REFRESH_OPTION = 'refresh_metadata_tables' ROW_COUNT_OPTION = 'sync_row_counts' # Metadata config keys ASSET_NAME_KEY = 'name' TABLE_CONTAINER_DEF_KEY = 'table_container_def' TABLE_DEF_KEY = 'table_def' COLUMN_DEF_KEY = 'column_def' # Base Metadata fields TABLE_TYPE_KEY = 'type' VIEW_TYPE_VALUE = 'view' # Metadata enrich attributes keys METADATA_ENRICH_ENTRY_PREFIX = 'entry_prefix' METADATA_ENRICH_ENTRY_ID_PATTERN_FOR_PREFIX = 'entry_id_pattern_for_prefix' # Metadata scrape base entries BASE_ENTRIES_KEY = 'base_entries' # Metadata scrape sql objects SQL_OBJECTS_KEY = 'sql_objects' SQL_OBJECT_ITEM_NAME = 'name' SQL_OBJECT_TYPE = 'type' SQL_OBJECT_NAME = 'name' SQL_OBJECT_ITEM_QUERY_KEY = 'query' SQL_OBJECT_ITEM_QUERY_FILENAME_PREFIX = 'query' SQL_OBJECT_ITEM_QUERY_FILENAME_SUFFIX = 'sql_object.sql' SQL_OBJECT_ITEM_METADATA_DEF_KEY = 'metadata_def' SQL_OBJECT_ITEM_METADATA_DEF_FILENAME_PREFIX = 'metadata_definition' SQL_OBJECT_ITEM_METADATA_DEF_FILENAME_SUFFIX = 'sql_object.json' SQL_OBJECT_ITEM_ENABLED_FLAG = 'enabled' # Metadata scrape sql objects items SQL_OBJECT_ITEMS_KEY = 'items' # Metadata scrape sql objects model types SQL_OBJECT_TAG_MODEL = 'tag' SQL_OBJECT_ENTRY_MODEL = 'entry' # Metadata scrape sql objects field types SQL_OBJECT_DOUBLE_FIELD = 'double' SQL_OBJECT_STRING_FIELD = 'string' SQL_OBJECT_BOOLEAN_FIELD = 'bool' SQL_OBJECT_TIMESTAMP_FIELD = 'timestamp' # Metadata scrape sql objects fields SQL_OBJECT_FIELDS = 'fields' SQL_OBJECT_FIELD_TARGET = 'target' SQL_OBJECT_FIELD_TARGET_DEFINITION = 'definition' SQL_OBJECT_FIELD_TARGET_NAME = 'field_name' SQL_OBJECT_FIELD_TARGET_MODEL = 'model' SQL_OBJECT_FIELD_TARGET_TYPE = 'type' # SQL Objects config tag fields names SQL_OBJECT_CONFIG_FIELD_NAME = 'name' SQL_OBJECT_CONFIG_FIELD_PURPOSE = 'purpose' SQL_OBJECT_CONFIG_FIELD_INPUTS = 'inputs' SQL_OBJECT_CONFIG_FIELD_OUTPUTS = 'outputs' # Metadata scrape sql objects entry pre defined field SQL_OBJECT_ENTRY_CREATE_TIME = 'create_time' SQL_OBJECT_ENTRY_UPDATE_TIME = 'update_time' SQL_OBJECT_ENTRY_DESCRIPTION = 'description'
{ "content_hash": "549813740a77277377a73b55848d9406", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 75, "avg_line_length": 31.579710144927535, "alnum_prop": 0.7581459385039009, "repo_name": "GoogleCloudPlatform/datacatalog-connectors-rdbms", "id": "f1481ce738b932b3bb21bf82d4677f9fa433e69e", "size": "2802", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "google-datacatalog-rdbms-connector/src/google/datacatalog_connectors/rdbms/common/constants.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "9250" }, { "name": "Python", "bytes": "457511" }, { "name": "Shell", "bytes": "19222" } ], "symlink_target": "" }
"""Support for Daikin AC sensors.""" from homeassistant.components.sensor import SensorEntity from homeassistant.const import ( CONF_DEVICE_CLASS, CONF_ICON, CONF_NAME, CONF_TYPE, CONF_UNIT_OF_MEASUREMENT, ) from . import DOMAIN as DAIKIN_DOMAIN, DaikinApi from .const import ( ATTR_COOL_ENERGY, ATTR_HEAT_ENERGY, ATTR_HUMIDITY, ATTR_INSIDE_TEMPERATURE, ATTR_OUTSIDE_TEMPERATURE, ATTR_TARGET_HUMIDITY, ATTR_TOTAL_POWER, SENSOR_TYPE_ENERGY, SENSOR_TYPE_HUMIDITY, SENSOR_TYPE_POWER, SENSOR_TYPE_TEMPERATURE, SENSOR_TYPES, ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Old way of setting up the Daikin sensors. Can only be called when a user accidentally mentions the platform in their config. But even in that case it would have been ignored. """ async def async_setup_entry(hass, entry, async_add_entities): """Set up Daikin climate based on config_entry.""" daikin_api = hass.data[DAIKIN_DOMAIN].get(entry.entry_id) sensors = [ATTR_INSIDE_TEMPERATURE] if daikin_api.device.support_outside_temperature: sensors.append(ATTR_OUTSIDE_TEMPERATURE) if daikin_api.device.support_energy_consumption: sensors.append(ATTR_TOTAL_POWER) sensors.append(ATTR_COOL_ENERGY) sensors.append(ATTR_HEAT_ENERGY) if daikin_api.device.support_humidity: sensors.append(ATTR_HUMIDITY) sensors.append(ATTR_TARGET_HUMIDITY) async_add_entities([DaikinSensor.factory(daikin_api, sensor) for sensor in sensors]) class DaikinSensor(SensorEntity): """Representation of a Sensor.""" @staticmethod def factory(api: DaikinApi, monitored_state: str): """Initialize any DaikinSensor.""" cls = { SENSOR_TYPE_TEMPERATURE: DaikinClimateSensor, SENSOR_TYPE_HUMIDITY: DaikinClimateSensor, SENSOR_TYPE_POWER: DaikinPowerSensor, SENSOR_TYPE_ENERGY: DaikinPowerSensor, }[SENSOR_TYPES[monitored_state][CONF_TYPE]] return cls(api, monitored_state) def __init__(self, api: DaikinApi, monitored_state: str) -> None: """Initialize the sensor.""" self._api = api self._sensor = SENSOR_TYPES[monitored_state] self._name = f"{api.name} {self._sensor[CONF_NAME]}" self._device_attribute = monitored_state @property def unique_id(self): """Return a unique ID.""" return f"{self._api.device.mac}-{self._device_attribute}" @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" raise NotImplementedError @property def device_class(self): """Return the class of this device.""" return self._sensor.get(CONF_DEVICE_CLASS) @property def icon(self): """Return the icon of this device.""" return self._sensor.get(CONF_ICON) @property def unit_of_measurement(self): """Return the unit of measurement.""" return self._sensor[CONF_UNIT_OF_MEASUREMENT] async def async_update(self): """Retrieve latest state.""" await self._api.async_update() @property def device_info(self): """Return a device description for device registry.""" return self._api.device_info class DaikinClimateSensor(DaikinSensor): """Representation of a Climate Sensor.""" @property def state(self): """Return the internal state of the sensor.""" if self._device_attribute == ATTR_INSIDE_TEMPERATURE: return self._api.device.inside_temperature if self._device_attribute == ATTR_OUTSIDE_TEMPERATURE: return self._api.device.outside_temperature if self._device_attribute == ATTR_HUMIDITY: return self._api.device.humidity if self._device_attribute == ATTR_TARGET_HUMIDITY: return self._api.device.target_humidity return None class DaikinPowerSensor(DaikinSensor): """Representation of a power/energy consumption sensor.""" @property def state(self): """Return the state of the sensor.""" if self._device_attribute == ATTR_TOTAL_POWER: return round(self._api.device.current_total_power_consumption, 3) if self._device_attribute == ATTR_COOL_ENERGY: return round(self._api.device.last_hour_cool_energy_consumption, 3) if self._device_attribute == ATTR_HEAT_ENERGY: return round(self._api.device.last_hour_heat_energy_consumption, 3) return None
{ "content_hash": "cf34406fc280dab89bc01547a8a8cc96", "timestamp": "", "source": "github", "line_count": 143, "max_line_length": 88, "avg_line_length": 32.77622377622377, "alnum_prop": 0.6554299125240025, "repo_name": "kennedyshead/home-assistant", "id": "a5b515ea918946832b1fe868947f01bc94e0b5cb", "size": "4687", "binary": false, "copies": "2", "ref": "refs/heads/dev", "path": "homeassistant/components/daikin/sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1795" }, { "name": "Python", "bytes": "33970989" }, { "name": "Shell", "bytes": "4900" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function import unittest from blaze.py2help import skipIf from blaze.datadescriptor import data_descriptor_from_cffi, dd_as_py from datashape import dshape try: import cffi ffi = cffi.FFI() except ImportError: cffi = None class TestCFFIMemBufDataDescriptor(unittest.TestCase): @skipIf(cffi is None, 'cffi is not installed') def test_scalar(self): a = ffi.new('int *', 3) dd = data_descriptor_from_cffi(ffi, a, writable=True) self.assertEqual(dd.dshape, dshape('int32')) self.assertEqual(dd_as_py(dd), 3) self.assertTrue(isinstance(dd_as_py(dd), int)) a = ffi.new('float *', 3.25) dd = data_descriptor_from_cffi(ffi, a, writable=True) self.assertEqual(dd.dshape, dshape('float32')) self.assertEqual(dd_as_py(dd), 3.25) self.assertTrue(isinstance(dd_as_py(dd), float)) @skipIf(cffi is None, 'cffi is not installed') def test_1d_array(self): # An array where the size is in the type a = ffi.new('short[32]', [2*i for i in range(32)]) dd = data_descriptor_from_cffi(ffi, a, writable=True) self.assertEqual(dd.dshape, dshape('32 * int16')) self.assertEqual(dd_as_py(dd), [2*i for i in range(32)]) # An array where the size is not in the type a = ffi.new('double[]', [1.5*i for i in range(32)]) dd = data_descriptor_from_cffi(ffi, a, writable=True) self.assertEqual(dd.dshape, dshape('32 * float64')) self.assertEqual(dd_as_py(dd), [1.5*i for i in range(32)]) @skipIf(cffi is None, 'cffi is not installed') def test_2d_array(self): # An array where the leading array size is in the type vals = [[2**i + j for i in range(35)] for j in range(32)] a = ffi.new('long long[32][35]', vals) dd = data_descriptor_from_cffi(ffi, a, writable=True) self.assertEqual(dd.dshape, dshape('32 * 35 * int64')) self.assertEqual(dd_as_py(dd), vals) # An array where the leading array size is not in the type vals = [[a + b*2 for a in range(35)] for b in range(32)] a = ffi.new('unsigned char[][35]', vals) dd = data_descriptor_from_cffi(ffi, a, writable=True) self.assertEqual(dd.dshape, dshape('32 * 35 * uint8')) self.assertEqual(dd_as_py(dd), vals) @skipIf(cffi is None, 'cffi is not installed') def test_3d_array(self): # Simple 3D array vals = [[[(i + 2*j + 3*k) for i in range(10)] for j in range(12)] for k in range(14)] a = ffi.new('unsigned int[14][12][10]', vals) dd = data_descriptor_from_cffi(ffi, a, writable=True) self.assertEqual(dd.dshape, dshape('14 * 12 * 10 * uint32')) self.assertEqual(dd_as_py(dd), vals) if __name__ == '__main__': unittest.main()
{ "content_hash": "eb4f59349d0863ccab87811d8f431d08", "timestamp": "", "source": "github", "line_count": 76, "max_line_length": 68, "avg_line_length": 38.6578947368421, "alnum_prop": 0.5987066031313819, "repo_name": "mwiebe/blaze", "id": "a820cbb0a295b81def745fdde843b16ce32bb9f8", "size": "2938", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "blaze/datadescriptor/tests/test_cffi_membuf_data_descriptor.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "143372" }, { "name": "JavaScript", "bytes": "56478" }, { "name": "Python", "bytes": "524428" }, { "name": "Ruby", "bytes": "1188" }, { "name": "Shell", "bytes": "13149" } ], "symlink_target": "" }
"""The WattTime integration.""" from __future__ import annotations from datetime import timedelta from aiowatttime import Client from aiowatttime.emissions import RealTimeEmissionsResponseType from aiowatttime.errors import InvalidCredentialsError, WattTimeError from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( CONF_LATITUDE, CONF_LONGITUDE, CONF_PASSWORD, CONF_USERNAME, Platform, ) from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryAuthFailed from homeassistant.helpers import aiohttp_client from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import DOMAIN, LOGGER DEFAULT_UPDATE_INTERVAL = timedelta(minutes=5) PLATFORMS: list[Platform] = [Platform.SENSOR] async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up WattTime from a config entry.""" session = aiohttp_client.async_get_clientsession(hass) try: client = await Client.async_login( entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD], session=session ) except InvalidCredentialsError as err: raise ConfigEntryAuthFailed("Invalid username/password") from err except WattTimeError as err: LOGGER.error("Error while authenticating with WattTime: %s", err) return False async def async_update_data() -> RealTimeEmissionsResponseType: """Get the latest realtime emissions data.""" try: return await client.emissions.async_get_realtime_emissions( entry.data[CONF_LATITUDE], entry.data[CONF_LONGITUDE] ) except InvalidCredentialsError as err: raise ConfigEntryAuthFailed("Invalid username/password") from err except WattTimeError as err: raise UpdateFailed( f"Error while requesting data from WattTime: {err}" ) from err coordinator = DataUpdateCoordinator( hass, LOGGER, name=entry.title, update_interval=DEFAULT_UPDATE_INTERVAL, update_method=async_update_data, ) await coordinator.async_config_entry_first_refresh() hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][entry.entry_id] = coordinator await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS) entry.async_on_unload(entry.add_update_listener(async_reload_entry)) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) return unload_ok async def async_reload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> None: """Handle an options update.""" await hass.config_entries.async_reload(config_entry.entry_id)
{ "content_hash": "5263cc597697974ac4cd276f4aac0c97", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 88, "avg_line_length": 33.93103448275862, "alnum_prop": 0.7157859078590786, "repo_name": "mezz64/home-assistant", "id": "cac73f597f6b7f72d4988f94435422142df421cd", "size": "2952", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "homeassistant/components/watttime/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2963" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "52481895" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
from sys import argv import numpy from gensim import corpora, models, similarities from gensim.corpora import MmCorpus from nltk.corpus import stopwords import os from pprint import pprint from numpy import median # Enables ease of recall in the future modelFile = '.' + os.sep + 'ToxicologyModel' os.makedirs(modelFile,exist_ok=True) # File, Documents Text, & Dictionary arrays fileArray = os.listdir('..' + os.sep + 'Domains' + os.sep + 'ToxicologyTxt' + os.sep) half = int(len(fileArray) / 2) fileArrayHalf = fileArray[:half] fileArraySecondHalf = fileArray[half:] if "cached" not in argv: #get stopwords stops = set(stopwords.words('english')) dictionary=[] documents=[] for file in fileArrayHalf: with open('..'+os.sep+'Domains'+os.sep+'ToxicologyTxt'+os.sep+file,mode='rt',errors='ignore') as openFile: words = openFile.read() documents.append(words.lower().split()) for word in words.lower().split(): dictionary.append(word) openFile.close() #map list of words to id#s & save for reference dictionary = corpora.Dictionary([dictionary]) dictionary.save(modelFile+os.sep+modelFile+'.dict') # print # of documents print("\n\n# of documents: ", len(documents), "\nReading in test documents...") # Vectorize the body of documents agains the dictionary corpus = [dictionary.doc2bow(document) for document in documents] print("Serializing the corpus of documents...") corpora.MmCorpus.serialize('..' + os.sep + 'DomainModels' + os.sep + 'ToxicologyModel' + os.sep + 'ToxicologyModel.serial',corpus) tfIdfModel = models.TfidfModel(corpus) tfIdfModel.save('..' + os.sep + 'DomainModels' + os.sep + 'ToxicologyModel' + os.sep + 'ToxicologyModel.mm') else: print("Loading saved corpora...") dictionary=corpora.Dictionary.load(modelFile+os.sep+modelFile+'.dict') tfIdfModel=models.TfidfModel.load('..' + os.sep + 'DomainModels' + os.sep + 'ToxicologyModel' + os.sep + 'ToxicologyModel.mm') corpus=MmCorpus('..' + os.sep + 'DomainModels' + os.sep + 'ToxicologyModel' + os.sep + 'ToxicologyModel.serial') #corpus to tdidf matrix corpus=tfIdfModel[corpus] print("Ensure correct corpus length: ",len(corpus),"\n") print("Document #1 from the corpus (tfIdf): \n",corpus[1][1:20],"\n") if "cached" not in argv: #Train LSI print("Training LSI...") lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=700) lsi.save('..'+os.sep+'DomainModels'+os.sep+'ToxicologyModel'+os.sep+'ToxicologyModel.lsi') else: lsi=models.LsiModel.load('..'+os.sep+'DomainModels'+os.sep+'ToxicologyModel'+os.sep+'ToxicologyModel.lsi') #transform corpus to LSI space, index it, & save index = similarities.MatrixSimilarity(lsi[corpus]) index.save(modelFile+os.sep+'ToxicologyModel.index') print("Calculating the cosines of each document...") index = similarities.MatrixSimilarity.load(modelFile+os.sep+'ToxicologyModel.index') #Test files print("Vectorizing the test documents (Bag of Words)...\n") testVectorArray=[] count=1 for file in fileArraySecondHalf: with open('..'+os.sep+'Domains'+os.sep+'ToxicologyTxt'+os.sep+file,mode='rt',errors='ignore') as current: current=current.read() # Vectorize test documents against the dictionary (Bag of Words) testVector=dictionary.doc2bow(current.lower().split()) testVector=tfIdfModel[testVector] if count==1: print("(Tdidf) Frequency vectors:\n", "(wordId,frequency): ", fileArraySecondHalf[0], "\n", testVector[1:20], end='\n\n') testVectorArray+=lsi[testVector] count += 1 print("Preview an LSI vector: ",fileArraySecondHalf[0],"\n",testVectorArray[1:20],end='\n\n') print("Creating vector space for entire corpus...") #perform a similarity query against the corpus & print (document_number, document_similarity) print("Calculating cosine similarity between corpus and test documents...\n") similarity=[] similarity=index[testVectorArray] pprint("Max similarity between halves of toxicology: ") pprint(max(similarity)) pprint("Mean similarity between halves of toxicology: ") pprint(numpy.mean(similarity)) pprint("Median similarity between halves of toxicology: ") pprint(numpy.median(similarity)) # print(testfile4+"\n(Indexed list of documents,test document #3 similarity):") # pprint(list(enumerate(similarity4))[1:20]) #take the average similarity of each vector # temp1=0 # temp2=0 # temp3=0 # temp4=0 # for sim1 in similarity1: # temp1=temp1+sim1 # average1=temp1/len(similarity1) # for sim2 in similarity2: # temp2=temp2+sim2 # average2=temp2/len(similarity2) # for sim3 in similarity3: # temp3=temp3+sim3 # average3=temp3/len(similarity3) # for sim4 in similarity4: # temp4=temp4+sim4 # average4=temp4/len(similarity4) # # #get the highest similarity values # max1=max(similarity1) # max2=max(similarity2) # max3=max(similarity3) # max4=max(similarity4) # # #get median similarity values & round # median1=median(numpy.round(similarity1,2)) # median2=median(numpy.round(similarity2,2)) # median3=median(numpy.round(similarity3,2)) # median4=median(numpy.round(similarity4,2)) # # # #print average similarity # print("Test1 average similarity: ",average1) # print("Test2 average similarity: ",average2) # print("Test3 average similarity: ",average3) # print("Test4 average similarity: ",average4) # # #print highest single similarity # print("Highest similarity value: ",max1) # print("Highest similarity value: ",max2) # print("Highest similarity value: ",max3) # print("Highest similarity value: ",max4) # # #print median values # print("Median similarity value: ",median1) # print("Median similarity value: ",median2) # print("Median similarity value: ",median3) # print("Median similarity value: ",median4)
{ "content_hash": "6f23367055300a5bc5dc34163865f468", "timestamp": "", "source": "github", "line_count": 156, "max_line_length": 134, "avg_line_length": 37.51282051282051, "alnum_prop": 0.7081339712918661, "repo_name": "MasterofKnuth/Nanoinformatics", "id": "ea720b469464cc9e37d662f453eb977484a5c6a8", "size": "5852", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "LSI/ScratchModelRunners/LsiToxicology.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "71425" }, { "name": "Shell", "bytes": "68" } ], "symlink_target": "" }
import copy import os import re import mxnet as mx import numpy as np from common import models from mxnet.test_utils import discard_stderr import pickle as pkl def test_symbol_basic(): mlist = [] mlist.append(models.mlp2()) for m in mlist: m.list_arguments() m.list_outputs() def test_symbol_compose(): data = mx.symbol.Variable('data') net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10) net1 = mx.symbol.FullyConnected(data=net1, name='fc2', num_hidden=100) net1.list_arguments() == ['data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias'] net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10) net2 = mx.symbol.Activation(data=net2, act_type='relu') net2 = mx.symbol.FullyConnected(data=net2, name='fc4', num_hidden=20) #print(net2.debug_str()) composed = net2(fc3_data=net1, name='composed') #print(composed.debug_str()) multi_out = mx.symbol.Group([composed, net1]) assert len(multi_out.list_outputs()) == 2 def test_symbol_copy(): data = mx.symbol.Variable('data') data_2 = copy.deepcopy(data) data_3 = copy.copy(data) assert data.tojson() == data_2.tojson() assert data.tojson() == data_3.tojson() def test_symbol_internal(): data = mx.symbol.Variable('data') oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10) net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100) assert net1.list_arguments() == ['data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias'] internal = net1.get_internals() fc1 = internal['fc1_output'] assert fc1.list_arguments() == oldfc.list_arguments() def test_symbol_children(): data = mx.symbol.Variable('data') oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10) net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100) assert net1.get_children().list_outputs() == ['fc1_output', 'fc2_weight', 'fc2_bias'] assert net1.get_children().get_children().list_outputs() == ['data', 'fc1_weight', 'fc1_bias'] assert net1.get_children()['fc2_weight'].list_arguments() == ['fc2_weight'] assert net1.get_children()['fc2_weight'].get_children() is None data = mx.sym.Variable('data') sliced = mx.sym.SliceChannel(data, num_outputs=3, name='slice') concat = mx.sym.Concat(*list(sliced)) assert concat.get_children().list_outputs() == \ ['slice_output0', 'slice_output1', 'slice_output2'] assert sliced.get_children().list_outputs() == ['data'] def test_symbol_pickle(): mlist = [models.mlp2(), models.conv()] data = pkl.dumps(mlist) mlist2 = pkl.loads(data) for x, y in zip(mlist, mlist2): assert x.tojson() == y.tojson() def test_symbol_saveload(): sym = models.mlp2() fname = 'tmp_sym.json' sym.save(fname) data2 = mx.symbol.load(fname) # save because of order assert sym.tojson() == data2.tojson() os.remove(fname) def test_symbol_infer_type(): data = mx.symbol.Variable('data') f32data = mx.symbol.Cast(data=data, dtype='float32') fc1 = mx.symbol.FullyConnected(data = f32data, name='fc1', num_hidden=128) mlp = mx.symbol.SoftmaxOutput(data = fc1, name = 'softmax') arg, out, aux = mlp.infer_type(data=np.float16) assert arg == [np.float16, np.float32, np.float32, np.float32] assert out == [np.float32] assert aux == [] def test_symbol_infer_shape(): num_hidden = 128 num_dim = 64 num_sample = 10 data = mx.symbol.Variable('data') prev = mx.symbol.Variable('prevstate') x2h = mx.symbol.FullyConnected(data=data, name='x2h', num_hidden=num_hidden) h2h = mx.symbol.FullyConnected(data=prev, name='h2h', num_hidden=num_hidden) out = mx.symbol.Activation(data=mx.sym.elemwise_add(x2h, h2h), name='out', act_type='relu') # shape inference will fail because information is not available for h2h ret = out.infer_shape(data=(num_sample, num_dim)) assert ret == (None, None, None) arg, out_shapes, aux_shapes = out.infer_shape_partial(data=(num_sample, num_dim)) arg_shapes = dict(zip(out.list_arguments(), arg)) assert arg_shapes['data'] == (num_sample, num_dim) assert arg_shapes['x2h_weight'] == (num_hidden, num_dim) assert arg_shapes['h2h_weight'] == () # now we can do full shape inference state_shape = out_shapes[0] arg, out_shapes, aux_shapes = out.infer_shape(data=(num_sample, num_dim), prevstate=state_shape) arg_shapes = dict(zip(out.list_arguments(), arg)) assert arg_shapes['data'] == (num_sample, num_dim) assert arg_shapes['x2h_weight'] == (num_hidden, num_dim) assert arg_shapes['h2h_weight'] == (num_hidden, num_hidden) def test_symbol_infer_shape_var(): "Test specifying shape information when constructing a variable" shape = (2, 3) a = mx.symbol.Variable('a', shape=shape) b = mx.symbol.Variable('b') c = mx.symbol.elemwise_add(a, b) arg_shapes, out_shapes, aux_shapes = c.infer_shape() assert arg_shapes[0] == shape assert arg_shapes[1] == shape assert out_shapes[0] == shape overwrite_shape = (5, 6) arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=overwrite_shape) assert arg_shapes[0] == overwrite_shape assert arg_shapes[1] == overwrite_shape assert out_shapes[0] == overwrite_shape def check_symbol_consistency(sym1, sym2, ctx): assert sym1.list_arguments() == sym2.list_arguments() assert sym1.list_auxiliary_states() == sym2.list_auxiliary_states() assert sym1.list_outputs() == sym2.list_outputs() mx.test_utils.check_consistency([sym1, sym2], ctx_list=[ctx, ctx]) def test_load_000800(): with mx.AttrScope(ctx_group='stage1'): data = mx.symbol.Variable('data', lr_mult=0.2) weight = mx.sym.Variable(name='fc1_weight', lr_mult=1.2) fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128, wd_mult=0.3) act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu") set_stage1 = set(act1.list_arguments()) with mx.AttrScope(ctx_group='stage2'): fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64, lr_mult=0.01) act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu") fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10) fc3 = mx.symbol.BatchNorm(fc3, name='batchnorm0') sym1 = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax') curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sym2 = mx.sym.load(os.path.join(curr_path, 'save_000800.json')) attr1 = sym1.attr_dict() attr2 = sym2.attr_dict() for k, v1 in attr1.items(): assert k in attr2, k v2 = attr2[k] for kk, vv1 in v1.items(): if kk.startswith('__') and kk.endswith('__'): assert kk in v2 and v2[kk] == vv1, k + str(v1) + str(v2) check_symbol_consistency(sym1, sym2, {'ctx': mx.cpu(0), 'group2ctx': {'stage1' : mx.cpu(1), 'stage2' : mx.cpu(2)}, 'data': (1,200)}) def test_blockgrad(): a = mx.sym.Variable('a') b = mx.sym.BlockGrad(2*a) exe = b.simple_bind(ctx=mx.cpu(), a=(10,10)) def test_zero_prop(): data = mx.symbol.Variable('data') for i in range(10): data = data * data exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256)) big = int(re.search('Total (\d+) MB allocated', exe.debug_str()).group(1)) exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256), grad_req='null') small1 = int(re.search('Total (\d+) MB allocated', exe.debug_str()).group(1)) data = mx.sym.stop_gradient(data) exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256)) small2 = int(re.search('Total (\d+) MB allocated', exe.debug_str()).group(1)) assert big > small2 assert small1 == small2 def test_zero_prop2(): x = mx.sym.Variable('x') idx = mx.sym.Variable('idx') y = mx.sym.batch_take(x, idx) z = mx.sym.stop_gradient(y) exe = z.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,), type_dict={'x': np.float32, 'idx': np.int32}) exe.forward() exe.backward() # The following bind() should throw an exception. We discard the expected stderr # output for this operation only in order to keep the test logs clean. with discard_stderr(): try: y.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,), type_dict={'x': np.float32, 'idx': np.int32}) except: return assert False if __name__ == '__main__': import nose nose.runmodule()
{ "content_hash": "8365de462fdb38550cca6e9a5db554cf", "timestamp": "", "source": "github", "line_count": 234, "max_line_length": 108, "avg_line_length": 37.44017094017094, "alnum_prop": 0.6268690788722748, "repo_name": "stefanhenneking/mxnet", "id": "c570325a6b66a84c73161ff056d000d8f1d7479d", "size": "9547", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "tests/python/unittest/test_symbol.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "12255" }, { "name": "C", "bytes": "100668" }, { "name": "C++", "bytes": "3892731" }, { "name": "CMake", "bytes": "53484" }, { "name": "Cuda", "bytes": "746143" }, { "name": "Groovy", "bytes": "217" }, { "name": "Java", "bytes": "20406" }, { "name": "Jupyter Notebook", "bytes": "1229390" }, { "name": "Makefile", "bytes": "40444" }, { "name": "Matlab", "bytes": "30187" }, { "name": "Perl", "bytes": "669058" }, { "name": "Perl 6", "bytes": "22779" }, { "name": "Protocol Buffer", "bytes": "77256" }, { "name": "Python", "bytes": "3820057" }, { "name": "R", "bytes": "324842" }, { "name": "Scala", "bytes": "884082" }, { "name": "Shell", "bytes": "198299" } ], "symlink_target": "" }
__author__ = 'fahadadeel' import jpype import os.path from WorkingWithTables import AddImage asposeapispath = os.path.join(os.path.abspath("../../../"), "lib") print "You need to put your Aspose.Slides for Java APIs .jars in this folder:\n"+asposeapispath jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.ext.dirs=%s" % asposeapispath) testObject = AddImage('data/') testObject.main()
{ "content_hash": "e6cc52fd2489eb06a0b26c1784298798", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 95, "avg_line_length": 29.923076923076923, "alnum_prop": 0.7377892030848329, "repo_name": "aspose-slides/Aspose.Slides-for-Java", "id": "734beacab59ac38eb8904332b72711b46c690b02", "size": "389", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Plugins/Aspose-Slides-Java-for-Python/tests/WorkingWithTables/AddImage/AddImage.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "27301" }, { "name": "Java", "bytes": "692513" }, { "name": "PHP", "bytes": "144457" }, { "name": "Python", "bytes": "279574" }, { "name": "Ruby", "bytes": "166824" } ], "symlink_target": "" }
""" Exceptions for logger. """ class LogProcessError(Exception): """Log Process Error""" pass
{ "content_hash": "b1944db5d99d669b4287900e806d5dfa", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 33, "avg_line_length": 12.11111111111111, "alnum_prop": 0.6146788990825688, "repo_name": "OctavianLee/Cytisas", "id": "a29c1e8164d27ac2d126e811ab5b0228a3463e61", "size": "109", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cytisas/logger/excs.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "197" }, { "name": "Python", "bytes": "12252" } ], "symlink_target": "" }
import re import copy import numpy as np from numpy.testing import assert_allclose, assert_equal, assert_array_equal import pytest from scipy.linalg import hilbert, svd from scipy.sparse import csc_matrix, isspmatrix from scipy.sparse.linalg import LinearOperator, aslinearoperator from scipy.sparse.linalg import svds from scipy.sparse.linalg._eigen.arpack import ArpackNoConvergence # --- Helper Functions / Classes --- def sorted_svd(m, k, which='LM'): # Compute svd of a dense matrix m, and return singular vectors/values # sorted. if isspmatrix(m): m = m.toarray() u, s, vh = svd(m) if which == 'LM': ii = np.argsort(s)[-k:] elif which == 'SM': ii = np.argsort(s)[:k] else: raise ValueError("unknown which=%r" % (which,)) return u[:, ii], s[ii], vh[ii] def svd_estimate(u, s, vh): return np.dot(u, np.dot(np.diag(s), vh)) def _check_svds(A, k, u, s, vh, which="LM", check_usvh_A=False, check_svd=True, atol=1e-10, rtol=1e-7): n, m = A.shape # Check shapes. assert_equal(u.shape, (n, k)) assert_equal(s.shape, (k,)) assert_equal(vh.shape, (k, m)) # Check that the original matrix can be reconstituted. A_rebuilt = (u*s).dot(vh) assert_equal(A_rebuilt.shape, A.shape) if check_usvh_A: assert_allclose(A_rebuilt, A, atol=atol, rtol=rtol) # Check that u is a semi-orthogonal matrix. uh_u = np.dot(u.T.conj(), u) assert_equal(uh_u.shape, (k, k)) assert_allclose(uh_u, np.identity(k), atol=atol, rtol=rtol) # Check that V is a semi-orthogonal matrix. vh_v = np.dot(vh, vh.T.conj()) assert_equal(vh_v.shape, (k, k)) assert_allclose(vh_v, np.identity(k), atol=atol, rtol=rtol) # Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd if check_svd: u2, s2, vh2 = sorted_svd(A, k, which) assert_allclose(np.abs(u), np.abs(u2), atol=atol, rtol=rtol) assert_allclose(s, s2, atol=atol, rtol=rtol) assert_allclose(np.abs(vh), np.abs(vh2), atol=atol, rtol=rtol) class CheckingLinearOperator(LinearOperator): def __init__(self, A): self.A = A self.dtype = A.dtype self.shape = A.shape def _matvec(self, x): assert_equal(max(x.shape), np.size(x)) return self.A.dot(x) def _rmatvec(self, x): assert_equal(max(x.shape), np.size(x)) return self.A.T.conjugate().dot(x) # --- Test Input Validation --- # Tests input validation on parameters `k` and `which` # Needs better input validation checks for all other parameters class SVDSCommonTests: solver = None # some of these IV tests could run only once, say with solver=None _A_empty_msg = "`A` must not be empty." _A_dtype_msg = "`A` must be of floating or complex floating data type" _A_type_msg = "type not understood" _A_ndim_msg = "array must have ndim <= 2" _A_validation_inputs = [ (np.asarray([[]]), ValueError, _A_empty_msg), (np.asarray([[1, 2], [3, 4]]), ValueError, _A_dtype_msg), ("hi", TypeError, _A_type_msg), (np.asarray([[[1., 2.], [3., 4.]]]), ValueError, _A_ndim_msg)] @pytest.mark.parametrize("args", _A_validation_inputs) def test_svds_input_validation_A(self, args): A, error_type, message = args with pytest.raises(error_type, match=message): svds(A, k=1, solver=self.solver) @pytest.mark.parametrize("k", [-1, 0, 3, 4, 5, 1.5, "1"]) def test_svds_input_validation_k_1(self, k): rng = np.random.default_rng(0) A = rng.random((4, 3)) # propack can do complete SVD if self.solver == 'propack' and k == 3: res = svds(A, k=k, solver=self.solver) _check_svds(A, k, *res, check_usvh_A=True, check_svd=True) return message = ("`k` must be an integer satisfying") with pytest.raises(ValueError, match=message): svds(A, k=k, solver=self.solver) def test_svds_input_validation_k_2(self): # I think the stack trace is reasonable when `k` can't be converted # to an int. message = "int() argument must be a" with pytest.raises(TypeError, match=re.escape(message)): svds(np.eye(10), k=[], solver=self.solver) message = "invalid literal for int()" with pytest.raises(ValueError, match=message): svds(np.eye(10), k="hi", solver=self.solver) @pytest.mark.parametrize("tol", (-1, np.inf, np.nan)) def test_svds_input_validation_tol_1(self, tol): message = "`tol` must be a non-negative floating point value." with pytest.raises(ValueError, match=message): svds(np.eye(10), tol=tol, solver=self.solver) @pytest.mark.parametrize("tol", ([], 'hi')) def test_svds_input_validation_tol_2(self, tol): # I think the stack trace is reasonable here message = "'<' not supported between instances" with pytest.raises(TypeError, match=message): svds(np.eye(10), tol=tol, solver=self.solver) @pytest.mark.parametrize("which", ('LA', 'SA', 'ekki', 0)) def test_svds_input_validation_which(self, which): # Regression test for a github issue. # https://github.com/scipy/scipy/issues/4590 # Function was not checking for eigenvalue type and unintended # values could be returned. with pytest.raises(ValueError, match="`which` must be in"): svds(np.eye(10), which=which, solver=self.solver) @pytest.mark.parametrize("transpose", (True, False)) @pytest.mark.parametrize("n", range(4, 9)) def test_svds_input_validation_v0_1(self, transpose, n): rng = np.random.default_rng(0) A = rng.random((5, 7)) v0 = rng.random(n) if transpose: A = A.T k = 2 message = "`v0` must have shape" required_length = (A.shape[0] if self.solver == 'propack' else min(A.shape)) if n != required_length: with pytest.raises(ValueError, match=message): svds(A, k=k, v0=v0, solver=self.solver) def test_svds_input_validation_v0_2(self): A = np.ones((10, 10)) v0 = np.ones((1, 10)) message = "`v0` must have shape" with pytest.raises(ValueError, match=message): svds(A, k=1, v0=v0, solver=self.solver) @pytest.mark.parametrize("v0", ("hi", 1, np.ones(10, dtype=int))) def test_svds_input_validation_v0_3(self, v0): A = np.ones((10, 10)) message = "`v0` must be of floating or complex floating data type." with pytest.raises(ValueError, match=message): svds(A, k=1, v0=v0, solver=self.solver) @pytest.mark.parametrize("maxiter", (-1, 0, 5.5)) def test_svds_input_validation_maxiter_1(self, maxiter): message = ("`maxiter` must be a positive integer.") with pytest.raises(ValueError, match=message): svds(np.eye(10), maxiter=maxiter, solver=self.solver) def test_svds_input_validation_maxiter_2(self): # I think the stack trace is reasonable when `k` can't be converted # to an int. message = "int() argument must be a" with pytest.raises(TypeError, match=re.escape(message)): svds(np.eye(10), maxiter=[], solver=self.solver) message = "invalid literal for int()" with pytest.raises(ValueError, match=message): svds(np.eye(10), maxiter="hi", solver=self.solver) @pytest.mark.parametrize("rsv", ('ekki', 10)) def test_svds_input_validation_return_singular_vectors(self, rsv): message = "`return_singular_vectors` must be in" with pytest.raises(ValueError, match=message): svds(np.eye(10), return_singular_vectors=rsv, solver=self.solver) # --- Test Parameters --- @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("which", ["LM", "SM"]) def test_svds_parameter_k_which(self, k, which): # check that the `k` parameter sets the number of eigenvalues/ # eigenvectors returned. # Also check that the `which` parameter sets whether the largest or # smallest eigenvalues are returned rng = np.random.default_rng(0) A = rng.random((10, 10)) res = svds(A, k=k, which=which, solver=self.solver, random_state=0) _check_svds(A, k, *res, which=which, atol=8e-10) # loop instead of parametrize for simplicity def test_svds_parameter_tol(self): # check the effect of the `tol` parameter on solver accuracy by solving # the same problem with varying `tol` and comparing the eigenvalues # against ground truth computed n = 100 # matrix size k = 3 # number of eigenvalues to check # generate a random, sparse-ish matrix # effect isn't apparent for matrices that are too small rng = np.random.default_rng(0) A = rng.random((n, n)) A[A > .1] = 0 A = A @ A.T _, s, _ = svd(A) # calculate ground truth # calculate the error as a function of `tol` A = csc_matrix(A) def err(tol): _, s2, _ = svds(A, k=k, v0=np.ones(n), solver=self.solver, tol=tol) return np.linalg.norm((s2 - s[k-1::-1])/s[k-1::-1]) tols = [1e-4, 1e-2, 1e0] # tolerance levels to check # for 'arpack' and 'propack', accuracies make discrete steps accuracies = {'propack': [1e-12, 1e-6, 1e-4], 'arpack': [5e-15, 1e-10, 1e-10], 'lobpcg': [1e-11, 1e-3, 10]} for tol, accuracy in zip(tols, accuracies[self.solver]): error = err(tol) assert error < accuracy assert error > accuracy/10 def test_svd_v0(self): # check that the `v0` parameter affects the solution n = 100 k = 1 # If k != 1, LOBPCG needs more initial vectors, which are generated # with random_state, so it does not pass w/ k >= 2. # For some other values of `n`, the AssertionErrors are not raised # with different v0s, which is reasonable. rng = np.random.default_rng(0) A = rng.random((n, n)) # with the same v0, solutions are the same, and they are accurate # v0 takes precedence over random_state v0a = rng.random(n) res1a = svds(A, k, v0=v0a, solver=self.solver, random_state=0) res2a = svds(A, k, v0=v0a, solver=self.solver, random_state=1) assert_equal(res1a, res2a) _check_svds(A, k, *res1a) # with the same v0, solutions are the same, and they are accurate v0b = rng.random(n) res1b = svds(A, k, v0=v0b, solver=self.solver, random_state=2) res2b = svds(A, k, v0=v0b, solver=self.solver, random_state=3) assert_equal(res1b, res2b) _check_svds(A, k, *res1b) # with different v0, solutions can be numerically different message = "Arrays are not equal" with pytest.raises(AssertionError, match=message): assert_equal(res1a, res1b) def test_svd_random_state(self): # check that the `random_state` parameter affects the solution # Admittedly, `n` and `k` are chosen so that all solver pass all # these checks. That's a tall order, since LOBPCG doesn't want to # achieve the desired accuracy and ARPACK often returns the same # singular values/vectors for different v0. n = 100 k = 1 rng = np.random.default_rng(0) A = rng.random((n, n)) # with the same random_state, solutions are the same and accurate res1a = svds(A, k, solver=self.solver, random_state=0) res2a = svds(A, k, solver=self.solver, random_state=0) assert_equal(res1a, res2a) _check_svds(A, k, *res1a) # with the same random_state, solutions are the same and accurate res1b = svds(A, k, solver=self.solver, random_state=1) res2b = svds(A, k, solver=self.solver, random_state=1) assert_equal(res1b, res2b) _check_svds(A, k, *res1b) # with different random_state, solutions can be numerically different message = "Arrays are not equal" with pytest.raises(AssertionError, match=message): assert_equal(res1a, res1b) @pytest.mark.parametrize("random_state", (0, 1, np.random.RandomState(0), np.random.default_rng(0))) def test_svd_random_state_2(self, random_state): n = 100 k = 1 rng = np.random.default_rng(0) A = rng.random((n, n)) random_state_2 = copy.deepcopy(random_state) # with the same random_state, solutions are the same and accurate res1a = svds(A, k, solver=self.solver, random_state=random_state) res2a = svds(A, k, solver=self.solver, random_state=random_state_2) assert_equal(res1a, res2a) _check_svds(A, k, *res1a) @pytest.mark.parametrize("random_state", (None, np.random.RandomState(0), np.random.default_rng(0))) def test_svd_random_state_3(self, random_state): n = 100 k = 5 rng = np.random.default_rng(0) A = rng.random((n, n)) # random_state in different state produces accurate - but not # not necessarily identical - results res1a = svds(A, k, solver=self.solver, random_state=random_state) res2a = svds(A, k, solver=self.solver, random_state=random_state) _check_svds(A, k, *res1a, rtol=1e-6) _check_svds(A, k, *res2a, rtol=1e-6) message = "Arrays are not equal" with pytest.raises(AssertionError, match=message): assert_equal(res1a, res2a) def test_svd_maxiter(self): # check that maxiter works as expected: should not return accurate # solution after 1 iteration, but should with default `maxiter` A = hilbert(6) k = 1 u, s, vh = sorted_svd(A, k) if self.solver == 'arpack': message = "ARPACK error -1: No convergence" with pytest.raises(ArpackNoConvergence, match=message): svds(A, k, ncv=3, maxiter=1, solver=self.solver) elif self.solver == 'lobpcg': message = "Not equal to tolerance" with pytest.raises(AssertionError, match=message): u2, s2, vh2 = svds(A, k, maxiter=1, solver=self.solver) assert_allclose(np.abs(u2), np.abs(u)) elif self.solver == 'propack': message = "k=1 singular triplets did not converge within" with pytest.raises(np.linalg.LinAlgError, match=message): svds(A, k, maxiter=1, solver=self.solver) u, s, vh = svds(A, k, solver=self.solver) # default maxiter _check_svds(A, k, u, s, vh) @pytest.mark.parametrize("rsv", (True, False, 'u', 'vh')) @pytest.mark.parametrize("shape", ((5, 7), (6, 6), (7, 5))) def test_svd_return_singular_vectors(self, rsv, shape): # check that the return_singular_vectors parameter works as expected rng = np.random.default_rng(0) A = rng.random(shape) k = 2 M, N = shape u, s, vh = sorted_svd(A, k) respect_u = True if self.solver == 'propack' else M <= N respect_vh = True if self.solver == 'propack' else M > N if rsv is False: s2 = svds(A, k, return_singular_vectors=rsv, solver=self.solver, random_state=rng) assert_allclose(s2, s) elif rsv == 'u' and respect_u: u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv, solver=self.solver, random_state=rng) assert_allclose(np.abs(u2), np.abs(u)) assert_allclose(s2, s) assert vh2 is None elif rsv == 'vh' and respect_vh: u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv, solver=self.solver, random_state=rng) assert u2 is None assert_allclose(s2, s) assert_allclose(np.abs(vh2), np.abs(vh)) else: u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv, solver=self.solver, random_state=rng) assert_allclose(np.abs(u2), np.abs(u)) assert_allclose(s2, s) assert_allclose(np.abs(vh2), np.abs(vh)) # --- Test Basic Functionality --- # Tests the accuracy of each solver for real and complex matrices provided # as list, dense array, sparse matrix, and LinearOperator. A1 = [[1, 2, 3], [3, 4, 3], [1 + 1j, 0, 2], [0, 0, 1]] A2 = [[1, 2, 3, 8 + 5j], [3 - 2j, 4, 3, 5], [1, 0, 2, 3], [0, 0, 1, 0]] @pytest.mark.parametrize('A', (A1, A2)) @pytest.mark.parametrize('k', range(1, 5)) # PROPACK fails a lot if @pytest.mark.parametrize('which', ("SM", "LM")) @pytest.mark.parametrize('real', (True, False)) @pytest.mark.parametrize('transpose', (False, True)) # In gh-14299, it was suggested the `svds` should _not_ work with lists @pytest.mark.parametrize('lo_type', (np.asarray, csc_matrix, aslinearoperator)) def test_svd_simple(self, A, k, real, transpose, lo_type): A = np.asarray(A) A = np.real(A) if real else A A = A.T if transpose else A A2 = lo_type(A) # could check for the appropriate errors, but that is tested above if k > min(A.shape): pytest.skip("`k` cannot be greater than `min(A.shape)`") if self.solver != 'propack' and k >= min(A.shape): pytest.skip("Only PROPACK supports complete SVD") if self.solver == 'arpack' and not real and k == min(A.shape) - 1: pytest.skip("ARPACK has additional restriction for complex dtype") u, s, vh = svds(A2, k, solver=self.solver) _check_svds(A, k, u, s, vh) def test_svd_linop(self): solver = self.solver nmks = [(6, 7, 3), (9, 5, 4), (10, 8, 5)] def reorder(args): U, s, VH = args j = np.argsort(s) return U[:, j], s[j], VH[j, :] for n, m, k in nmks: # Test svds on a LinearOperator. A = np.random.RandomState(52).randn(n, m) L = CheckingLinearOperator(A) if solver == 'propack': v0 = np.ones(n) else: v0 = np.ones(min(A.shape)) U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver)) U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver)) assert_allclose(np.abs(U1), np.abs(U2)) assert_allclose(s1, s2) assert_allclose(np.abs(VH1), np.abs(VH2)) assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)), np.dot(U2, np.dot(np.diag(s2), VH2))) # Try again with which="SM". A = np.random.RandomState(1909).randn(n, m) L = CheckingLinearOperator(A) # TODO: arpack crashes when v0=v0, which="SM" kwargs = {'v0': v0} if solver not in {None, 'arpack'} else {} U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver, **kwargs)) U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver, **kwargs)) assert_allclose(np.abs(U1), np.abs(U2)) assert_allclose(s1, s2) assert_allclose(np.abs(VH1), np.abs(VH2)) assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)), np.dot(U2, np.dot(np.diag(s2), VH2))) if k < min(n, m) - 1: # Complex input and explicit which="LM". for (dt, eps) in [(complex, 1e-7), (np.complex64, 1e-3)]: rng = np.random.RandomState(1648) A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt) L = CheckingLinearOperator(A) U1, s1, VH1 = reorder(svds(A, k, which="LM", solver=solver)) U2, s2, VH2 = reorder(svds(L, k, which="LM", solver=solver)) assert_allclose(np.abs(U1), np.abs(U2), rtol=eps) assert_allclose(s1, s2, rtol=eps) assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps) assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)), np.dot(U2, np.dot(np.diag(s2), VH2)), rtol=eps) # --- Test Edge Cases --- # Checks a few edge cases. @pytest.mark.parametrize("shape", ((6, 5), (5, 5), (5, 6))) @pytest.mark.parametrize("dtype", (float, complex)) def test_svd_LM_ones_matrix(self, shape, dtype): # Check that svds can deal with matrix_rank less than k in LM mode. k = 3 n, m = shape A = np.ones((n, m), dtype=dtype) U, s, VH = svds(A, k, solver=self.solver) # Check some generic properties of svd. _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False) # Check that the largest singular value is near sqrt(n*m) # and the other singular values have been forced to zero. assert_allclose(np.max(s), np.sqrt(n*m)) assert_array_equal(sorted(s)[:-1], 0) @pytest.mark.parametrize("shape", ((3, 4), (4, 4), (4, 3), (4, 2))) @pytest.mark.parametrize("dtype", (float, complex)) def test_svd_LM_zeros_matrix(self, shape, dtype): # Check that svds can deal with matrices containing only zeros; # see https://github.com/scipy/scipy/issues/3452/ # shape = (4, 2) is included because it is the particular case # reported in the issue k = 1 n, m = shape A = np.zeros((n, m), dtype=dtype) if (self.solver == 'arpack' and dtype is complex and k == min(A.shape) - 1): pytest.skip("ARPACK has additional restriction for complex dtype") U, s, VH = svds(A, k, solver=self.solver) # Check some generic properties of svd. _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False) # Check that the singular values are zero. assert_array_equal(s, 0) # --- Perform tests with each solver --- class Test_SVDS_once(): @pytest.mark.parametrize("solver", ['ekki', object]) def test_svds_input_validation_solver(self, solver): message = "solver must be one of" with pytest.raises(ValueError, match=message): svds(np.ones((3, 4)), k=2, solver=solver) class Test_SVDS_ARPACK(SVDSCommonTests): def setup_method(self): self.solver = 'arpack' @pytest.mark.parametrize("ncv", list(range(-1, 8)) + [4.5, "5"]) def test_svds_input_validation_ncv_1(self, ncv): rng = np.random.default_rng(0) A = rng.random((6, 7)) k = 3 if ncv in {4, 5}: u, s, vh = svds(A, k=k, ncv=ncv, solver=self.solver) # partial decomposition, so don't check that u@diag(s)@vh=A; # do check that scipy.sparse.linalg.svds ~ scipy.linalg.svd _check_svds(A, k, u, s, vh) else: message = ("`ncv` must be an integer satisfying") with pytest.raises(ValueError, match=message): svds(A, k=k, ncv=ncv, solver=self.solver) def test_svds_input_validation_ncv_2(self): # I think the stack trace is reasonable when `ncv` can't be converted # to an int. message = "int() argument must be a" with pytest.raises(TypeError, match=re.escape(message)): svds(np.eye(10), ncv=[], solver=self.solver) message = "invalid literal for int()" with pytest.raises(ValueError, match=message): svds(np.eye(10), ncv="hi", solver=self.solver) # I can't see a robust relationship between `ncv` and relevant outputs # (e.g. accuracy, time), so no test of the parameter. class Test_SVDS_LOBPCG(SVDSCommonTests): def setup_method(self): self.solver = 'lobpcg' def test_svd_random_state_3(self): pytest.xfail("LOBPCG is having trouble with accuracy.") class Test_SVDS_PROPACK(SVDSCommonTests): def setup_method(self): self.solver = 'propack' def test_svd_LM_ones_matrix(self): message = ("PROPACK does not return orthonormal singular vectors " "associated with zero singular values.") # There are some other issues with this matrix of all ones, e.g. # `which='sm'` and `k=1` returns the largest singular value pytest.xfail(message) def test_svd_LM_zeros_matrix(self): message = ("PROPACK does not return orthonormal singular vectors " "associated with zero singular values.") pytest.xfail(message)
{ "content_hash": "698f92007167c268689e0eb4150bdaa6", "timestamp": "", "source": "github", "line_count": 637, "max_line_length": 79, "avg_line_length": 39.55102040816327, "alnum_prop": 0.5727157259665, "repo_name": "matthew-brett/scipy", "id": "cac2531a4767f95555f880f69255bcc2ad442add", "size": "25194", "binary": false, "copies": "1", "ref": "refs/heads/polished-meson-windows", "path": "scipy/sparse/linalg/_eigen/tests/test_svds.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "4818671" }, { "name": "C++", "bytes": "3181034" }, { "name": "CMake", "bytes": "29273" }, { "name": "Cython", "bytes": "1035101" }, { "name": "Dockerfile", "bytes": "9777" }, { "name": "Fortran", "bytes": "5298461" }, { "name": "MATLAB", "bytes": "4346" }, { "name": "Makefile", "bytes": "778" }, { "name": "Meson", "bytes": "133294" }, { "name": "PowerShell", "bytes": "1554" }, { "name": "Python", "bytes": "14259543" }, { "name": "Shell", "bytes": "4415" }, { "name": "TeX", "bytes": "52106" } ], "symlink_target": "" }
""" This module contains errors/exceptions and warnings of general use for astropy. Exceptions that are specific to a given subpackage should *not* be here, but rather in the particular subpackage. """ class AstropyWarning(Warning): """ The base warning class from which all Astropy warnings should inherit. Any warning inheriting from this class is handled by the Astropy logger. """ class AstropyUserWarning(UserWarning, AstropyWarning): """ The primary warning class for Astropy. Use this if you do not need a specific sub-class. """ class AstropyDeprecationWarning(AstropyWarning): """ A warning class to indicate a deprecated feature. """ class AstropyPendingDeprecationWarning(PendingDeprecationWarning, AstropyWarning): """ A warning class to indicate a soon-to-be deprecated feature. """ class AstropyBackwardsIncompatibleChangeWarning(AstropyWarning): """ A warning class indicating a change in astropy that is incompatible with previous versions. The suggested procedure is to issue this warning for the version in which the change occurs, and remove it for all following versions. """ class _NoValue: """Special keyword value. This class may be used as the default value assigned to a deprecated keyword in order to check if it has been given a user defined value. """ def __repr__(self): return 'astropy.utils.exceptions.NoValue' NoValue = _NoValue()
{ "content_hash": "4911a2b8e181c59bf7d9c4b731a55595", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 82, "avg_line_length": 26.642857142857142, "alnum_prop": 0.7238605898123325, "repo_name": "DougBurke/astropy", "id": "1b4ca299e079bf85e6ff9c28bb77dd875752a167", "size": "1556", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "astropy/utils/exceptions.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "367279" }, { "name": "C++", "bytes": "1057" }, { "name": "HTML", "bytes": "1172" }, { "name": "Python", "bytes": "8390850" }, { "name": "TeX", "bytes": "805" } ], "symlink_target": "" }
import numpy as np import multiprocessing as mp import sys global inputStr def grabLine(positions): global inputStr if positions[-1] != 'end': return inputStr[positions[0]:positions[1]] else: print inputStr[positions[0]:] return inputStr[positions[0]:] def wrap(inputStr='',width=60): if inputStr: positions = np.arange(0,len(inputStr),width) posFinal = [] for i in range(len(positions[:-1])): posFinal += [(positions[i],positions[i+1])] posFinal += [(positions[-1],'end')] print posFinal[0:10] if __name__ == '__main__': p=mp.Pool(processes=8) wrappedLines = p.map(grabLine,posFinal) p.close() p.join() #print wrappedLines[0:10] return '\n'.join(wrappedLines) else: return '' e = sys.argv try: inputStr=sys.argv[1] try: width = sys.argv[2] except: width = 60 except: inputStr = '' width = 60 wrap(inputStr,width)
{ "content_hash": "129d5dbc4ca2ec577580eabc130aa57f", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 55, "avg_line_length": 23.522727272727273, "alnum_prop": 0.5584541062801932, "repo_name": "jlevy44/Joshua-Levy-Synteny-Analysis", "id": "8975d116d1a426fea8c6f79a057555e1d450d102", "size": "1035", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hybridumAnalysisScripts/splitZmays/wrap_Josh.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "632" }, { "name": "Jupyter Notebook", "bytes": "66306" }, { "name": "Python", "bytes": "397346" }, { "name": "Shell", "bytes": "646" } ], "symlink_target": "" }
from __future__ import absolute_import from itertools import izip import six from sentry.api.serializers import Serializer, register, serialize from sentry.constants import LOG_LEVELS from sentry.models import (GroupTombstone, User) @register(GroupTombstone) class GroupTombstoneSerializer(Serializer): def get_attrs(self, item_list, user): user_list = list(User.objects.filter(id__in=[item.actor_id for item in item_list])) users = {u.id: d for u, d in izip(user_list, serialize(user_list, user))} attrs = {} for item in item_list: attrs[item] = { 'user': users.get(item.actor_id, {}), } return attrs def serialize(self, obj, attrs, user): return { 'id': six.text_type(obj.id), 'level': LOG_LEVELS.get(obj.level, 'unknown'), 'message': obj.message, 'culprit': obj.culprit, 'type': obj.get_event_type(), 'actor': attrs.get('user'), }
{ "content_hash": "9885b0ced361b4936f85dc16ae42072d", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 91, "avg_line_length": 31.75, "alnum_prop": 0.6003937007874016, "repo_name": "jean/sentry", "id": "f33631fcae730aa5949c2a0d77b79cca9f2fda2e", "size": "1016", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/sentry/api/serializers/models/grouptombstone.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "296112" }, { "name": "HTML", "bytes": "314273" }, { "name": "JavaScript", "bytes": "1293918" }, { "name": "Lua", "bytes": "57158" }, { "name": "Makefile", "bytes": "6632" }, { "name": "Python", "bytes": "24515298" }, { "name": "Ruby", "bytes": "4410" }, { "name": "Shell", "bytes": "2942" } ], "symlink_target": "" }
from python.decorators import euler_timer from python.functions import get_data def translate(message, key): len_key = len(key) result = message[:] for i in range(len_key): for j in range(i, len(result), len_key): result[j] = result[j] ^ key[i] result = ''.join(chr(val) for val in result) return result def main(verbose=False): message = get_data(59).split(',') message = [int(char) for char in message] possible_keys = [] for ascii1 in range(97, 123): for ascii2 in range(97, 123): for ascii3 in range(97, 123): possible_keys.append([ascii1, ascii2, ascii3]) for key in possible_keys: curr = translate(message, key) if (curr.upper().find('THE') != -1 and curr.upper().find('IS') != -1 and curr.upper().find('AND') != -1 and curr.upper().find('OF') != -1 and curr.upper().find('ARE') != -1): break key_as_word = ''.join(chr(val) for val in key) result = '\n\nActual Message:\n%s\n\nThe key is: %s or %s.' % ( curr, key_as_word, key) if verbose: return '%s%s' % (sum(ord(letter) for letter in curr), result) else: return sum(ord(letter) for letter in curr) if __name__ == '__main__': print euler_timer(59)(main)(verbose=True)
{ "content_hash": "266d276efe1b3eb801faefa5b3500d04", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 69, "avg_line_length": 29.148936170212767, "alnum_prop": 0.5525547445255474, "repo_name": "dhermes/project-euler", "id": "ef235e9538233a52b4c430bdba8c1b013772cebc", "size": "1393", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/complete/no059.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Java", "bytes": "70228" }, { "name": "JavaScript", "bytes": "95607" }, { "name": "Python", "bytes": "270158" } ], "symlink_target": "" }