# Dataset record: repo Khan/git-bigfile, path vendor/boto/rds2/layer1.py (158232 bytes)
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.rds2 import exceptions
from boto.compat import json
class RDSConnection(AWSQueryConnection):
"""
Amazon Relational Database Service
Amazon Relational Database Service (Amazon RDS) is a web service
that makes it easier to set up, operate, and scale a relational
database in the cloud. It provides cost-efficient, resizable
capacity for an industry-standard relational database and manages
common database administration tasks, freeing up developers to
focus on what makes their applications and businesses unique.
Amazon RDS gives you access to the capabilities of a familiar
MySQL or Oracle database server. This means the code,
applications, and tools you already use today with your existing
MySQL or Oracle databases work with Amazon RDS without
modification. Amazon RDS automatically backs up your database and
maintains the database software that powers your DB instance.
Amazon RDS is flexible: you can scale your database instance's
compute resources and storage capacity to meet your application's
demand. As with all Amazon Web Services, there are no up-front
investments, and you pay only for the resources you use.
This is the Amazon RDS API Reference. It contains a comprehensive
description of all Amazon RDS Query APIs and data types. Note that
this API is asynchronous and some actions may require polling to
determine when an action has been applied. See the parameter
description to determine if a change is applied immediately or on
the next instance reboot or during the maintenance window. For
more information on Amazon RDS concepts and usage scenarios, go to
the `Amazon RDS User Guide`_.
"""
APIVersion = "2013-09-09"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"InvalidSubnet": exceptions.InvalidSubnet,
"DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded,
"DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists,
"DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded,
"InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded,
"InvalidRestore": exceptions.InvalidRestore,
"InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState,
"AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
"DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists,
"InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity,
"ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded,
"DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound,
"DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists,
"ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound,
"DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs,
"InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState,
"InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
"ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound,
"SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
"SNSNoAuthorization": exceptions.SNSNoAuthorization,
"SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded,
"OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded,
"DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound,
"SNSInvalidTopic": exceptions.SNSInvalidTopic,
"InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState,
"DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound,
"InvalidOptionGroupState": exceptions.InvalidOptionGroupState,
"SourceNotFound": exceptions.SourceNotFound,
"SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
"EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded,
"DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported,
"InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState,
"InvalidDBSubnetState": exceptions.InvalidDBSubnetState,
"InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState,
"SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist,
"DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded,
"ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ,
"AuthorizationNotFound": exceptions.AuthorizationNotFound,
"OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists,
"SubscriptionNotFound": exceptions.SubscriptionNotFound,
"DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure,
"PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled,
"AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists,
"DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded,
"OptionGroupNotFound": exceptions.OptionGroupNotFound,
"DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists,
"DBInstanceNotFound": exceptions.DBInstanceNotFound,
"ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists,
"InvalidDBInstanceState": exceptions.InvalidDBInstanceState,
"DBSnapshotNotFound": exceptions.DBSnapshotNotFound,
"DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists,
"StorageQuotaExceeded": exceptions.StorageQuotaExceeded,
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(RDSConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
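# Illustrative usage sketch (not part of the original module): with AWS
# credentials available via the environment or boto config, a client for the
# default region can be built either through the module helper or by
# instantiating the class directly, e.g.
#
#   import boto.rds2
#   conn = boto.rds2.connect_to_region('us-east-1')
#
# or
#
#   conn = RDSConnection(aws_access_key_id='...', aws_secret_access_key='...')
#
# The example comments below assume such a `conn` object.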
def add_source_identifier_to_subscription(self, subscription_name,
source_identifier):
"""
Adds a source identifier to an existing RDS event notification
subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to add a source identifier to.
:type source_identifier: string
:param source_identifier:
The identifier of the event source to be added. An identifier must
begin with a letter and must contain only ASCII letters, digits,
and hyphens; it cannot end with a hyphen or contain two consecutive
hyphens.
Constraints:
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
"""
params = {
'SubscriptionName': subscription_name,
'SourceIdentifier': source_identifier,
}
return self._make_request(
action='AddSourceIdentifierToSubscription',
verb='POST',
path='/', params=params)
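# Example sketch (hypothetical names): add a DB instance as an event source
# to a subscription created earlier with create_event_subscription():
#
#   conn.add_source_identifier_to_subscription('my-subscription',
#                                              'mydbinstance')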
def add_tags_to_resource(self, resource_name, tags):
"""
Adds metadata tags to an Amazon RDS resource. These tags can
also be used with cost allocation reporting to track cost
associated with Amazon RDS resources, or used in Condition
statement in IAM policy for Amazon RDS.
For an overview on tagging Amazon RDS resources, see `Tagging
Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource the tags will be added
to. This value is an Amazon Resource Name (ARN). For information
about creating an ARN, see ` Constructing an RDS Amazon Resource
Name (ARN)`_.
:type tags: list
:param tags: The tags to be assigned to the Amazon RDS resource.
"""
params = {'ResourceName': resource_name, }
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='AddTagsToResource',
verb='POST',
path='/', params=params)
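# Example sketch: build_complex_list_params zips each tag with the
# ('Key', 'Value') member names, so tags are passed as (key, value) pairs
# (the ARN below is a placeholder):
#
#   conn.add_tags_to_resource(
#       'arn:aws:rds:us-east-1:123456789012:db:mydbinstance',
#       [('environment', 'production'), ('team', 'data')])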
def authorize_db_security_group_ingress(self, db_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_id=None,
ec2_security_group_owner_id=None):
"""
Enables ingress to a DBSecurityGroup using one of two forms of
authorization. First, EC2 or VPC security groups can be added
to the DBSecurityGroup if the application using the database
is running on EC2 or VPC instances. Second, IP ranges are
available if the application accessing your database is
running on the Internet. The required parameters for this API
are one of the following: a CIDR range, an EC2SecurityGroupId
(for VPC), or, for non-VPC, an EC2SecurityGroupOwnerId together
with either an EC2SecurityGroupName or an EC2SecurityGroupId.
You cannot authorize ingress from an EC2 security group in one
Region to an Amazon RDS DB instance in another. You cannot
authorize ingress from a VPC security group in one VPC to an
Amazon RDS DB instance in another.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to add
authorization to.
:type cidrip: string
:param cidrip: The IP range to authorize.
:type ec2_security_group_name: string
:param ec2_security_group_name: Name of the EC2 security group to
authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
provided. Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
:type ec2_security_group_id: string
:param ec2_security_group_id: Id of the EC2 security group to
authorize. For VPC DB security groups, `EC2SecurityGroupId` must be
provided. Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: AWS Account Number of the owner of
the EC2 security group specified in the EC2SecurityGroupName
parameter. The AWS Access Key ID is not an acceptable value. For
VPC DB security groups, `EC2SecurityGroupId` must be provided.
Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
"""
params = {'DBSecurityGroupName': db_security_group_name, }
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_id is not None:
params['EC2SecurityGroupId'] = ec2_security_group_id
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='AuthorizeDBSecurityGroupIngress',
verb='POST',
path='/', params=params)
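# Example sketch: for a classic (non-VPC) security group, ingress can be
# opened for a CIDR range; for VPC, pass ec2_security_group_id instead
# (values are placeholders):
#
#   conn.authorize_db_security_group_ingress('mysecuritygroup',
#                                            cidrip='203.0.113.0/24')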
def copy_db_snapshot(self, source_db_snapshot_identifier,
target_db_snapshot_identifier, tags=None):
"""
Copies the specified DBSnapshot. The source DBSnapshot must be
in the "available" state.
:type source_db_snapshot_identifier: string
:param source_db_snapshot_identifier: The identifier for the source DB
snapshot.
Constraints:
+ Must be the identifier for a valid system snapshot in the "available"
state.
Example: `rds:mydb-2012-04-02-00-01`
:type target_db_snapshot_identifier: string
:param target_db_snapshot_identifier: The identifier for the copied
snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-db-snapshot`
:type tags: list
:param tags: A list of tags.
"""
params = {
'SourceDBSnapshotIdentifier': source_db_snapshot_identifier,
'TargetDBSnapshotIdentifier': target_db_snapshot_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CopyDBSnapshot',
verb='POST',
path='/', params=params)
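# Example sketch: copy an automated system snapshot to a manual one, using
# the identifier formats from the docstring above:
#
#   conn.copy_db_snapshot('rds:mydb-2012-04-02-00-01', 'my-db-snapshot')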
def create_db_instance(self, db_instance_identifier, allocated_storage,
db_instance_class, engine, master_username,
master_user_password, db_name=None,
db_security_groups=None,
vpc_security_group_ids=None,
availability_zone=None, db_subnet_group_name=None,
preferred_maintenance_window=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None, port=None,
multi_az=None, engine_version=None,
auto_minor_version_upgrade=None,
license_model=None, iops=None,
option_group_name=None, character_set_name=None,
publicly_accessible=None, tags=None):
"""
Creates a new DB instance.
:type db_name: string
:param db_name: The meaning of this parameter differs according to the
database engine you use.
**MySQL**
The name of the database to create when the DB instance is created. If
this parameter is not specified, no database is created in the DB
instance.
Constraints:
+ Must contain 1 to 64 alphanumeric characters
+ Cannot be a word reserved by the specified database engine
Type: String
**Oracle**
The Oracle System ID (SID) of the created DB instance.
Default: `ORCL`
Constraints:
+ Cannot be longer than 8 characters
**SQL Server**
Not applicable. Must be null.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier. This
parameter is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15
for SQL Server).
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
Example: `mydbinstance`
:type allocated_storage: integer
:param allocated_storage: The amount of storage (in gigabytes) to be
initially allocated for the database instance.
**MySQL**
Constraints: Must be an integer from 5 to 1024.
Type: Integer
**Oracle**
Constraints: Must be an integer from 10 to 1024.
**SQL Server**
Constraints: Must be an integer from 200 to 1024 (Standard Edition and
Enterprise Edition) or from 30 to 1024 (Express Edition and Web
Edition)
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the DB
instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type engine: string
:param engine: The name of the database engine to be used for this
instance.
Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` |
`sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web`
:type master_username: string
:param master_username:
The name of the master user for the DB instance.
**MySQL**
Constraints:
+ Must be 1 to 16 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
Type: String
**Oracle**
Constraints:
+ Must be 1 to 30 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
**SQL Server**
Constraints:
+ Must be 1 to 128 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word for the chosen database engine.
:type master_user_password: string
:param master_user_password: The password for the master database user.
Can be any printable ASCII character except "/", '"', or "@".
Type: String
**MySQL**
Constraints: Must contain from 8 to 41 characters.
**Oracle**
Constraints: Must contain from 8 to 30 characters.
**SQL Server**
Constraints: Must contain from 8 to 128 characters.
:type db_security_groups: list
:param db_security_groups: A list of DB security groups to associate
with this DB instance.
Default: The default DB security group for the database engine.
:type vpc_security_group_ids: list
:param vpc_security_group_ids: A list of EC2 VPC security groups to
associate with this DB instance.
Default: The default EC2 VPC security group for the DB subnet group's
VPC.
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's
region.
Example: `us-east-1d`
Constraint: The AvailabilityZone parameter cannot be specified if the
MultiAZ parameter is set to `True`. The specified Availability Zone
must be in the same region as the current endpoint.
:type db_subnet_group_name: string
:param db_subnet_group_name: A DB subnet group to associate with this
DB instance.
If there is no DB subnet group, then it is a non-VPC DB instance.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur.
Format: `ddd:hh24:mi-ddd:hh24:mi`
Default: A 30-minute window selected at random from an 8-hour block of
time per region, occurring on a random day of the week. To see the
time blocks available, see ` Adjusting the Preferred Maintenance
Window`_ in the Amazon RDS User Guide.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
Constraints: Minimum 30-minute window.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group to associate with this DB instance.
If this argument is omitted, the default DBParameterGroup for the
specified engine will be used.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type backup_retention_period: integer
:param backup_retention_period:
The number of days for which automated backups are retained. Setting
this parameter to a positive number enables backups. Setting this
parameter to 0 disables automated backups.
Default: 1
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas
:type preferred_backup_window: string
:param preferred_backup_window: The daily time range during which
automated backups are created if automated backups are enabled,
using the `BackupRetentionPeriod` parameter.
Default: A 30-minute window selected at random from an 8-hour block of
time per region. See the Amazon RDS User Guide for the time blocks
for each region from which the default backup windows are assigned.
Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
Universal Time Coordinated (UTC). Must not conflict with the
preferred maintenance window. Must be at least 30 minutes.
:type port: integer
:param port: The port number on which the database accepts connections.
**MySQL**
Default: `3306`
Valid Values: `1150-65535`
Type: Integer
**Oracle**
Default: `1521`
Valid Values: `1150-65535`
**SQL Server**
Default: `1433`
Valid Values: `1150-65535` except for `1434` and `3389`.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
You cannot set the AvailabilityZone parameter if the MultiAZ
parameter is set to true.
:type engine_version: string
:param engine_version: The version number of the database engine to
use.
**MySQL**
Example: `5.1.42`
Type: String
**Oracle**
Example: `11.2.0.2.v2`
Type: String
**SQL Server**
Example: `10.50.2789.0.v1`
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the DB instance during the
maintenance window.
Default: `True`
:type license_model: string
:param license_model: License model information for this DB instance.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type character_set_name: string
:param character_set_name: For supported engines, indicates that the DB
instance should be associated with the specified CharacterSet.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:** true
+ **VPC:** false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'AllocatedStorage': allocated_storage,
'DBInstanceClass': db_instance_class,
'Engine': engine,
'MasterUsername': master_username,
'MasterUserPassword': master_user_password,
}
if db_name is not None:
params['DBName'] = db_name
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if port is not None:
params['Port'] = port
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if character_set_name is not None:
params['CharacterSetName'] = character_set_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBInstance',
verb='POST',
path='/', params=params)
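# Example sketch: a minimal MySQL instance relying on the documented defaults
# for everything else (identifier and credentials are placeholders):
#
#   conn.create_db_instance(
#       db_instance_identifier='mydbinstance',
#       allocated_storage=5,
#       db_instance_class='db.t1.micro',
#       engine='MySQL',
#       master_username='admin',
#       master_user_password='change-me-please')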
def create_db_instance_read_replica(self, db_instance_identifier,
source_db_instance_identifier,
db_instance_class=None,
availability_zone=None, port=None,
auto_minor_version_upgrade=None,
iops=None, option_group_name=None,
publicly_accessible=None, tags=None):
"""
Creates a DB instance that acts as a read replica of a source
DB instance.
All read replica DB instances are created as Single-AZ
deployments with backups disabled. All other DB instance
attributes (including DB security groups and DB parameter
groups) are inherited from the source DB instance, except as
specified below.
The source DB instance must have backup retention enabled.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier of the read
replica. This is the unique key that identifies a DB instance. This
parameter is stored as a lowercase string.
:type source_db_instance_identifier: string
:param source_db_instance_identifier: The identifier of the DB instance
that will act as the source for the read replica. Each DB instance
can have up to five read replicas.
Constraints: Must be the identifier of an existing DB instance that is
not already a read replica DB instance.
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the read
replica.
Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge
| db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge`
Default: Inherits from the source DB instance.
:type availability_zone: string
:param availability_zone: The Amazon EC2 Availability Zone that the
read replica will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's
region.
Example: `us-east-1d`
:type port: integer
:param port: The port number that the DB instance uses for connections.
Default: Inherits from the source DB instance
Valid Values: `1150-65535`
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the read replica during the
maintenance window.
Default: Inherits from the source DB instance
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
:type option_group_name: string
:param option_group_name: The option group the DB instance will be
associated with. If omitted, the default option group for the
engine specified will be used.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:** true
+ **VPC:** false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'SourceDBInstanceIdentifier': source_db_instance_identifier,
}
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if port is not None:
params['Port'] = port
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBInstanceReadReplica',
verb='POST',
path='/', params=params)
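# Example sketch: create a read replica of the instance above; instance
# class, port, and option group are inherited from the source unless
# overridden:
#
#   conn.create_db_instance_read_replica('mydbinstance-replica1',
#                                        'mydbinstance')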
def create_db_parameter_group(self, db_parameter_group_name,
db_parameter_group_family, description,
tags=None):
"""
Creates a new DB parameter group.
A DB parameter group is initially created with the default
parameters for the database engine used by the DB instance. To
provide custom values for any of the parameters, you must
modify the group after creating it using
ModifyDBParameterGroup. Once you've created a DB parameter
group, you need to associate it with your DB instance using
ModifyDBInstance. When you associate a new DB parameter group
with a running DB instance, you need to reboot the DB instance
for the new DB parameter group and associated settings to take
effect.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
This value is stored as a lower-case string.
:type db_parameter_group_family: string
:param db_parameter_group_family: The DB parameter group family name. A
DB parameter group can be associated with one and only one DB
parameter group family, and can be applied only to a DB instance
running a database engine and engine version compatible with that
DB parameter group family.
:type description: string
:param description: The description for the DB parameter group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBParameterGroupName': db_parameter_group_name,
'DBParameterGroupFamily': db_parameter_group_family,
'Description': description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBParameterGroup',
verb='POST',
path='/', params=params)
def create_db_security_group(self, db_security_group_name,
db_security_group_description, tags=None):
"""
Creates a new DB security group. DB security groups control
access to a DB instance.
:type db_security_group_name: string
:param db_security_group_name: The name for the DB security group. This
value is stored as a lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
Example: `mysecuritygroup`
:type db_security_group_description: string
:param db_security_group_description: The description for the DB
security group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBSecurityGroupName': db_security_group_name,
'DBSecurityGroupDescription': db_security_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSecurityGroup',
verb='POST',
path='/', params=params)
def create_db_snapshot(self, db_snapshot_identifier,
db_instance_identifier, tags=None):
"""
Creates a DBSnapshot. The source DBInstance must be in
"available" state.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The identifier for the DB snapshot.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This is the unique key that identifies a DB
instance. This parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBSnapshotIdentifier': db_snapshot_identifier,
'DBInstanceIdentifier': db_instance_identifier,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSnapshot',
verb='POST',
path='/', params=params)
def create_db_subnet_group(self, db_subnet_group_name,
db_subnet_group_description, subnet_ids,
tags=None):
"""
Creates a new DB subnet group. DB subnet groups must contain
at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 Subnet IDs for the DB subnet group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'DBSubnetGroupName': db_subnet_group_name,
'DBSubnetGroupDescription': db_subnet_group_description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateDBSubnetGroup',
verb='POST',
path='/', params=params)
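# Example sketch: a DB subnet group needs subnets covering at least two
# Availability Zones (subnet IDs are placeholders):
#
#   conn.create_db_subnet_group('mysubnetgroup',
#                               'Subnets for my VPC DB instances',
#                               ['subnet-0abc1234', 'subnet-0def5678'])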
def create_event_subscription(self, subscription_name, sns_topic_arn,
source_type=None, event_categories=None,
source_ids=None, enabled=None, tags=None):
"""
Creates an RDS event notification subscription. This action
requires a topic ARN (Amazon Resource Name) created by either
the RDS console, the SNS console, or the SNS API. To obtain an
ARN with SNS, you must create a topic in Amazon SNS and
subscribe to the topic. The ARN is displayed in the SNS
console.
You can specify the type of source (SourceType) you want to be
notified of, provide a list of RDS sources (SourceIds) that
trigger the events, and provide a list of event categories
(EventCategories) for events you want to be notified of. For
example, you can specify SourceType = db-instance, SourceIds =
mydbinstance1, mydbinstance2 and EventCategories =
Availability, Backup.
If you specify both the SourceType and SourceIds, such as
SourceType = db-instance and SourceIdentifier = myDBInstance1,
you will be notified of all the db-instance events for the
specified source. If you specify a SourceType but do not
specify a SourceIdentifier, you will receive notice of the
events for that source type for all your RDS sources. If you
specify neither the SourceType nor the SourceIdentifier,
you will be notified of events generated from all RDS sources
belonging to your customer account.
:type subscription_name: string
:param subscription_name: The name of the subscription.
Constraints: The name must be less than 255 characters.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a DB instance, you would set this parameter to db-instance. If
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type source_ids: list
:param source_ids:
The list of identifiers of the event sources for which events will be
returned. If not specified, then all sources are included in the
response. An identifier must begin with a letter and must contain
only ASCII letters, digits, and hyphens; it cannot end with a
hyphen or contain two consecutive hyphens.
Constraints:
+ If SourceIds are supplied, SourceType must also be provided.
+ If the source type is a DB instance, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is a DB security group, a `DBSecurityGroupName`
must be supplied.
+ If the source type is a DB parameter group, a `DBParameterGroupName`
must be supplied.
+ If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
supplied.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription, set to **false** to create the subscription but not
activate it.
:type tags: list
:param tags: A list of tags.
"""
params = {
'SubscriptionName': subscription_name,
'SnsTopicArn': sns_topic_arn,
}
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if source_ids is not None:
self.build_list_params(params,
source_ids,
'SourceIds.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateEventSubscription',
verb='POST',
path='/', params=params)
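# Example sketch: subscribe an SNS topic (placeholder ARN) to availability
# and backup events for two DB instances:
#
#   conn.create_event_subscription(
#       'my-subscription',
#       'arn:aws:sns:us-east-1:123456789012:my-rds-topic',
#       source_type='db-instance',
#       event_categories=['availability', 'backup'],
#       source_ids=['mydbinstance1', 'mydbinstance2'],
#       enabled=True)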
def create_option_group(self, option_group_name, engine_name,
major_engine_version, option_group_description,
tags=None):
"""
Creates a new option group. You can create up to 20 option
groups.
:type option_group_name: string
:param option_group_name: Specifies the name of the option group to be
created.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `myoptiongroup`
:type engine_name: string
:param engine_name: Specifies the name of the engine that this option
group should be associated with.
:type major_engine_version: string
:param major_engine_version: Specifies the major version of the engine
that this option group should be associated with.
:type option_group_description: string
:param option_group_description: The description of the option group.
:type tags: list
:param tags: A list of tags.
"""
params = {
'OptionGroupName': option_group_name,
'EngineName': engine_name,
'MajorEngineVersion': major_engine_version,
'OptionGroupDescription': option_group_description,
}
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='CreateOptionGroup',
verb='POST',
path='/', params=params)
def delete_db_instance(self, db_instance_identifier,
skip_final_snapshot=None,
final_db_snapshot_identifier=None):
"""
The DeleteDBInstance action deletes a previously provisioned
DB instance. A successful response from the web service
indicates the request was received correctly. When you delete
a DB instance, all automated backups for that instance are
deleted and cannot be recovered. Manual DB snapshots of the DB
instance to be deleted are not deleted.
If a final DB snapshot is requested, the status of the RDS
instance will be "deleting" until the DB snapshot is created.
The `DescribeDBInstances` API action can be used to monitor the
status of this operation. The action cannot be canceled or
reverted once submitted.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier for the DB instance to be deleted. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type skip_final_snapshot: boolean
:param skip_final_snapshot: Determines whether a final DB snapshot is
created before the DB instance is deleted. If `True` is specified,
no DBSnapshot is created. If false is specified, a DB snapshot is
created before the DB instance is deleted.
The FinalDBSnapshotIdentifier parameter must be specified if
SkipFinalSnapshot is `False`.
Default: `False`
:type final_db_snapshot_identifier: string
:param final_db_snapshot_identifier:
The DBSnapshotIdentifier of the new DBSnapshot created when
SkipFinalSnapshot is set to `False`.
Specifying this parameter and also setting the SkipFinalSnapshot
parameter to true results in an error.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if skip_final_snapshot is not None:
params['SkipFinalSnapshot'] = str(
skip_final_snapshot).lower()
if final_db_snapshot_identifier is not None:
params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier
return self._make_request(
action='DeleteDBInstance',
verb='POST',
path='/', params=params)
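# Example sketch: delete an instance but keep a final snapshot; when
# skip_final_snapshot is False (the default), a final snapshot identifier is
# required:
#
#   conn.delete_db_instance('mydbinstance',
#                           skip_final_snapshot=False,
#                           final_db_snapshot_identifier='mydbinstance-final')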
def delete_db_parameter_group(self, db_parameter_group_name):
"""
Deletes a specified DBParameterGroup. The DB parameter group
to be deleted cannot be associated with any DB instances.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ You cannot delete a default DB parameter group
+ Cannot be associated with any DB instances
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
return self._make_request(
action='DeleteDBParameterGroup',
verb='POST',
path='/', params=params)
def delete_db_security_group(self, db_security_group_name):
"""
Deletes a DB security group.
The specified DB security group must not be associated with
any DB instances.
:type db_security_group_name: string
:param db_security_group_name:
The name of the DB security group to delete.
You cannot delete the default DB security group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ Must not be "Default"
+ May not contain spaces
"""
params = {'DBSecurityGroupName': db_security_group_name, }
return self._make_request(
action='DeleteDBSecurityGroup',
verb='POST',
path='/', params=params)
def delete_db_snapshot(self, db_snapshot_identifier):
"""
Deletes a DBSnapshot.
The DBSnapshot must be in the `available` state to be deleted.
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The DBSnapshot identifier.
Constraints: Must be the name of an existing DB snapshot in the
`available` state.
"""
params = {'DBSnapshotIdentifier': db_snapshot_identifier, }
return self._make_request(
action='DeleteDBSnapshot',
verb='POST',
path='/', params=params)
def delete_db_subnet_group(self, db_subnet_group_name):
"""
Deletes a DB subnet group.
The specified database subnet group must not be associated
with any DB instances.
:type db_subnet_group_name: string
:param db_subnet_group_name:
The name of the database subnet group to delete.
You cannot delete the default subnet group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
return self._make_request(
action='DeleteDBSubnetGroup',
verb='POST',
path='/', params=params)
def delete_event_subscription(self, subscription_name):
"""
Deletes an RDS event notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to delete.
"""
params = {'SubscriptionName': subscription_name, }
return self._make_request(
action='DeleteEventSubscription',
verb='POST',
path='/', params=params)
def delete_option_group(self, option_group_name):
"""
Deletes an existing option group.
:type option_group_name: string
:param option_group_name:
The name of the option group to be deleted.
You cannot delete default option groups.
"""
params = {'OptionGroupName': option_group_name, }
return self._make_request(
action='DeleteOptionGroup',
verb='POST',
path='/', params=params)
def describe_db_engine_versions(self, engine=None, engine_version=None,
db_parameter_group_family=None,
max_records=None, marker=None,
default_only=None,
list_supported_character_sets=None):
"""
Returns a list of the available DB engines.
:type engine: string
:param engine: The database engine to return.
:type engine_version: string
:param engine_version: The database engine version to return.
Example: `5.1.49`
:type db_parameter_group_family: string
:param db_parameter_group_family:
The name of a specific DB parameter group family to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
:type default_only: boolean
:param default_only: Indicates that only the default version of the
specified engine or engine and major version combination is
returned.
:type list_supported_character_sets: boolean
:param list_supported_character_sets: If this parameter is specified,
and if the requested engine supports the CharacterSetName parameter
for CreateDBInstance, the response includes a list of supported
character sets for each engine version.
"""
params = {}
if engine is not None:
params['Engine'] = engine
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_parameter_group_family is not None:
params['DBParameterGroupFamily'] = db_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if default_only is not None:
params['DefaultOnly'] = str(
default_only).lower()
if list_supported_character_sets is not None:
params['ListSupportedCharacterSets'] = str(
list_supported_character_sets).lower()
return self._make_request(
action='DescribeDBEngineVersions',
verb='POST',
path='/', params=params)
def describe_db_instances(self, db_instance_identifier=None,
filters=None, max_records=None, marker=None):
"""
Returns information about provisioned RDS instances. This API
supports pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
The user-supplied instance identifier. If this parameter is specified,
information from only the specific DB instance is returned. This
parameter isn't case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBInstances request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords` .
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBInstances',
verb='POST',
path='/', params=params)
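# Example sketch: responses are parsed JSON nested under the action name, so
# a paging loop over all instances might look like this (the response shape
# is an assumption to verify against your boto version):
#
#   marker = None
#   while True:
#       resp = conn.describe_db_instances(marker=marker)
#       result = resp['DescribeDBInstancesResponse'][
#           'DescribeDBInstancesResult']
#       for db in result['DBInstances']:
#           print(db['DBInstanceIdentifier'], db['DBInstanceStatus'])
#       marker = result.get('Marker')
#       if not marker:
#           break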
def describe_db_log_files(self, db_instance_identifier,
filename_contains=None, file_last_written=None,
file_size=None, max_records=None, marker=None):
"""
Returns a list of DB log files for the DB instance.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filename_contains: string
:param filename_contains: Filters the available log files for log file
names that contain the specified string.
:type file_last_written: long
:param file_last_written: Filters the available log files for files
written since the specified date, in POSIX timestamp format.
:type file_size: long
:param file_size: Filters the available log files for files larger than
the specified size.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified, the response includes only records
beyond the marker, up to MaxRecords.
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if filename_contains is not None:
params['FilenameContains'] = filename_contains
if file_last_written is not None:
params['FileLastWritten'] = file_last_written
if file_size is not None:
params['FileSize'] = file_size
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBLogFiles',
verb='POST',
path='/', params=params)
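# Example sketch: list only error logs written after a cutoff; the RDS API
# expresses FileLastWritten in milliseconds since the epoch (treat the unit
# as an assumption to verify):
#
#   import time
#   cutoff = int((time.time() - 3600) * 1000)
#   conn.describe_db_log_files('mydbinstance',
#                              filename_contains='error',
#                              file_last_written=cutoff)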
def describe_db_parameter_groups(self, db_parameter_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBParameterGroup` descriptions. If a
`DBParameterGroupName` is specified, the list will contain
only the description of the specified DB parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameterGroups` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {}
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameterGroups',
verb='POST',
path='/', params=params)
def describe_db_parameters(self, db_parameter_group_name, source=None,
max_records=None, marker=None):
"""
Returns the detailed parameter list for a particular DB
parameter group.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of a specific DB parameter group to return details for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type source: string
:param source: The parameter types to return.
Default: All parameter types returned
Valid Values: `user | system | engine-default`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBParameters` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBParameters',
verb='POST',
path='/', params=params)
def describe_db_security_groups(self, db_security_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of `DBSecurityGroup` descriptions. If a
`DBSecurityGroupName` is specified, the list will contain only
the descriptions of the specified DB security group.
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
return details for.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSecurityGroups request. If this parameter is specified,
the response includes only records beyond the marker, up to the
value specified by `MaxRecords`.
"""
params = {}
if db_security_group_name is not None:
params['DBSecurityGroupName'] = db_security_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSecurityGroups',
verb='POST',
path='/', params=params)
def describe_db_snapshots(self, db_instance_identifier=None,
db_snapshot_identifier=None,
snapshot_type=None, filters=None,
max_records=None, marker=None):
"""
Returns information about DB snapshots. This API supports
pagination.
:type db_instance_identifier: string
:param db_instance_identifier:
A DB instance identifier to retrieve the list of DB snapshots for.
Cannot be used in conjunction with `DBSnapshotIdentifier`. This
parameter is not case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
:param db_snapshot_identifier:
A specific DB snapshot identifier to describe. Cannot be used in
conjunction with `DBInstanceIdentifier`. This value is stored as a
lowercase string.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
+ If this is the identifier of an automated snapshot, the
`SnapshotType` parameter must also be specified.
:type snapshot_type: string
:param snapshot_type: The type of snapshots that will be returned.
Values can be "automated" or "manual." If not specified, the
returned results will include all snapshot types.
:type filters: list
:param filters:
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeDBSnapshots` request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if db_instance_identifier is not None:
params['DBInstanceIdentifier'] = db_instance_identifier
if db_snapshot_identifier is not None:
params['DBSnapshotIdentifier'] = db_snapshot_identifier
if snapshot_type is not None:
params['SnapshotType'] = snapshot_type
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSnapshots',
verb='POST',
path='/', params=params)
def describe_db_subnet_groups(self, db_subnet_group_name=None,
filters=None, max_records=None,
marker=None):
"""
Returns a list of DBSubnetGroup descriptions. If a
DBSubnetGroupName is specified, the list will contain only the
descriptions of the specified DBSubnetGroup.
For an overview of CIDR ranges, go to the `Wikipedia
Tutorial`_.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name of the DB subnet group to return
details for.
:type filters: list
:param filters: A list of filters to apply, each specified as a
(`FilterName`, `FilterValue`) pair.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeDBSubnetGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
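Example (hypothetical group name; `conn` is assumed to be a connected
`RDSConnection`)::

    subnet_groups = conn.describe_db_subnet_groups(
        db_subnet_group_name='mysubnetgroup')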
"""
params = {}
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDBSubnetGroups',
verb='POST',
path='/', params=params)
def describe_engine_default_parameters(self, db_parameter_group_family,
max_records=None, marker=None):
"""
Returns the default engine and system parameter information
for the specified database engine.
:type db_parameter_group_family: string
:param db_parameter_group_family: The name of the DB parameter group
family.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
`DescribeEngineDefaultParameters` request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
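Example (illustrative family name; `conn` is assumed to be a connected
`RDSConnection`)::

    defaults = conn.describe_engine_default_parameters(
        db_parameter_group_family='mysql5.6',
        max_records=100)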
"""
params = {
'DBParameterGroupFamily': db_parameter_group_family,
}
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEngineDefaultParameters',
verb='POST',
path='/', params=params)
def describe_event_categories(self, source_type=None):
"""
Displays a list of categories for all event source types, or,
if specified, for a specified source type. You can see a list
of the event categories and source types in the ` Events`_
topic in the Amazon RDS User Guide.
:type source_type: string
:param source_type: The type of source that will be generating the
events.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
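Example (a minimal sketch; `conn` is assumed to be a connected
`RDSConnection`)::

    categories = conn.describe_event_categories(
        source_type='db-instance')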
"""
params = {}
if source_type is not None:
params['SourceType'] = source_type
return self._make_request(
action='DescribeEventCategories',
verb='POST',
path='/', params=params)
def describe_event_subscriptions(self, subscription_name=None,
filters=None, max_records=None,
marker=None):
"""
Lists all the subscription descriptions for a customer
account. The description for a subscription includes
SubscriptionName, SNSTopicARN, CustomerID, SourceType,
SourceID, CreationTime, and Status.
If you specify a SubscriptionName, lists the description for
that subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to describe.
:type filters: list
:param filters: A list of filters to apply, each specified as a
(`FilterName`, `FilterValue`) pair.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeEventSubscriptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
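Example (hypothetical subscription name; `conn` is assumed to be a
connected `RDSConnection`)::

    subscriptions = conn.describe_event_subscriptions(
        subscription_name='mysubscription')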
"""
params = {}
if subscription_name is not None:
params['SubscriptionName'] = subscription_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEventSubscriptions',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
event_categories=None, max_records=None, marker=None):
"""
Returns events related to DB instances, DB security groups, DB
snapshots, and DB parameter groups for the past 14 days.
Events specific to a particular DB instance, DB security
group, database snapshot, or DB parameter group can be
obtained by providing the name as a parameter. By default, events
from the past hour are returned.
:type source_identifier: string
:param source_identifier:
The identifier of the event source for which events will be returned.
If not specified, then all sources are included in the response.
Constraints:
+ If SourceIdentifier is supplied, SourceType must also be provided.
+ If the source type is `DBInstance`, then a `DBInstanceIdentifier`
must be supplied.
+ If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must
be supplied.
+ If the source type is `DBParameterGroup`, a `DBParameterGroupName`
must be supplied.
+ If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be
supplied.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type source_type: string
:param source_type: The event source to retrieve events for. If no
value is specified, all events are returned.
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format. For more information
about ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format. For more information about
ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: 2009-07-08T18:00Z
:type duration: integer
:param duration: The number of minutes to retrieve events for.
Default: 60
:type event_categories: list
:param event_categories: A list of event categories that trigger
notifications for an event notification subscription.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results may be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeEvents request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
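Example: a minimal sketch returning the last day of events for a
hypothetical instance, assuming `conn` is a connected
`RDSConnection`::

    events = conn.describe_events(
        source_identifier='mydbinstance',
        source_type='db-instance',
        duration=1440)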
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
def describe_option_group_options(self, engine_name,
major_engine_version=None,
max_records=None, marker=None):
"""
Describes all available options.
:type engine_name: string
:param engine_name: A required parameter. Options available for the
given Engine name will be described.
:type major_engine_version: string
:param major_engine_version: If specified, filters the results to
include only options for the specified major engine version.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
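Example (illustrative engine name and version; `conn` is assumed to be
a connected `RDSConnection`)::

    options = conn.describe_option_group_options(
        engine_name='oracle-ee',
        major_engine_version='11.2')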
"""
params = {'EngineName': engine_name, }
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOptionGroupOptions',
verb='POST',
path='/', params=params)
def describe_option_groups(self, option_group_name=None, filters=None,
marker=None, max_records=None,
engine_name=None, major_engine_version=None):
"""
Describes the available option groups.
:type option_group_name: string
:param option_group_name: The name of the option group to describe.
Cannot be supplied together with EngineName or MajorEngineVersion.
:type filters: list
:param filters: A list of filters to apply, each specified as a
(`FilterName`, `FilterValue`) pair.
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOptionGroups request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type engine_name: string
:param engine_name: Filters the list of option groups to only include
groups associated with a specific database engine.
:type major_engine_version: string
:param major_engine_version: Filters the list of option groups to only
include groups associated with a specific database engine version.
If specified, then EngineName must also be specified.
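Example (hypothetical option group name; `conn` is assumed to be a
connected `RDSConnection`)::

    groups = conn.describe_option_groups(
        option_group_name='myoptiongroup')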
"""
params = {}
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if marker is not None:
params['Marker'] = marker
if max_records is not None:
params['MaxRecords'] = max_records
if engine_name is not None:
params['EngineName'] = engine_name
if major_engine_version is not None:
params['MajorEngineVersion'] = major_engine_version
return self._make_request(
action='DescribeOptionGroups',
verb='POST',
path='/', params=params)
def describe_orderable_db_instance_options(self, engine,
engine_version=None,
db_instance_class=None,
license_model=None, vpc=None,
max_records=None, marker=None):
"""
Returns a list of orderable DB instance options for the
specified engine.
:type engine: string
:param engine: The name of the engine to retrieve DB instance options
for.
:type engine_version: string
:param engine_version: The engine version filter value. Specify this
parameter to show only the available offerings matching the
specified engine version.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type license_model: string
:param license_model: The license model filter value. Specify this
parameter to show only the available offerings matching the
specified license model.
:type vpc: boolean
:param vpc: The VPC filter value. Specify this parameter to show only
the available VPC or non-VPC offerings.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a pagination token called a marker is included in the
response so that the remaining results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
DescribeOrderableDBInstanceOptions request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
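Example: a minimal sketch listing VPC offerings for a hypothetical
engine and version, assuming `conn` is a connected `RDSConnection`::

    offerings = conn.describe_orderable_db_instance_options(
        engine='mysql',
        engine_version='5.6.13',
        vpc=True)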
"""
params = {'Engine': engine, }
if engine_version is not None:
params['EngineVersion'] = engine_version
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if license_model is not None:
params['LicenseModel'] = license_model
if vpc is not None:
params['Vpc'] = str(
vpc).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOrderableDBInstanceOptions',
verb='POST',
path='/', params=params)
def describe_reserved_db_instances(self, reserved_db_instance_id=None,
reserved_db_instances_offering_id=None,
db_instance_class=None, duration=None,
product_description=None,
offering_type=None, multi_az=None,
filters=None, max_records=None,
marker=None):
"""
Returns information about reserved DB instances for this
account, or about a specified reserved DB instance.
:type reserved_db_instance_id: string
:param reserved_db_instance_id: The reserved DB instance identifier
filter value. Specify this parameter to show only the reservation
that matches the specified reservation ID.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only purchased
reservations matching the specified offering identifier.
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only those reservations matching the
specified DB instances class.
:type duration: string
:param duration: The duration filter value, specified in years or
seconds. Specify this parameter to show only reservations for this
duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value.
Specify this parameter to show only those reservations matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only those reservations matching the specified Multi-AZ
parameter.
:type filters: list
:param filters: A list of filters to apply, each specified as a
(`FilterName`, `FilterValue`) pair.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
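Example (illustrative filter values; `conn` is assumed to be a
connected `RDSConnection`)::

    reservations = conn.describe_reserved_db_instances(
        db_instance_class='db.m1.small',
        multi_az=False)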
"""
params = {}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if filters is not None:
self.build_complex_list_params(
params, filters,
'Filters.member',
('FilterName', 'FilterValue'))
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstances',
verb='POST',
path='/', params=params)
def describe_reserved_db_instances_offerings(self,
reserved_db_instances_offering_id=None,
db_instance_class=None,
duration=None,
product_description=None,
offering_type=None,
multi_az=None,
max_records=None,
marker=None):
"""
Lists available reserved DB instance offerings.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The offering identifier
filter value. Specify this parameter to show only the available
offering that matches the specified reservation identifier.
Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706`
:type db_instance_class: string
:param db_instance_class: The DB instance class filter value. Specify
this parameter to show only the available offerings matching the
specified DB instance class.
:type duration: string
:param duration: Duration filter value, specified in years or seconds.
Specify this parameter to show only reservations for this duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: Product description filter value. Specify
this parameter to show only the available offerings matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type multi_az: boolean
:param multi_az: The Multi-AZ filter value. Specify this parameter to
show only the available offerings matching the specified Multi-AZ
parameter.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
pagination token called a marker is included in the response so
that the following results can be retrieved.
Default: 100
Constraints: minimum 20, maximum 100
:type marker: string
:param marker: An optional pagination token provided by a previous
request. If this parameter is specified, the response includes only
records beyond the marker, up to the value specified by
`MaxRecords`.
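Example (illustrative filter values; `conn` is assumed to be a
connected `RDSConnection`)::

    offerings = conn.describe_reserved_db_instances_offerings(
        db_instance_class='db.m1.small',
        duration='31536000',
        offering_type='Heavy Utilization')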
"""
params = {}
if reserved_db_instances_offering_id is not None:
params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedDBInstancesOfferings',
verb='POST',
path='/', params=params)
def download_db_log_file_portion(self, db_instance_identifier,
log_file_name, marker=None,
number_of_lines=None):
"""
Downloads all or a portion of the specified log file.
:type db_instance_identifier: string
:param db_instance_identifier:
The customer-assigned name of the DB instance that contains the log
files you want to list.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type log_file_name: string
:param log_file_name: The name of the log file to be downloaded.
:type marker: string
:param marker: The pagination token provided in the previous request.
If this parameter is specified, the response includes only records
beyond the marker, up to the number of lines specified by
`NumberOfLines`.
:type number_of_lines: integer
:param number_of_lines: The number of lines to download from the log
file.
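Example (hypothetical instance and log file names; `conn` is assumed
to be a connected `RDSConnection`)::

    portion = conn.download_db_log_file_portion(
        db_instance_identifier='mydbinstance',
        log_file_name='error/mysql-error.log',
        number_of_lines=100)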
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'LogFileName': log_file_name,
}
if marker is not None:
params['Marker'] = marker
if number_of_lines is not None:
params['NumberOfLines'] = number_of_lines
return self._make_request(
action='DownloadDBLogFilePortion',
verb='POST',
path='/', params=params)
def list_tags_for_resource(self, resource_name):
"""
Lists all tags on an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource with tags to be listed.
This value is an Amazon Resource Name (ARN). For information about
creating an ARN, see ` Constructing an RDS Amazon Resource Name
(ARN)`_.
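Example (hypothetical ARN; `conn` is assumed to be a connected
`RDSConnection`)::

    tags = conn.list_tags_for_resource(
        'arn:aws:rds:us-east-1:123456789012:db:mydbinstance')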
"""
params = {'ResourceName': resource_name, }
return self._make_request(
action='ListTagsForResource',
verb='POST',
path='/', params=params)
def modify_db_instance(self, db_instance_identifier,
allocated_storage=None, db_instance_class=None,
db_security_groups=None,
vpc_security_group_ids=None,
apply_immediately=None, master_user_password=None,
db_parameter_group_name=None,
backup_retention_period=None,
preferred_backup_window=None,
preferred_maintenance_window=None, multi_az=None,
engine_version=None,
allow_major_version_upgrade=None,
auto_minor_version_upgrade=None, iops=None,
option_group_name=None,
new_db_instance_identifier=None):
"""
Modifies settings for a DB instance. You can change one or more
database configuration parameters by specifying these
parameters and the new values in the request.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This value is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type allocated_storage: integer
:param allocated_storage: The new storage capacity of the RDS instance.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
**MySQL**
Default: Uses existing setting
Valid Values: 5-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
**Oracle**
Default: Uses existing setting
Valid Values: 10-1024
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
**SQL Server**
Cannot be modified.
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type db_instance_class: string
:param db_instance_class: The new compute and memory capacity of the DB
instance. To determine the instance classes that are available for
a particular DB engine, use the DescribeOrderableDBInstanceOptions
action.
Passing a value for this parameter causes an outage during the change
and is applied during the next maintenance window, unless the
`ApplyImmediately` parameter is specified as `True` for this
request.
Default: Uses existing setting
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type db_security_groups: list
:param db_security_groups:
A list of DB security groups to authorize on this DB instance. Changing
this parameter does not result in an outage and the change is
asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type vpc_security_group_ids: list
:param vpc_security_group_ids:
A list of EC2 VPC security groups to authorize on this DB instance.
This change is asynchronously applied as soon as possible.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type apply_immediately: boolean
:param apply_immediately: Specifies whether or not the modifications in
this request and any pending modifications are asynchronously
applied as soon as possible, regardless of the
`PreferredMaintenanceWindow` setting for the DB instance.
If this parameter is passed as `False`, changes to the DB instance are
applied on the next call to RebootDBInstance, the next maintenance
reboot, or the next failure reboot, whichever occurs first. See
each parameter to determine when a change is applied.
Default: `False`
:type master_user_password: string
:param master_user_password:
The new password for the DB instance master user. Can be any printable
ASCII character except "/", '"', or "@".
Changing this parameter does not result in an outage and the change is
asynchronously applied as soon as possible. Between the time of the
request and the completion of the request, the `MasterUserPassword`
element exists in the `PendingModifiedValues` element of the
operation response.
Default: Uses existing setting
Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30
alphanumeric characters (Oracle), or 8 to 128 alphanumeric
characters (SQL Server).
Amazon RDS API actions never return the password, so this action
provides a way to regain access to a master instance user if the
password is lost.
:type db_parameter_group_name: string
:param db_parameter_group_name: The name of the DB parameter group to
apply to this DB instance. Changing this parameter does not result
in an outage and the change is applied during the next maintenance
window unless the `ApplyImmediately` parameter is set to `True` for
this request.
Default: Uses existing setting
Constraints: The DB parameter group must be in the same DB parameter
group family as this DB instance.
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Changing this parameter can result in an outage if you change from 0 to
a non-zero value or from a non-zero value to 0. These changes are
applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
you change the parameter from one non-zero value to another non-
zero value, the change is asynchronously applied as soon as
possible.
Default: Uses existing setting
Constraints:
+ Must be a value from 0 to 8
+ Cannot be set to 0 if the DB instance is a master instance with read
replicas or if the DB instance is a read replica
:type preferred_backup_window: string
:param preferred_backup_window:
The daily time range during which automated backups are created if
automated backups are enabled, as determined by the
`BackupRetentionPeriod`. Changing this parameter does not result in
an outage and the change is asynchronously applied as soon as
possible.
Constraints:
+ Must be in the format hh24:mi-hh24:mi
+ Times should be Universal Time Coordinated (UTC)
+ Must not conflict with the preferred maintenance window
+ Must be at least 30 minutes
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur, which may result in an
outage. Changing this parameter does not result in an outage,
except in the following situation, and the change is asynchronously
applied as soon as possible. If there are pending actions that
cause a reboot, and the maintenance window is changed to include
the current time, then changing this parameter will cause a reboot
of the DB instance. If moving this window to the current time,
there must be at least 30 minutes between the current time and end
of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Changing this parameter does not result in an outage and the change
is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
Constraints: Cannot be specified if the DB instance is a read replica.
:type engine_version: string
:param engine_version: The version number of the database engine to
upgrade to. Changing this parameter results in an outage and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request.
For major version upgrades, if a non-default DB parameter group is
currently in use, a new DB parameter group in the DB parameter
group family for the new engine version must be specified. The new
DB parameter group can be the default for that DB parameter group
family.
Example: `5.1.42`
:type allow_major_version_upgrade: boolean
:param allow_major_version_upgrade: Indicates that major version
upgrades are allowed. Changing this parameter does not result in an
outage and the change is asynchronously applied as soon as
possible.
Constraints: This parameter must be set to true when specifying a value
for the EngineVersion parameter that is a different major version
than the DB instance's current version.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window. Changing this parameter does not result in
an outage except in the following case and the change is
asynchronously applied as soon as possible. An outage will result
if this parameter is set to `True` during the maintenance window,
and a newer minor version is available, and RDS has enabled auto
patching for that engine version.
:type iops: integer
:param iops: The new Provisioned IOPS (I/O operations per second) value
for the RDS instance. Changing this parameter does not result in an
outage and the change is applied during the next maintenance window
unless the `ApplyImmediately` parameter is set to `True` for this
request.
Default: Uses existing setting
Constraints: Value supplied must be at least 10% greater than the
current value. Values that are not at least 10% greater than the
existing value are rounded up so that they are 10% greater than the
current value.
Type: Integer
If you choose to migrate your DB instance from using standard storage
to using Provisioned IOPS, or from using Provisioned IOPS to using
standard storage, the process can take time. The duration of the
migration depends on several factors such as database load, storage
size, storage type (standard or Provisioned IOPS), amount of IOPS
provisioned (if any), and the number of prior scale storage
operations. Typical migration times are under 24 hours, but the
process can take up to several days in some cases. During the
migration, the DB instance will be available for use, but may
experience performance degradation. While the migration takes
place, nightly backups for the instance will be suspended. No other
Amazon RDS operations can take place for the instance, including
modifying the instance, rebooting the instance, deleting the
instance, creating a read replica for the instance, and creating a
DB snapshot of the instance.
:type option_group_name: string
:param option_group_name: Indicates that the DB instance should be
associated with the specified option group. Changing this parameter
does not result in an outage except in the following case and the
change is applied during the next maintenance window unless the
`ApplyImmediately` parameter is set to `True` for this request. If
the parameter change results in an option group that enables OEM,
this change can cause a brief (sub-second) period during which new
connections are rejected but existing connections are not
interrupted.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type new_db_instance_identifier: string
:param new_db_instance_identifier:
The new DB instance identifier for the DB instance when renaming a DB
Instance. This value is stored as a lowercase string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
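Example: a minimal sketch that scales a hypothetical instance and
applies the change immediately, assuming `conn` is a connected
`RDSConnection`::

    response = conn.modify_db_instance(
        'mydbinstance',
        allocated_storage=50,
        db_instance_class='db.m1.large',
        apply_immediately=True)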
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if allocated_storage is not None:
params['AllocatedStorage'] = allocated_storage
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if db_security_groups is not None:
self.build_list_params(params,
db_security_groups,
'DBSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if master_user_password is not None:
params['MasterUserPassword'] = master_user_password
if db_parameter_group_name is not None:
params['DBParameterGroupName'] = db_parameter_group_name
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if allow_major_version_upgrade is not None:
params['AllowMajorVersionUpgrade'] = str(
allow_major_version_upgrade).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if new_db_instance_identifier is not None:
params['NewDBInstanceIdentifier'] = new_db_instance_identifier
return self._make_request(
action='ModifyDBInstance',
verb='POST',
path='/', params=params)
def modify_db_parameter_group(self, db_parameter_group_name, parameters):
"""
Modifies the parameters of a DB parameter group. To modify
more than one parameter, submit a list of the following:
`ParameterName`, `ParameterValue`, and `ApplyMethod`. A
maximum of 20 parameters can be modified in a single request.
The `apply-immediate` method can be used only for dynamic
parameters; the `pending-reboot` method can be used with MySQL
and Oracle DB instances for either dynamic or static
parameters. For Microsoft SQL Server DB instances, the
`pending-reboot` method can be used only for static
parameters.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be the name of an existing DB parameter group
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type parameters: list
:param parameters:
An array of parameter names, values, and the apply method for the
parameter update. At least one parameter name, value, and apply
method must be supplied; subsequent arguments are optional. A
maximum of 20 parameters may be modified in a single request.
Valid Values (for the application method): `immediate | pending-reboot`
You can use the immediate value with dynamic parameters only. You can
use the pending-reboot value for both dynamic and static
parameters, and changes are applied when the DB instance reboots.
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
return self._make_request(
action='ModifyDBParameterGroup',
verb='POST',
path='/', params=params)
def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids,
db_subnet_group_description=None):
"""
Modifies an existing DB subnet group. DB subnet groups must
contain at least one subnet in at least two AZs in the region.
:type db_subnet_group_name: string
:param db_subnet_group_name: The name for the DB subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens. Must not be "Default".
Example: `mySubnetgroup`
:type db_subnet_group_description: string
:param db_subnet_group_description: The description for the DB subnet
group.
:type subnet_ids: list
:param subnet_ids: The EC2 subnet IDs for the DB subnet group.
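Example (hypothetical subnet IDs; `conn` is assumed to be a connected
`RDSConnection`)::

    response = conn.modify_db_subnet_group(
        'mysubnetgroup',
        subnet_ids=['subnet-11111111', 'subnet-22222222'],
        db_subnet_group_description='Updated description')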
"""
params = {'DBSubnetGroupName': db_subnet_group_name, }
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if db_subnet_group_description is not None:
params['DBSubnetGroupDescription'] = db_subnet_group_description
return self._make_request(
action='ModifyDBSubnetGroup',
verb='POST',
path='/', params=params)
def modify_event_subscription(self, subscription_name,
sns_topic_arn=None, source_type=None,
event_categories=None, enabled=None):
"""
Modifies an existing RDS event notification subscription. Note
that you cannot modify the source identifiers using this call;
to change source identifiers for a subscription, use the
AddSourceIdentifierToSubscription and
RemoveSourceIdentifierFromSubscription calls.
You can see a list of the event categories for a given
SourceType in the `Events`_ topic in the Amazon RDS User Guide
or by using the **DescribeEventCategories** action.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription.
:type sns_topic_arn: string
:param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
created for event notification. The ARN is created by Amazon SNS
when you create a topic and subscribe to it.
:type source_type: string
:param source_type: The type of source that will be generating the
events. For example, if you want to be notified of events generated
by a DB instance, you would set this parameter to db-instance. If
this value is not specified, all events are returned.
Valid values: db-instance | db-parameter-group | db-security-group |
db-snapshot
:type event_categories: list
:param event_categories: A list of event categories for a SourceType
that you want to subscribe to. You can see a list of the categories
for a given SourceType in the `Events`_ topic in the Amazon RDS
User Guide or by using the **DescribeEventCategories** action.
:type enabled: boolean
:param enabled: A Boolean value; set to **true** to activate the
subscription.
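Example (hypothetical subscription and categories; `conn` is assumed
to be a connected `RDSConnection`)::

    response = conn.modify_event_subscription(
        'mysubscription',
        source_type='db-instance',
        event_categories=['backup', 'failover'],
        enabled=True)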
"""
params = {'SubscriptionName': subscription_name, }
if sns_topic_arn is not None:
params['SnsTopicArn'] = sns_topic_arn
if source_type is not None:
params['SourceType'] = source_type
if event_categories is not None:
self.build_list_params(params,
event_categories,
'EventCategories.member')
if enabled is not None:
params['Enabled'] = str(
enabled).lower()
return self._make_request(
action='ModifyEventSubscription',
verb='POST',
path='/', params=params)
def modify_option_group(self, option_group_name, options_to_include=None,
options_to_remove=None, apply_immediately=None):
"""
Modifies an existing option group.
:type option_group_name: string
:param option_group_name: The name of the option group to be modified.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type options_to_include: list
:param options_to_include: Options in this list are added to the option
group or, if already present, the specified configuration is used
to update the existing configuration.
:type options_to_remove: list
:param options_to_remove: Options in this list are removed from the
option group.
:type apply_immediately: boolean
:param apply_immediately: Indicates whether the changes should be
applied immediately, or during the next maintenance window for each
instance associated with the option group.
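Example: a minimal sketch that removes a hypothetical option
immediately, assuming `conn` is a connected `RDSConnection`::

    response = conn.modify_option_group(
        'myoptiongroup',
        options_to_remove=['MEMCACHED'],
        apply_immediately=True)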
"""
params = {'OptionGroupName': option_group_name, }
if options_to_include is not None:
self.build_complex_list_params(
params, options_to_include,
'OptionsToInclude.member',
('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings'))
if options_to_remove is not None:
self.build_list_params(params,
options_to_remove,
'OptionsToRemove.member')
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
return self._make_request(
action='ModifyOptionGroup',
verb='POST',
path='/', params=params)
def promote_read_replica(self, db_instance_identifier,
backup_retention_period=None,
preferred_backup_window=None):
"""
Promotes a read replica DB instance to a standalone DB
instance.
:type db_instance_identifier: string
:param db_instance_identifier: The DB instance identifier. This value
is stored as a lowercase string.
Constraints:
+ Must be the identifier for an existing read replica DB instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: mydbinstance
:type backup_retention_period: integer
:param backup_retention_period:
The number of days to retain automated backups. Setting this parameter
to a positive number enables backups. Setting this parameter to 0
disables automated backups.
Default: 1
Constraints:
+ Must be a value from 0 to 8
:type preferred_backup_window: string
:param preferred_backup_window: The daily time range during which
automated backups are created if automated backups are enabled,
using the `BackupRetentionPeriod` parameter.
Default: A 30-minute window selected at random from an 8-hour block of
time per region. See the Amazon RDS User Guide for the time blocks
for each region from which the default backup windows are assigned.
Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be
Universal Time Coordinated (UTC). Must not conflict with the
preferred maintenance window. Must be at least 30 minutes.
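Example (hypothetical replica identifier; `conn` is assumed to be a
connected `RDSConnection`)::

    response = conn.promote_read_replica(
        'myreadreplica',
        backup_retention_period=7)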
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window is not None:
params['PreferredBackupWindow'] = preferred_backup_window
return self._make_request(
action='PromoteReadReplica',
verb='POST',
path='/', params=params)
def purchase_reserved_db_instances_offering(self,
reserved_db_instances_offering_id,
reserved_db_instance_id=None,
db_instance_count=None,
tags=None):
"""
Purchases a reserved DB instance offering.
:type reserved_db_instances_offering_id: string
:param reserved_db_instances_offering_id: The ID of the Reserved DB
instance offering to purchase.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type reserved_db_instance_id: string
:param reserved_db_instance_id: Customer-specified identifier to track
this reservation.
Example: myreservationID
:type db_instance_count: integer
:param db_instance_count: The number of instances to reserve.
Default: `1`
:type tags: list
:param tags: A list of tags.
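Example, using the offering ID shown above (a minimal sketch; `conn`
is assumed to be a connected `RDSConnection`)::

    response = conn.purchase_reserved_db_instances_offering(
        '438012d3-4052-4cc7-b2e3-8d3372e0e706',
        reserved_db_instance_id='myreservationID',
        db_instance_count=1)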
"""
params = {
'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id,
}
if reserved_db_instance_id is not None:
params['ReservedDBInstanceId'] = reserved_db_instance_id
if db_instance_count is not None:
params['DBInstanceCount'] = db_instance_count
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='PurchaseReservedDBInstancesOffering',
verb='POST',
path='/', params=params)
def reboot_db_instance(self, db_instance_identifier, force_failover=None):
"""
Rebooting a DB instance restarts the database engine service.
A reboot also applies to the DB instance any modifications to
the associated DB parameter group that were pending. Rebooting
a DB instance results in a momentary outage of the instance,
during which the DB instance status is set to rebooting. If
the RDS instance is configured for MultiAZ, it is possible
that the reboot will be conducted through a failover. An
Amazon RDS event is created when the reboot is completed.
If your DB instance is deployed in multiple Availability
Zones, you can force a failover from one AZ to the other
during the reboot. You might force a failover to test the
availability of your DB instance deployment or to restore
operations to the original AZ after a failover occurs.
The time required to reboot is a function of the specific
database engine's crash recovery process. To improve the
reboot time, we recommend that you reduce database activities
as much as possible during the reboot process to reduce
rollback activity for in-transit transactions.
:type db_instance_identifier: string
:param db_instance_identifier:
The DB instance identifier. This parameter is stored as a lowercase
string.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type force_failover: boolean
:param force_failover: When `True`, the reboot will be conducted
through a MultiAZ failover.
Constraint: You cannot specify `True` if the instance is not configured
for MultiAZ.
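Example (hypothetical identifier; `conn` is assumed to be a connected
`RDSConnection`)::

    response = conn.reboot_db_instance(
        'mydbinstance',
        force_failover=False)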
"""
params = {'DBInstanceIdentifier': db_instance_identifier, }
if force_failover is not None:
params['ForceFailover'] = str(
force_failover).lower()
return self._make_request(
action='RebootDBInstance',
verb='POST',
path='/', params=params)
def remove_source_identifier_from_subscription(self, subscription_name,
source_identifier):
"""
Removes a source identifier from an existing RDS event
notification subscription.
:type subscription_name: string
:param subscription_name: The name of the RDS event notification
subscription you want to remove a source identifier from.
:type source_identifier: string
:param source_identifier: The source identifier to be removed from the
subscription, such as the **DB instance identifier** for a DB
instance or the name of a security group.
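Example (hypothetical names; `conn` is assumed to be a connected
`RDSConnection`)::

    response = conn.remove_source_identifier_from_subscription(
        'mysubscription',
        'mydbinstance')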
"""
params = {
'SubscriptionName': subscription_name,
'SourceIdentifier': source_identifier,
}
return self._make_request(
action='RemoveSourceIdentifierFromSubscription',
verb='POST',
path='/', params=params)
def remove_tags_from_resource(self, resource_name, tag_keys):
"""
Removes metadata tags from an Amazon RDS resource.
For an overview on tagging an Amazon RDS resource, see
`Tagging Amazon RDS Resources`_.
:type resource_name: string
:param resource_name: The Amazon RDS resource the tags will be removed
from. This value is an Amazon Resource Name (ARN). For information
about creating an ARN, see ` Constructing an RDS Amazon Resource
Name (ARN)`_.
:type tag_keys: list
:param tag_keys: A list of tag keys (names) identifying the tags to be
removed.
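Example (hypothetical ARN and tag keys; `conn` is assumed to be a
connected `RDSConnection`)::

    response = conn.remove_tags_from_resource(
        'arn:aws:rds:us-east-1:123456789012:db:mydbinstance',
        ['environment', 'owner'])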
"""
params = {'ResourceName': resource_name, }
self.build_list_params(params,
tag_keys,
'TagKeys.member')
return self._make_request(
action='RemoveTagsFromResource',
verb='POST',
path='/', params=params)
def reset_db_parameter_group(self, db_parameter_group_name,
reset_all_parameters=None, parameters=None):
"""
Modifies the parameters of a DB parameter group to the
engine/system default value. To reset specific parameters
submit a list of the following: `ParameterName` and
`ApplyMethod`. To reset the entire DB parameter group, specify
the `DBParameterGroup` name and `ResetAllParameters`
parameters. When resetting the entire group, dynamic
parameters are updated immediately and static parameters are
set to `pending-reboot` to take effect on the next DB instance
restart or `RebootDBInstance` request.
:type db_parameter_group_name: string
:param db_parameter_group_name:
The name of the DB parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type reset_all_parameters: boolean
:param reset_all_parameters: Specifies whether ( `True`) or not (
`False`) to reset all parameters in the DB parameter group to
default values.
Default: `True`
:type parameters: list
:param parameters: An array of parameter names, values, and the apply
method for the parameter update. At least one parameter name,
value, and apply method must be supplied; subsequent arguments are
optional. A maximum of 20 parameters may be modified in a single
request.
**MySQL**
Valid Values (for Apply method): `immediate` | `pending-reboot`
You can use the immediate value with dynamic parameters only. You can
use the `pending-reboot` value for both dynamic and static
parameters, and changes are applied when the DB instance reboots.
**Oracle**
Valid Values (for Apply method): `pending-reboot`
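Example: a minimal sketch that resets every parameter in a
hypothetical group to its default, assuming `conn` is a connected
`RDSConnection`::

    response = conn.reset_db_parameter_group(
        'mydbparametergroup',
        reset_all_parameters=True)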
"""
params = {'DBParameterGroupName': db_parameter_group_name, }
if reset_all_parameters is not None:
params['ResetAllParameters'] = str(
reset_all_parameters).lower()
if parameters is not None:
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod'))
return self._make_request(
action='ResetDBParameterGroup',
verb='POST',
path='/', params=params)
def restore_db_instance_from_db_snapshot(self, db_instance_identifier,
db_snapshot_identifier,
db_instance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None,
multi_az=None,
publicly_accessible=None,
auto_minor_version_upgrade=None,
license_model=None,
db_name=None, engine=None,
iops=None,
option_group_name=None,
tags=None):
"""
Creates a new DB instance from a DB snapshot. The target
database is created from the source database restore point
with the same configuration as the original source database,
except that the new RDS instance is created with the default
security group.
:type db_instance_identifier: string
:param db_instance_identifier:
The name of the DB instance to create from the DB snapshot. This
parameter is not case sensitive.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type db_snapshot_identifier: string
:param db_snapshot_identifier: The identifier of the DB snapshot to
restore from.
Constraints:
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the Amazon
RDS DB instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
:type port: integer
:param port: The port number on which the database accepts connections.
Default: The same port as the original DB instance
Constraints: Value must be `1150-65535`
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
Example: `us-east-1a`
:type db_subnet_group_name: string
:param db_subnet_group_name: The DB subnet group name to use for the
new instance.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window.
:type license_model: string
:param license_model: License model information for the restored DB
instance.
Default: Same as source.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type db_name: string
:param db_name:
The database name for the restored DB instance.
This parameter doesn't apply to the MySQL engine.
:type engine: string
:param engine: The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Example: `oracle-ee`
:type iops: integer
:param iops: Specifies the amount of provisioned IOPS for the DB
instance, expressed in I/O operations per second. If this parameter
is not specified, the IOPS value will be taken from the backup. If
this parameter is set to 0, the new instance will be converted to a
non-PIOPS instance, which will take additional time, though your DB
instance will be available for connections before the conversion
starts.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: The name of the option group to be used for
the restored DB instance.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance.
:type tags: list
:param tags: A list of tags.
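Example (hypothetical identifiers; `conn` is assumed to be a connected
`RDSConnection`)::

    response = conn.restore_db_instance_from_db_snapshot(
        'mynewdbinstance',
        'my-snapshot-id',
        db_instance_class='db.m1.small',
        multi_az=False)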
"""
params = {
'DBInstanceIdentifier': db_instance_identifier,
'DBSnapshotIdentifier': db_snapshot_identifier,
}
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if db_name is not None:
params['DBName'] = db_name
if engine is not None:
params['Engine'] = engine
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='RestoreDBInstanceFromDBSnapshot',
verb='POST',
path='/', params=params)
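    # Illustrative usage sketch (not part of the original source). The
    # identifiers below are hypothetical and `conn` is assumed to be an
    # RDSConnection instance:
    #
    #   conn.restore_db_instance_from_db_snapshot(
    #       db_instance_identifier='mydb-restored',
    #       db_snapshot_identifier='mydb-snap-2014-01-01',
    #       db_instance_class='db.m1.small')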
def restore_db_instance_to_point_in_time(self,
source_db_instance_identifier,
target_db_instance_identifier,
restore_time=None,
use_latest_restorable_time=None,
db_instance_class=None,
port=None,
availability_zone=None,
db_subnet_group_name=None,
multi_az=None,
publicly_accessible=None,
auto_minor_version_upgrade=None,
license_model=None,
db_name=None, engine=None,
iops=None,
option_group_name=None,
tags=None):
"""
Restores a DB instance to an arbitrary point-in-time. Users
can restore to any point in time before the
latestRestorableTime for up to backupRetentionPeriod days. The
target database is created from the source database with the
same configuration as the original database except that the DB
instance is created with the default DB security group.
:type source_db_instance_identifier: string
:param source_db_instance_identifier:
The identifier of the source DB instance from which to restore.
Constraints:
+ Must be the identifier of an existing database instance
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type target_db_instance_identifier: string
:param target_db_instance_identifier:
The name of the new database instance to be created.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type restore_time: timestamp
:param restore_time: The date and time to restore from.
Valid Values: Value must be a UTC time
Constraints:
+ Must be before the latest restorable time for the DB instance
+ Cannot be specified if UseLatestRestorableTime parameter is true
Example: `2009-09-07T23:45:00Z`
:type use_latest_restorable_time: boolean
:param use_latest_restorable_time: Specifies whether ( `True`) or not (
`False`) the DB instance is restored from the latest backup time.
Default: `False`
Constraints: Cannot be specified if RestoreTime parameter is provided.
:type db_instance_class: string
:param db_instance_class: The compute and memory capacity of the Amazon
RDS DB instance.
Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large |
db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge`
Default: The same DBInstanceClass as the original DB instance.
:type port: integer
:param port: The port number on which the database accepts connections.
Constraints: Value must be `1150-65535`
Default: The same port as the original DB instance.
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone that the database
instance will be created in.
Default: A random, system-chosen Availability Zone.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to true.
Example: `us-east-1a`
:type db_subnet_group_name: string
:param db_subnet_group_name: The DB subnet group name to use for the
new instance.
:type multi_az: boolean
:param multi_az: Specifies if the DB instance is a Multi-AZ deployment.
Constraint: You cannot specify the AvailabilityZone parameter if the
MultiAZ parameter is set to `True`.
:type publicly_accessible: boolean
:param publicly_accessible: Specifies the accessibility options for the
DB instance. A value of true specifies an Internet-facing instance
with a publicly resolvable DNS name, which resolves to a public IP
address. A value of false specifies an internal instance with a DNS
name that resolves to a private IP address.
Default: The default behavior varies depending on whether a VPC has
been requested or not. The following list shows the default
behavior in each case.
+ **Default VPC:**true
+ **VPC:**false
If no DB subnet group has been specified as part of the request and the
PubliclyAccessible value has not been set, the DB instance will be
publicly accessible. If a specific DB subnet group has been
specified as part of the request and the PubliclyAccessible value
has not been set, the DB instance will be private.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Indicates that minor version
upgrades will be applied automatically to the DB instance during
the maintenance window.
:type license_model: string
:param license_model: License model information for the restored DB
instance.
Default: Same as source.
Valid values: `license-included` | `bring-your-own-license` | `general-
public-license`
:type db_name: string
:param db_name:
The database name for the restored DB instance.
This parameter is not used for the MySQL engine.
:type engine: string
:param engine: The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Example: `oracle-ee`
:type iops: integer
:param iops: The amount of Provisioned IOPS (input/output operations
per second) to be initially allocated for the DB instance.
Constraints: Must be an integer greater than 1000.
:type option_group_name: string
:param option_group_name: The name of the option group to be used for
the restored DB instance.
Permanent options, such as the TDE option for Oracle Advanced Security
TDE, cannot be removed from an option group, and that option group
cannot be removed from a DB instance once it is associated with a
DB instance
:type tags: list
:param tags: A list of tags.
"""
params = {
'SourceDBInstanceIdentifier': source_db_instance_identifier,
'TargetDBInstanceIdentifier': target_db_instance_identifier,
}
if restore_time is not None:
params['RestoreTime'] = restore_time
if use_latest_restorable_time is not None:
params['UseLatestRestorableTime'] = str(
use_latest_restorable_time).lower()
if db_instance_class is not None:
params['DBInstanceClass'] = db_instance_class
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if db_subnet_group_name is not None:
params['DBSubnetGroupName'] = db_subnet_group_name
if multi_az is not None:
params['MultiAZ'] = str(
multi_az).lower()
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if license_model is not None:
params['LicenseModel'] = license_model
if db_name is not None:
params['DBName'] = db_name
if engine is not None:
params['Engine'] = engine
if iops is not None:
params['Iops'] = iops
if option_group_name is not None:
params['OptionGroupName'] = option_group_name
if tags is not None:
self.build_complex_list_params(
params, tags,
'Tags.member',
('Key', 'Value'))
return self._make_request(
action='RestoreDBInstanceToPointInTime',
verb='POST',
path='/', params=params)
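    # Illustrative usage sketch (hypothetical identifiers, `conn` assumed to
    # be an RDSConnection): restore to the latest restorable time instead of
    # passing an explicit RestoreTime:
    #
    #   conn.restore_db_instance_to_point_in_time(
    #       source_db_instance_identifier='mydb',
    #       target_db_instance_identifier='mydb-pitr',
    #       use_latest_restorable_time=True)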
def revoke_db_security_group_ingress(self, db_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_id=None,
ec2_security_group_owner_id=None):
"""
Revokes ingress from a DBSecurityGroup for previously
authorized IP ranges or EC2 or VPC Security Groups. Required
parameters for this API are one of CIDRIP, EC2SecurityGroupId
for VPC, or (EC2SecurityGroupOwnerId and either
EC2SecurityGroupName or EC2SecurityGroupId).
:type db_security_group_name: string
:param db_security_group_name: The name of the DB security group to
revoke ingress from.
:type cidrip: string
:param cidrip: The IP range to revoke access from. Must be a valid CIDR
range. If `CIDRIP` is specified, `EC2SecurityGroupName`,
`EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be
provided.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group to
revoke access from. For VPC DB security groups,
`EC2SecurityGroupId` must be provided. Otherwise,
EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
`EC2SecurityGroupId` must be provided.
:type ec2_security_group_id: string
:param ec2_security_group_id: The id of the EC2 security group to
revoke access from. For VPC DB security groups,
`EC2SecurityGroupId` must be provided. Otherwise,
EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or
`EC2SecurityGroupId` must be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS Account Number of the owner
of the EC2 security group specified in the `EC2SecurityGroupName`
parameter. The AWS Access Key ID is not an acceptable value. For
VPC DB security groups, `EC2SecurityGroupId` must be provided.
Otherwise, EC2SecurityGroupOwnerId and either
`EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.
"""
params = {'DBSecurityGroupName': db_security_group_name, }
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_id is not None:
params['EC2SecurityGroupId'] = ec2_security_group_id
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='RevokeDBSecurityGroupIngress',
verb='POST',
path='/', params=params)
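    # Illustrative usage sketch (hypothetical values, `conn` assumed to be an
    # RDSConnection). CIDRIP and the EC2 security group parameters are
    # mutually exclusive:
    #
    #   conn.revoke_db_security_group_ingress('default',
    #                                         cidrip='203.0.113.0/24')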
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| mit |
javipalanca/ojoalplato | ojoalplato/users/models.py | 1 | 1358 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
USER_STATUS_CHOICES = (
(0, "active"),
)
@python_2_unicode_compatible
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_("Name of User"), blank=True, max_length=255)
login = models.CharField(max_length=60, default="")
url = models.URLField(max_length=100, blank=True)
activation_key = models.CharField(max_length=60, default="0")
status = models.IntegerField(default=0, choices=USER_STATUS_CHOICES)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
class UserMeta(models.Model):
"""
Meta information about a user.
"""
id = models.IntegerField(primary_key=True)
user = models.ForeignKey(User, related_name="meta", blank=True, null=True)
key = models.CharField(max_length=255)
value = models.TextField()
def __unicode__(self):
return u"%s: %s" % (self.key, self.value)
| mit |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/numpy/distutils/command/__init__.py | 264 | 1098 | """distutils.command
Package containing implementation of all the standard Distutils
commands.
"""
from __future__ import division, absolute_import, print_function
def test_na_writable_attributes_deletion():
a = np.NA(2)
attr = ['payload', 'dtype']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
distutils_all = [ #'build_py',
'clean',
'install_clib',
'install_scripts',
'bdist',
'bdist_dumb',
'bdist_wininst',
]
__import__('distutils.command', globals(), locals(), distutils_all)
__all__ = ['build',
'config_compiler',
'config',
'build_src',
'build_py',
'build_ext',
'build_clib',
'build_scripts',
'install',
'install_data',
'install_headers',
'install_lib',
'bdist_rpm',
'sdist',
] + distutils_all
| mit |
jhd/spunout | venv/lib/python2.7/site-packages/pip/commands/search.py | 344 | 4736 | import sys
import textwrap
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor import pkg_resources
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
"""Search for PyPI packages whose name or summary contains <query>."""
name = 'search'
usage = """
%prog [options] <query>"""
summary = 'Search PyPI for packages.'
def __init__(self, *args, **kw):
super(SearchCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'--index',
dest='index',
metavar='URL',
default='https://pypi.python.org/pypi',
help='Base URL of Python Package Index (default %default)')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
raise CommandError('Missing required argument (search query).')
query = args
index_url = options.index
pypi_hits = self.search(query, index_url)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
if pypi_hits:
return SUCCESS
return NO_MATCHES_FOUND
def search(self, query, index_url):
pypi = xmlrpclib.ServerProxy(index_url)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if score is None:
score = 0
if name not in packages.keys():
packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
# each record has a unique name now, so we will convert the dict into a list sorted by score
package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True)
return package_list
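# Illustrative example (hypothetical hits) of the transformation above:
#
#   hits = [{'name': 'foo', 'summary': 'old', 'version': '1.0', '_pypi_ordering': 1},
#           {'name': 'foo', 'summary': 'new', 'version': '2.0', '_pypi_ordering': 2}]
#   transform_hits(hits)
#   # -> [{'name': 'foo', 'summary': 'new', 'versions': ['1.0', '2.0'], 'score': 2}]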
def print_results(hits, name_column_width=25, terminal_width=None):
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
if terminal_width is not None:
# wrap and indent summary to fit terminal
summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%s - %s' % (name.ljust(name_column_width), summary)
try:
logger.notify(line)
if name in installed_packages:
dist = pkg_resources.get_distribution(name)
logger.indent += 2
try:
latest = highest_version(hit['versions'])
if dist.version == latest:
logger.notify('INSTALLED: %s (latest)' % dist.version)
else:
logger.notify('INSTALLED: %s' % dist.version)
logger.notify('LATEST: %s' % latest)
finally:
logger.indent -= 2
except UnicodeEncodeError:
pass
def compare_versions(version1, version2):
try:
return cmp(StrictVersion(version1), StrictVersion(version2))
# in case of abnormal version number, fall back to LooseVersion
except ValueError:
pass
try:
return cmp(LooseVersion(version1), LooseVersion(version2))
except TypeError:
        # certain LooseVersion comparisons raise due to unorderable types,
# fallback to string comparison
return cmp([str(v) for v in LooseVersion(version1).version],
[str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
| gpl-3.0 |
phausler/binutils | gdb/python/lib/gdb/command/frame_filters.py | 126 | 16605 | # Frame-filter commands.
# Copyright (C) 2013-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB commands for working with frame-filters."""
import sys
import gdb
import copy
from gdb.FrameIterator import FrameIterator
from gdb.FrameDecorator import FrameDecorator
import gdb.frames
import itertools
# GDB Commands.
class SetFilterPrefixCmd(gdb.Command):
"""Prefix command for 'set' frame-filter related operations."""
def __init__(self):
super(SetFilterPrefixCmd, self).__init__("set frame-filter",
gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE, True)
class ShowFilterPrefixCmd(gdb.Command):
"""Prefix command for 'show' frame-filter related operations."""
def __init__(self):
super(ShowFilterPrefixCmd, self).__init__("show frame-filter",
gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE, True)
class InfoFrameFilter(gdb.Command):
"""List all registered Python frame-filters.
    Usage: info frame-filter
"""
def __init__(self):
super(InfoFrameFilter, self).__init__("info frame-filter",
gdb.COMMAND_DATA)
@staticmethod
def enabled_string(state):
"""Return "Yes" if filter is enabled, otherwise "No"."""
if state:
return "Yes"
else:
return "No"
def list_frame_filters(self, frame_filters):
""" Internal worker function to list and print frame filters
in a dictionary.
Arguments:
frame_filters: The name of the dictionary, as
specified by GDB user commands.
"""
sorted_frame_filters = sorted(frame_filters.items(),
key=lambda i: gdb.frames.get_priority(i[1]),
reverse=True)
if len(sorted_frame_filters) == 0:
print(" No frame filters registered.")
else:
print(" Priority Enabled Name")
for frame_filter in sorted_frame_filters:
name = frame_filter[0]
try:
priority = '{:<8}'.format(
str(gdb.frames.get_priority(frame_filter[1])))
enabled = '{:<7}'.format(
self.enabled_string(gdb.frames.get_enabled(frame_filter[1])))
except Exception:
e = sys.exc_info()[1]
print(" Error printing filter '"+name+"': "+str(e))
else:
print(" %s %s %s" % (priority, enabled, name))
def print_list(self, title, filter_list, blank_line):
print(title)
self.list_frame_filters(filter_list)
if blank_line:
print("")
def invoke(self, arg, from_tty):
self.print_list("global frame-filters:", gdb.frame_filters, True)
cp = gdb.current_progspace()
self.print_list("progspace %s frame-filters:" % cp.filename,
cp.frame_filters, True)
for objfile in gdb.objfiles():
self.print_list("objfile %s frame-filters:" % objfile.filename,
objfile.frame_filters, False)
# Internal enable/disable functions.
def _enable_parse_arg(cmd_name, arg):
""" Internal worker function to take an argument from
enable/disable and return a tuple of arguments.
Arguments:
cmd_name: Name of the command invoking this function.
args: The argument as a string.
Returns:
A tuple containing the dictionary, and the argument, or just
the dictionary in the case of "all".
"""
argv = gdb.string_to_argv(arg);
argc = len(argv)
if argv[0] == "all" and argc > 1:
raise gdb.GdbError(cmd_name + ": with 'all' " \
"you may not specify a filter.")
else:
if argv[0] != "all" and argc != 2:
raise gdb.GdbError(cmd_name + " takes exactly two arguments.")
return argv
def _do_enable_frame_filter(command_tuple, flag):
"""Worker for enabling/disabling frame_filters.
Arguments:
command_type: A tuple with the first element being the
frame filter dictionary, and the second being
the frame filter name.
flag: True for Enable, False for Disable.
"""
list_op = command_tuple[0]
op_list = gdb.frames.return_list(list_op)
if list_op == "all":
for item in op_list:
gdb.frames.set_enabled(item, flag)
else:
frame_filter = command_tuple[1]
try:
ff = op_list[frame_filter]
except KeyError:
            msg = "frame-filter '" + str(frame_filter) + "' not found."
raise gdb.GdbError(msg)
gdb.frames.set_enabled(ff, flag)
def _complete_frame_filter_list(text, word, all_flag):
"""Worker for frame filter dictionary name completion.
Arguments:
text: The full text of the command line.
word: The most recent word of the command line.
all_flag: Whether to include the word "all" in completion.
Returns:
A list of suggested frame filter dictionary name completions
from text/word analysis. This list can be empty when there
are no suggestions for completion.
"""
if all_flag == True:
filter_locations = ["all", "global", "progspace"]
else:
filter_locations = ["global", "progspace"]
for objfile in gdb.objfiles():
filter_locations.append(objfile.filename)
# If the user just asked for completions with no completion
# hints, just return all the frame filter dictionaries we know
# about.
if (text == ""):
return filter_locations
# Otherwise filter on what we know.
flist = filter(lambda x,y=text:x.startswith(y), filter_locations)
# If we only have one completion, complete it and return it.
if len(flist) == 1:
flist[0] = flist[0][len(text)-len(word):]
# Otherwise, return an empty list, or a list of frame filter
# dictionaries that the previous filter operation returned.
return flist
def _complete_frame_filter_name(word, printer_dict):
"""Worker for frame filter name completion.
Arguments:
word: The most recent word of the command line.
printer_dict: The frame filter dictionary to search for frame
filter name completions.
Returns: A list of suggested frame filter name completions
from word analysis of the frame filter dictionary. This list
can be empty when there are no suggestions for completion.
"""
printer_keys = printer_dict.keys()
if (word == ""):
return printer_keys
flist = filter(lambda x,y=word:x.startswith(y), printer_keys)
return flist
class EnableFrameFilter(gdb.Command):
"""GDB command to disable the specified frame-filter.
Usage: enable frame-filter enable DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If either all, or the two named dictionaries
are not specified, the dictionary name is assumed to be the name
of the object-file name.
NAME matches the name of the frame-filter to operate on. If
DICTIONARY is "all", NAME is ignored.
"""
def __init__(self):
super(EnableFrameFilter, self).__init__("enable frame-filter",
gdb.COMMAND_DATA)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = _enable_parse_arg("enable frame-filter", arg)
_do_enable_frame_filter(command_tuple, True)
class DisableFrameFilter(gdb.Command):
"""GDB command to disable the specified frame-filter.
    Usage: disable frame-filter DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If either all, or the two named dictionaries
are not specified, the dictionary name is assumed to be the name
of the object-file name.
NAME matches the name of the frame-filter to operate on. If
DICTIONARY is "all", NAME is ignored.
"""
def __init__(self):
super(DisableFrameFilter, self).__init__("disable frame-filter",
gdb.COMMAND_DATA)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = _enable_parse_arg("disable frame-filter", arg)
_do_enable_frame_filter(command_tuple, False)
class SetFrameFilterPriority(gdb.Command):
"""GDB command to set the priority of the specified frame-filter.
Usage: set frame-filter priority DICTIONARY NAME PRIORITY
DICTIONARY is the name of the frame filter dictionary on which to
operate. Named dictionaries are: "global" for the global frame
filter dictionary, "progspace" for the program space's framefilter
dictionary. If either of these two are not specified, the
dictionary name is assumed to be the name of the object-file name.
NAME matches the name of the frame filter to operate on.
    PRIORITY is the integer to assign as the new priority of the frame
    filter.
"""
def __init__(self):
super(SetFrameFilterPriority, self).__init__("set frame-filter " \
"priority",
gdb.COMMAND_DATA)
def _parse_pri_arg(self, arg):
"""Internal worker to parse a priority from a tuple.
Arguments:
arg: Tuple which contains the arguments from the command.
Returns:
A tuple containing the dictionary, name and priority from
the arguments.
Raises:
gdb.GdbError: An error parsing the arguments.
"""
argv = gdb.string_to_argv(arg);
argc = len(argv)
if argc != 3:
print("set frame-filter priority " \
"takes exactly three arguments.")
return None
return argv
def _set_filter_priority(self, command_tuple):
"""Internal worker for setting priority of frame-filters, by
parsing a tuple and calling _set_priority with the parsed
tuple.
Arguments:
command_tuple: Tuple which contains the arguments from the
command.
"""
list_op = command_tuple[0]
frame_filter = command_tuple[1]
# GDB returns arguments as a string, so convert priority to
# a number.
priority = int(command_tuple[2])
op_list = gdb.frames.return_list(list_op)
try:
ff = op_list[frame_filter]
except KeyError:
            msg = "frame-filter '" + str(frame_filter) + "' not found."
raise gdb.GdbError(msg)
gdb.frames.set_priority(ff, priority)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, False)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = self._parse_pri_arg(arg)
if command_tuple != None:
self._set_filter_priority(command_tuple)
class ShowFrameFilterPriority(gdb.Command):
"""GDB command to show the priority of the specified frame-filter.
Usage: show frame-filter priority DICTIONARY NAME
DICTIONARY is the name of the frame filter dictionary on which to
operate. Named dictionaries are: "global" for the global frame
filter dictionary, "progspace" for the program space's framefilter
dictionary. If either of these two are not specified, the
dictionary name is assumed to be the name of the object-file name.
NAME matches the name of the frame-filter to operate on.
"""
def __init__(self):
super(ShowFrameFilterPriority, self).__init__("show frame-filter " \
"priority",
gdb.COMMAND_DATA)
def _parse_pri_arg(self, arg):
"""Internal worker to parse a dictionary and name from a
tuple.
Arguments:
arg: Tuple which contains the arguments from the command.
Returns:
A tuple containing the dictionary, and frame filter name.
Raises:
gdb.GdbError: An error parsing the arguments.
"""
argv = gdb.string_to_argv(arg);
argc = len(argv)
if argc != 2:
print("show frame-filter priority " \
"takes exactly two arguments.")
return None
return argv
def get_filter_priority(self, frame_filters, name):
"""Worker for retrieving the priority of frame_filters.
Arguments:
frame_filters: Name of frame filter dictionary.
name: object to select printers.
Returns:
The priority of the frame filter.
Raises:
gdb.GdbError: A frame filter cannot be found.
"""
op_list = gdb.frames.return_list(frame_filters)
try:
ff = op_list[name]
except KeyError:
msg = "frame-filter '" + str(name) + "' not found."
raise gdb.GdbError(msg)
return gdb.frames.get_priority(ff)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, False)
else:
            printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = self._parse_pri_arg(arg)
if command_tuple == None:
return
filter_name = command_tuple[1]
list_name = command_tuple[0]
try:
priority = self.get_filter_priority(list_name, filter_name);
except Exception:
e = sys.exc_info()[1]
            print("Error printing filter priority for '"+filter_name+"':"+str(e))
else:
print("Priority of filter '" + filter_name + "' in list '" \
+ list_name + "' is: " + str(priority))
# Register commands
SetFilterPrefixCmd()
ShowFilterPrefixCmd()
InfoFrameFilter()
EnableFrameFilter()
DisableFrameFilter()
SetFrameFilterPriority()
ShowFrameFilterPriority()
| gpl-2.0 |
sobercoder/gem5 | configs/ruby/Ruby.py | 2 | 9500 | # Copyright (c) 2012, 2017 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath, fatal
from common import MemConfig
from topologies import *
from network import Network
def define_options(parser):
# By default, ruby uses the simple timing cpu
parser.set_defaults(cpu_type="TimingSimpleCPU")
parser.add_option("--ruby-clock", action="store", type="string",
default='2GHz',
help="Clock for blocks running at Ruby system's speed")
parser.add_option("--access-backing-store", action="store_true", default=False,
help="Should ruby maintain a second copy of memory")
# Options related to cache structure
parser.add_option("--ports", action="store", type="int", default=4,
help="used of transitions per cycle which is a proxy \
for the number of ports.")
# network options are in network/Network.py
# ruby mapping options
parser.add_option("--numa-high-bit", type="int", default=0,
help="high order address bit to use for numa mapping. " \
"0 = highest bit, not specified = lowest bit")
parser.add_option("--recycle-latency", type="int", default=10,
help="Recycle latency for ruby controller input buffers")
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
eval("%s.define_options(parser)" % protocol)
Network.define_options(parser)
def setup_memory_controllers(system, ruby, dir_cntrls, options):
ruby.block_size_bytes = options.cacheline_size
ruby.memory_size_bits = 48
index = 0
mem_ctrls = []
crossbars = []
# Sets bits to be used for interleaving. Creates memory controllers
# attached to a directory controller. A separate controller is created
# for each address range as the abstract memory can handle only one
# contiguous address range as of now.
for dir_cntrl in dir_cntrls:
crossbar = None
if len(system.mem_ranges) > 1:
crossbar = IOXBar()
crossbars.append(crossbar)
dir_cntrl.memory = crossbar.slave
for r in system.mem_ranges:
mem_ctrl = MemConfig.create_mem_ctrl(
MemConfig.get(options.mem_type), r, index, options.num_dirs,
int(math.log(options.num_dirs, 2)), options.cacheline_size)
if options.access_backing_store:
mem_ctrl.kvm_map=False
mem_ctrls.append(mem_ctrl)
if crossbar != None:
mem_ctrl.port = crossbar.master
else:
mem_ctrl.port = dir_cntrl.memory
index += 1
system.mem_ctrls = mem_ctrls
if len(crossbars) > 0:
ruby.crossbars = crossbars
def create_topology(controllers, options):
""" Called from create_system in configs/ruby/<protocol>.py
Must return an object which is a subclass of BaseTopology
found in configs/topologies/BaseTopology.py
This is a wrapper for the legacy topologies.
"""
exec "import topologies.%s as Topo" % options.topology
topology = eval("Topo.%s(controllers)" % options.topology)
return topology
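# Illustrative sketch (assumed option value): with options.topology set to
# "Crossbar", the exec/eval above resolve to topologies.Crossbar.Crossbar, so
# the call is equivalent to Topo.Crossbar(controllers).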
def create_system(options, full_system, system, piobus = None, dma_ports = []):
system.ruby = RubySystem()
ruby = system.ruby
# Create the network object
(network, IntLinkClass, ExtLinkClass, RouterClass, InterfaceClass) = \
Network.create_network(options, ruby)
ruby.network = network
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
try:
(cpu_sequencers, dir_cntrls, topology) = \
eval("%s.create_system(options, full_system, system, dma_ports,\
ruby)"
% protocol)
except:
        print "Error: could not create system for ruby protocol %s" % protocol
raise
# Create the network topology
topology.makeTopology(options, network, IntLinkClass, ExtLinkClass,
RouterClass)
# Initialize network based on topology
Network.init_network(options, network, InterfaceClass)
# Create a port proxy for connecting the system port. This is
# independent of the protocol and kept in the protocol-agnostic
# part (i.e. here).
sys_port_proxy = RubyPortProxy(ruby_system = ruby)
if piobus is not None:
sys_port_proxy.pio_master_port = piobus.slave
# Give the system port proxy a SimObject parent without creating a
# full-fledged controller
system.sys_port_proxy = sys_port_proxy
# Connect the system port for loading of binaries etc
system.system_port = system.sys_port_proxy.slave
setup_memory_controllers(system, ruby, dir_cntrls, options)
# Connect the cpu sequencers and the piobus
if piobus != None:
for cpu_seq in cpu_sequencers:
cpu_seq.pio_master_port = piobus.slave
cpu_seq.mem_master_port = piobus.slave
if buildEnv['TARGET_ISA'] == "x86":
cpu_seq.pio_slave_port = piobus.master
ruby.number_of_virtual_networks = ruby.network.number_of_virtual_networks
ruby._cpu_ports = cpu_sequencers
ruby.num_of_sequencers = len(cpu_sequencers)
# Create a backing copy of physical memory in case required
if options.access_backing_store:
ruby.access_backing_store = True
ruby.phys_mem = SimpleMemory(range=system.mem_ranges[0],
in_addr_map=False)
def create_directories(options, mem_ranges, ruby_system):
dir_cntrl_nodes = []
if options.numa_high_bit:
numa_bit = options.numa_high_bit
else:
# if the numa_bit is not specified, set the directory bits as the
# lowest bits above the block offset bits, and the numa_bit as the
# highest of those directory bits
dir_bits = int(math.log(options.num_dirs, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
numa_bit = block_size_bits + dir_bits - 1
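        # Worked example (assumed values): with options.num_dirs = 4 and a
        # 64-byte cacheline, dir_bits = 2 and block_size_bits = 6, so
        # numa_bit = 7, i.e. address bits 7:6 select the directory.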
for i in xrange(options.num_dirs):
dir_ranges = []
for r in mem_ranges:
addr_range = m5.objects.AddrRange(r.start, size = r.size(),
intlvHighBit = numa_bit,
intlvBits = dir_bits,
intlvMatch = i)
dir_ranges.append(addr_range)
dir_cntrl = Directory_Controller()
dir_cntrl.version = i
dir_cntrl.directory = RubyDirectoryMemory()
dir_cntrl.ruby_system = ruby_system
dir_cntrl.addr_ranges = dir_ranges
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
return dir_cntrl_nodes
def send_evicts(options):
# currently, 2 scenarios warrant forwarding evictions to the CPU:
# 1. The O3 model must keep the LSQ coherent with the caches
# 2. The x86 mwait instruction is built on top of coherence invalidations
# 3. The local exclusive monitor in ARM systems
if options.cpu_type == "DerivO3CPU" or \
buildEnv['TARGET_ISA'] in ('x86', 'arm'):
return True
return False
| bsd-3-clause |
AlexanderFabisch/scikit-learn | sklearn/externals/joblib/logger.py | 359 | 5135 | """
Helpers for logging.
This module needs much love to become useful.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2008 Gael Varoquaux
# License: BSD Style, 3 clauses.
from __future__ import print_function
import time
import sys
import os
import shutil
import logging
import pprint
from .disk import mkdirp
def _squeeze_time(t):
    """Subtract .1s from the time under Windows: this is the time it takes to
stat files. This is needed to make results similar to timings under
Unix, for tests
"""
if sys.platform.startswith('win'):
return max(0, t - .1)
else:
return t
def format_time(t):
t = _squeeze_time(t)
return "%.1fs, %.1fmin" % (t, t / 60.)
def short_format_time(t):
t = _squeeze_time(t)
if t > 60:
return "%4.1fmin" % (t / 60.)
else:
return " %5.1fs" % (t)
def pformat(obj, indent=0, depth=3):
if 'numpy' in sys.modules:
import numpy as np
print_options = np.get_printoptions()
np.set_printoptions(precision=6, threshold=64, edgeitems=1)
else:
print_options = None
out = pprint.pformat(obj, depth=depth, indent=indent)
if print_options:
np.set_printoptions(**print_options)
return out
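# Illustrative examples (values assumed, non-Windows timing) of the helpers
# above:
#
#   format_time(90.0)         # -> '90.0s, 1.5min'
#   short_format_time(30.0)   # -> '  30.0s'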
###############################################################################
# class `Logger`
###############################################################################
class Logger(object):
""" Base class for logging messages.
"""
def __init__(self, depth=3):
"""
Parameters
----------
depth: int, optional
The depth of objects printed.
"""
self.depth = depth
def warn(self, msg):
logging.warn("[%s]: %s" % (self, msg))
def debug(self, msg):
# XXX: This conflicts with the debug flag used in children class
logging.debug("[%s]: %s" % (self, msg))
def format(self, obj, indent=0):
""" Return the formated representation of the object.
"""
return pformat(obj, indent=indent, depth=self.depth)
###############################################################################
# class `PrintTime`
###############################################################################
class PrintTime(object):
""" Print and log messages while keeping track of time.
"""
def __init__(self, logfile=None, logdir=None):
if logfile is not None and logdir is not None:
raise ValueError('Cannot specify both logfile and logdir')
# XXX: Need argument docstring
self.last_time = time.time()
self.start_time = self.last_time
if logdir is not None:
logfile = os.path.join(logdir, 'joblib.log')
self.logfile = logfile
if logfile is not None:
mkdirp(os.path.dirname(logfile))
if os.path.exists(logfile):
# Rotate the logs
for i in range(1, 9):
try:
shutil.move(logfile + '.%i' % i,
logfile + '.%i' % (i + 1))
except:
"No reason failing here"
# Use a copy rather than a move, so that a process
# monitoring this file does not get lost.
try:
shutil.copy(logfile, logfile + '.1')
except:
"No reason failing here"
try:
with open(logfile, 'w') as logfile:
logfile.write('\nLogging joblib python script\n')
logfile.write('\n---%s---\n' % time.ctime(self.last_time))
except:
""" Multiprocessing writing to files can create race
conditions. Rather fail silently than crash the
computation.
"""
# XXX: We actually need a debug flag to disable this
# silent failure.
def __call__(self, msg='', total=False):
""" Print the time elapsed between the last call and the current
call, with an optional message.
"""
if not total:
time_lapse = time.time() - self.last_time
full_msg = "%s: %s" % (msg, format_time(time_lapse))
else:
# FIXME: Too much logic duplicated
time_lapse = time.time() - self.start_time
full_msg = "%s: %.2fs, %.1f min" % (msg, time_lapse,
time_lapse / 60)
print(full_msg, file=sys.stderr)
if self.logfile is not None:
try:
with open(self.logfile, 'a') as f:
print(full_msg, file=f)
except:
""" Multiprocessing writing to files can create race
conditions. Rather fail silently than crash the
calculation.
"""
# XXX: We actually need a debug flag to disable this
# silent failure.
self.last_time = time.time()
| bsd-3-clause |
catalan42/jna | native/libffi/generate-ios-source-and-headers.py | 183 | 5303 | #!/usr/bin/env python
import subprocess
import re
import os
import errno
import collections
import sys
class Platform(object):
pass
sdk_re = re.compile(r'.*-sdk ([a-zA-Z0-9.]*)')
def sdkinfo(sdkname):
ret = {}
for line in subprocess.Popen(['xcodebuild', '-sdk', sdkname, '-version'], stdout=subprocess.PIPE).stdout:
kv = line.strip().split(': ', 1)
if len(kv) == 2:
k,v = kv
ret[k] = v
return ret
sim_sdk_info = sdkinfo('iphonesimulator')
device_sdk_info = sdkinfo('iphoneos')
def latest_sdks():
latest_sim = None
latest_device = None
for line in subprocess.Popen(['xcodebuild', '-showsdks'], stdout=subprocess.PIPE).stdout:
match = sdk_re.match(line)
if match:
if 'Simulator' in line:
latest_sim = match.group(1)
elif 'iOS' in line:
latest_device = match.group(1)
return latest_sim, latest_device
sim_sdk, device_sdk = latest_sdks()
class simulator_platform(Platform):
sdk='iphonesimulator'
arch = 'i386'
name = 'simulator'
triple = 'i386-apple-darwin10'
sdkroot = sim_sdk_info['Path']
prefix = "#if !defined(__arm__) && defined(__i386__)\n\n"
suffix = "\n\n#endif"
class device_platform(Platform):
sdk='iphoneos'
name = 'ios'
arch = 'armv7'
triple = 'arm-apple-darwin10'
sdkroot = device_sdk_info['Path']
prefix = "#ifdef __arm__\n\n"
suffix = "\n\n#endif"
def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
out_filename = filename
if file_suffix:
split_name = os.path.splitext(filename)
out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])
with open(os.path.join(src_dir, filename)) as in_file:
with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
if prefix:
out_file.write(prefix)
out_file.write(in_file.read())
if suffix:
out_file.write(suffix)
headers_seen = collections.defaultdict(set)
def move_source_tree(src_dir, dest_dir, dest_include_dir, arch=None, prefix=None, suffix=None):
for root, dirs, files in os.walk(src_dir, followlinks=True):
relroot = os.path.relpath(root,src_dir)
def move_dir(arch, prefix='', suffix='', files=[]):
for file in files:
file_suffix = None
if file.endswith('.h'):
if dest_include_dir:
file_suffix = arch
if arch:
headers_seen[file].add(arch)
move_file(root, dest_include_dir, file, arch, prefix=prefix, suffix=suffix)
elif dest_dir:
outroot = os.path.join(dest_dir, relroot)
move_file(root, outroot, file, prefix=prefix, suffix=suffix)
if relroot == '.':
move_dir(arch=arch,
files=files,
prefix=prefix,
suffix=suffix)
elif relroot == 'arm':
move_dir(arch='arm',
prefix="#ifdef __arm__\n\n",
suffix="\n\n#endif",
files=files)
elif relroot == 'x86':
move_dir(arch='i386',
prefix="#if !defined(__arm__) && defined(__i386__)\n\n",
suffix="\n\n#endif",
files=files)
def build_target(platform):
def xcrun_cmd(cmd):
return subprocess.check_output(['xcrun', '-sdk', platform.sdkroot, '-find', cmd]).strip()
build_dir = 'build_' + platform.name
if not os.path.exists(build_dir):
os.makedirs(build_dir)
env = dict(CC=xcrun_cmd('clang'),
LD=xcrun_cmd('ld'),
CFLAGS='-arch %s -isysroot %s -miphoneos-version-min=4.0' % (platform.arch, platform.sdkroot))
working_dir=os.getcwd()
try:
os.chdir(build_dir)
subprocess.check_call(['../configure', '-host', platform.triple], env=env)
move_source_tree('.', None, '../ios/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
move_source_tree('./include', None, '../ios/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
finally:
os.chdir(working_dir)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
def main():
move_source_tree('src', 'ios/src', 'ios/include')
move_source_tree('include', None, 'ios/include')
build_target(simulator_platform)
build_target(device_platform)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
with open(os.path.join('ios/include', header_name), 'w') as header:
for arch in archs:
header.write('#include <%s_%s%s>\n' % (basename, arch, suffix))
if __name__ == '__main__':
main()
| lgpl-2.1 |
azureplus/hue | desktop/core/ext-py/python-ldap-2.3.13/Lib/ldap/dn.py | 45 | 2793 | """
dn.py - misc stuff for handling distinguished names (see RFC 4514)
See http://www.python-ldap.org/ for details.
\$Id: dn.py,v 1.11 2010/06/03 12:26:39 stroeder Exp $
Compability:
- Tested with Python 2.0+
"""
from ldap import __version__
import _ldap
import ldap.functions
def escape_dn_chars(s):
"""
Escape all DN special characters found in s
with a back-slash (see RFC 4514, section 2.4)
"""
if s:
s = s.replace('\\','\\\\')
s = s.replace(',' ,'\\,')
s = s.replace('+' ,'\\+')
s = s.replace('"' ,'\\"')
s = s.replace('<' ,'\\<')
s = s.replace('>' ,'\\>')
s = s.replace(';' ,'\\;')
s = s.replace('=' ,'\\=')
s = s.replace('\000' ,'\\\000')
if s[0]=='#' or s[0]==' ':
s = ''.join(('\\',s))
if s[-1]==' ':
s = ''.join((s[:-1],'\\ '))
return s
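# Illustrative example (assumed input) of the escaping above:
#
#   escape_dn_chars('Smith, John+Jr')  # returns 'Smith\\, John\\+Jr'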
def str2dn(dn,flags=0):
"""
This function takes a DN as string as parameter and returns
a decomposed DN. It's the inverse to dn2str().
flags describes the format of the dn
See also the OpenLDAP man-page ldap_str2dn(3)
"""
if not dn:
return []
return ldap.functions._ldap_function_call(None,_ldap.str2dn,dn,flags)
def dn2str(dn):
"""
This function takes a decomposed DN as parameter and returns
a single string. It's the inverse to str2dn() but will always
return a DN in LDAPv3 format compliant to RFC 4514.
"""
return ','.join([
'+'.join([
'='.join((atype,escape_dn_chars(avalue or '')))
for atype,avalue,dummy in rdn])
for rdn in dn
])
def explode_dn(dn,notypes=0,flags=0):
"""
explode_dn(dn [, notypes=0]) -> list
This function takes a DN and breaks it up into its component parts.
The notypes parameter is used to specify that only the component's
attribute values be returned and not the attribute types.
"""
if not dn:
return []
dn_decomp = str2dn(dn,flags)
rdn_list = []
for rdn in dn_decomp:
if notypes:
rdn_list.append('+'.join([
escape_dn_chars(avalue or '')
for atype,avalue,dummy in rdn
]))
else:
rdn_list.append('+'.join([
'='.join((atype,escape_dn_chars(avalue or '')))
for atype,avalue,dummy in rdn
]))
return rdn_list
def explode_rdn(rdn,notypes=0,flags=0):
"""
explode_rdn(rdn [, notypes=0]) -> list
This function takes a RDN and breaks it up into its component parts
if it is a multi-valued RDN.
The notypes parameter is used to specify that only the component's
attribute values be returned and not the attribute types.
"""
if not rdn:
return []
rdn_decomp = str2dn(rdn,flags)[0]
if notypes:
return [avalue or '' for atype,avalue,dummy in rdn_decomp]
else:
return ['='.join((atype,escape_dn_chars(avalue or ''))) for atype,avalue,dummy in rdn_decomp]
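# Illustrative examples (assumed DN) of the helpers above:
#
#   explode_dn('cn=John Doe,dc=example,dc=com')
#   # -> ['cn=John Doe', 'dc=example', 'dc=com']
#   explode_dn('cn=John Doe,dc=example,dc=com', notypes=1)
#   # -> ['John Doe', 'example', 'com']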
| apache-2.0 |
tlby/mxnet | python/mxnet/contrib/onnx/onnx2mx/_op_translations.py | 2 | 34598 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
""" Module for translating ONNX operators into MXNet operators"""
# pylint: disable=unused-argument,protected-access
import numpy as np
from . import _translation_utils as translation_utils
from .... import symbol
# Method definitions for the callable objects mapped in the import_helper module
def identity(attrs, inputs, proto_obj):
    """Returns the identity function of the input."""
return 'identity', attrs, inputs
def random_uniform(attrs, inputs, proto_obj):
"""Draw random samples from a uniform distribtuion."""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
"Instructions to install - https://github.com/onnx/onnx")
new_attrs = translation_utils._remove_attributes(attrs, ['seed'])
new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs.get('dtype', 1))]
return 'random_uniform', new_attrs, inputs
def random_normal(attrs, inputs, proto_obj):
"""Draw random samples from a Gaussian distribution."""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
"Instructions to install - https://github.com/onnx/onnx")
new_attr = translation_utils._remove_attributes(attrs, ['seed'])
new_attr = translation_utils._fix_attribute_names(new_attr, {'mean': 'loc'})
new_attr['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attr.get('dtype', 1))]
return 'random_normal', new_attr, inputs
def sample_multinomial(attrs, inputs, proto_obj):
"""Draw random samples from a multinomial distribution."""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
new_attrs = translation_utils._remove_attributes(attrs, ['seed'])
new_attrs = translation_utils._fix_attribute_names(new_attrs, {'sample_size': 'shape'})
new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(attrs.get('dtype', 6))]
return 'sample_multinomial', new_attrs, inputs
# Arithmetic Operations
def add(attrs, inputs, proto_obj):
"""Adding two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_add', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_add', new_attr, inputs
def subtract(attrs, inputs, proto_obj):
"""Subtracting two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_sub', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_sub', new_attr, inputs
def multiply(attrs, inputs, proto_obj):
"""Multiply two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_mul', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_mul', new_attr, inputs
def divide(attrs, inputs, proto_obj):
"""Divide two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_div', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_div', new_attr, inputs
def mean(attrs, inputs, proto_obj):
"""Mean of all the input tensors."""
concat_input = [symbol.expand_dims(op_input, axis=0) for op_input in inputs]
concat_sym = symbol.concat(*concat_input, dim=0)
mean_sym = symbol.mean(concat_sym, axis=0)
return mean_sym, attrs, inputs
def logical_and(attrs, inputs, proto_obj):
"""Logical and of two input arrays."""
return 'broadcast_logical_and', attrs, inputs
def logical_or(attrs, inputs, proto_obj):
"""Logical or of two input arrays."""
return 'broadcast_logical_or', attrs, inputs
def logical_xor(attrs, inputs, proto_obj):
"""Logical xor of two input arrays."""
return 'broadcast_logical_xor', attrs, inputs
def logical_not(attrs, inputs, proto_obj):
"""Logical not of two input arrays."""
return 'logical_not', attrs, inputs
def absolute(attrs, inputs, proto_obj):
"""Returns element-wise absolute value of the input."""
return 'abs', attrs, inputs
def negative(attrs, inputs, proto_obj):
"""Negation of every element in a tensor"""
return 'negative', attrs, inputs
def add_n(attrs, inputs, proto_obj):
"""Elementwise sum of arrays"""
return 'add_n', attrs, inputs
# Sorting and Searching
def argmax(attrs, inputs, proto_obj):
"""Returns indices of the maximum values along an axis"""
axis = attrs.get('axis', 0)
keepdims = attrs.get('keepdims', 1)
argmax_op = symbol.argmax(inputs[0], axis=axis, keepdims=keepdims)
# onnx argmax operator always expects int64 as output type
cast_attrs = {'dtype': 'int64'}
return 'cast', cast_attrs, argmax_op
def argmin(attrs, inputs, proto_obj):
"""Returns indices of the minimum values along an axis."""
axis = attrs.get('axis', 0)
keepdims = attrs.get('keepdims', 1)
argmin_op = symbol.argmin(inputs[0], axis=axis, keepdims=keepdims)
# onnx argmax operator always expects int64 as output type
cast_attrs = {'dtype': 'int64'}
return 'cast', cast_attrs, argmin_op
def maximum(attrs, inputs, proto_obj):
"""
Elementwise maximum of arrays.
MXNet maximum compares only two symbols at a time.
ONNX can send more than two to compare.
Breaking into multiple mxnet ops to compare two symbols at a time
"""
if len(inputs) > 1:
mxnet_op = symbol.maximum(inputs[0], inputs[1])
for op_input in inputs[2:]:
mxnet_op = symbol.maximum(mxnet_op, op_input)
else:
mxnet_op = symbol.maximum(inputs[0], inputs[0])
return mxnet_op, attrs, inputs
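# Note (illustrative): for three ONNX inputs [a, b, c] the fold above builds
# symbol.maximum(symbol.maximum(a, b), c); minimum() below follows the same
# pairwise pattern.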
def minimum(attrs, inputs, proto_obj):
"""Elementwise minimum of arrays."""
# MXNet minimum compares only two symbols at a time.
# ONNX can send more than two to compare.
# Breaking into multiple mxnet ops to compare two symbols at a time
if len(inputs) > 1:
mxnet_op = symbol.minimum(inputs[0], inputs[1])
for op_input in inputs[2:]:
mxnet_op = symbol.minimum(mxnet_op, op_input)
else:
mxnet_op = symbol.minimum(inputs[0], inputs[0])
return mxnet_op, attrs, inputs
def lesser(attrs, inputs, proto_obj):
"""Logical Lesser operator with broadcasting."""
return 'broadcast_lesser', attrs, inputs
def greater(attrs, inputs, proto_obj):
"""Logical Greater operator with broadcasting."""
return 'broadcast_greater', attrs, inputs
def equal(attrs, inputs, proto_obj):
"""Logical Equal operator with broadcasting."""
return 'broadcast_equal', attrs, inputs
#Hyperbolic functions
def tanh(attrs, inputs, proto_obj):
"""Returns the hyperbolic tangent of the input array."""
return 'tanh', attrs, inputs
# Rounding
def ceil(attrs, inputs, proto_obj):
""" Calculate ceil value for input """
return 'ceil', attrs, inputs
def floor(attrs, inputs, proto_obj):
""" Calculate floor value for input """
return 'floor', attrs, inputs
# Joining and spliting
def concat(attrs, inputs, proto_obj):
""" Joins input arrays along a given axis. """
new_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'})
return 'concat', new_attrs, inputs
# Basic neural network functions
def softsign(attrs, inputs, proto_obj):
"""Computes softsign of x element-wise."""
return 'softsign', attrs, inputs
def sigmoid(attrs, inputs, proto_obj):
"""Computes elementwise sigmoid of the input array"""
return 'sigmoid', attrs, inputs
def hardsigmoid(attrs, inputs, proto_obj):
"""Computes elementwise hard sigmoid of the input array"""
return 'hard_sigmoid', attrs, inputs
def relu(attrs, inputs, proto_obj):
"""Computes rectified linear function."""
return 'relu', attrs, inputs
def pad(attrs, inputs, proto_obj):
""" Add padding to input tensor"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width',
'value' : 'constant_value'
})
new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
return 'pad', new_attrs, inputs
def matrix_multiplication(attrs, inputs, proto_obj):
"""Performs general matrix multiplication"""
return 'linalg_gemm2', attrs, inputs
def batch_norm(attrs, inputs, proto_obj):
"""Batch normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps',
'is_test': 'fix_gamma'})
new_attrs = translation_utils._remove_attributes(new_attrs,
['spatial', 'consumed_inputs'])
# Disable cuDNN BN only if epsilon from model is < than minimum cuDNN eps (1e-5)
cudnn_min_eps = 1e-5
cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': cudnn_off})
# in test mode "fix_gamma" should be unset.
new_attrs['fix_gamma'] = not attrs.get('is_test', 1)
return 'BatchNorm', new_attrs, inputs
def instance_norm(attrs, inputs, proto_obj):
"""Instance Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'})
new_attrs['eps'] = attrs.get('epsilon', 1e-5)
return 'InstanceNorm', new_attrs, inputs
def leaky_relu(attrs, inputs, proto_obj):
"""Leaky Relu function"""
if 'alpha' in attrs:
new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
else:
new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01})
return 'LeakyReLU', new_attrs, inputs
def _elu(attrs, inputs, proto_obj):
"""Elu function"""
if 'alpha' in attrs:
new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
else:
new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'act_type': 'elu'})
return 'LeakyReLU', new_attrs, inputs
def _prelu(attrs, inputs, proto_obj):
"""PRelu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'})
return 'LeakyReLU', new_attrs, inputs
def _selu(attrs, inputs, proto_obj):
"""Selu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'})
return 'LeakyReLU', new_attrs, inputs
def softmax(attrs, inputs, proto_obj):
"""Softmax function."""
if 'axis' not in attrs:
attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1})
return 'softmax', attrs, inputs
def log_softmax(attrs, inputs, proto_obj):
"""Computes the log softmax of the input. This is equivalent to
computing softmax followed by log."""
return 'log_softmax', attrs, inputs
def softplus(attrs, inputs, proto_obj):
"""Applies the sofplus activation function element-wise to the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'})
return 'Activation', new_attrs, inputs
def conv(attrs, inputs, proto_obj):
"""Compute N-D convolution on (N+2)-D input."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
'strides' : 'stride',
'pads': 'pad',
'dilations': 'dilate',
'group': 'num_group'})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
new_attrs = translation_utils._fix_bias('Convolution', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('Convolution', new_attrs, inputs, proto_obj)
kernel = new_attrs['kernel']
stride = new_attrs['stride'] if 'stride' in new_attrs else []
padding = new_attrs['pad'] if 'pad' in new_attrs else []
dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
num_filter = new_attrs['num_filter']
num_group = new_attrs['num_group']
no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else 0
bias = None if no_bias is True else inputs[2]
# Unlike ONNX, MXNet's convolution operator does not support asymmetric padding, so we first
# use 'Pad' operator, which supports asymmetric padding. Then use the convolution operator.
pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)
conv_op = symbol.Convolution(pad_op, inputs[1], bias,
kernel=kernel, stride=stride, dilate=dilations,
num_filter=num_filter, num_group=num_group, no_bias=no_bias)
return conv_op, new_attrs, inputs
def deconv(attrs, inputs, proto_obj):
"""Computes transposed convolution of the input tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
'strides' : 'stride',
'pads': 'pad',
'dilations': 'dilate',
'group': 'num_group'})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, proto_obj)
kernel = new_attrs['kernel']
stride = new_attrs['stride'] if 'stride' in new_attrs else []
padding = new_attrs['pad'] if 'pad' in new_attrs else []
dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
num_filter = new_attrs['num_filter']
num_group = new_attrs['num_group']
no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else False
bias = None if no_bias is True else inputs[2]
# Unlike ONNX, MXNet's deconvolution operator does not support asymmetric padding, so we first
# use 'Pad' operator, which supports asymmetric padding. Then use the deconvolution operator.
pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)
deconv_op = symbol.Deconvolution(pad_op, inputs[1], bias,
kernel=kernel, stride=stride, dilate=dilations,
num_filter=num_filter, num_group=num_group, no_bias=no_bias)
return deconv_op, new_attrs, inputs
def fully_connected(attrs, inputs, proto_obj):
"""Applies a linear transformation: Y=XWT+b."""
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
new_attrs = translation_utils._fix_bias('FullyConnected', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('FullyConnected', new_attrs, inputs, proto_obj)
return 'FullyConnected', new_attrs, inputs
def global_maxpooling(attrs, inputs, proto_obj):
"""Performs max pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'max'})
return 'Pooling', new_attrs, inputs
def global_avgpooling(attrs, inputs, proto_obj):
"""Performs avg pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'avg'})
return 'Pooling', new_attrs, inputs
def global_lppooling(attrs, inputs, proto_obj):
"""Performs global lp pooling on the input."""
p_value = attrs.get('p', 2)
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'lp',
'p_value': p_value})
new_attrs = translation_utils._remove_attributes(new_attrs, ['p'])
return 'Pooling', new_attrs, inputs
def linalg_gemm(attrs, inputs, proto_obj):
"""Performs general matrix multiplication and accumulation"""
trans_a = 0
trans_b = 0
alpha = 1
beta = 1
if 'transA' in attrs:
trans_a = attrs['transA']
if 'transB' in attrs:
trans_b = attrs['transB']
if 'alpha' in attrs:
alpha = attrs['alpha']
if 'beta' in attrs:
beta = attrs['beta']
flatten_a = symbol.flatten(inputs[0])
matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1],
transpose_a=trans_a, transpose_b=trans_b,
alpha=alpha)
gemm_op = symbol.broadcast_add(matmul_op, beta*inputs[2])
new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a',
'transB': 'transpose_b'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
return gemm_op, new_attrs, inputs
def local_response_norm(attrs, inputs, proto_obj):
"""Local Response Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'bias': 'knorm',
'size' : 'nsize'})
return 'LRN', new_attrs, inputs
def dropout(attrs, inputs, proto_obj):
"""Dropout Regularization."""
mode = 'training'
if 'is_test' in attrs and attrs['is_test'] == 0:
mode = 'always'
new_attrs = translation_utils._fix_attribute_names(attrs,
{'ratio': 'p'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
return 'Dropout', new_attrs, inputs
# Changing shape and type.
def reshape(attrs, inputs, proto_obj):
"""Reshape the given array by the shape attribute."""
if len(inputs) == 1:
return 'reshape', attrs, inputs[0]
reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy())
reshape_shape = [int(i) for i in reshape_shape]
new_attrs = {'shape': reshape_shape}
return 'reshape', new_attrs, inputs[:1]
def cast(attrs, inputs, proto_obj):
""" Cast input to a given dtype"""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]
return 'cast', new_attrs, inputs
def split(attrs, inputs, proto_obj):
"""Splits an array along a particular axis into multiple sub-arrays."""
split_list = attrs.get('split') if 'split' in attrs else []
new_attrs = translation_utils._fix_attribute_names(attrs,
{'split' : 'num_outputs'})
if 'axis' not in attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'axis': 0})
if not split_list:
num_outputs = len(proto_obj.model_metadata.get('output_tensor_data'))
else:
if len(set(split_list)) == 1:
num_outputs = len(split_list)
else:
raise NotImplementedError("Operator {} in MXNet does not support variable splits."
"Tracking the issue to support variable split here: "
"https://github.com/apache/incubator-mxnet/issues/11594"
.format('split'))
new_attrs['num_outputs'] = num_outputs
return 'split', new_attrs, inputs
def _slice(attrs, inputs, proto_obj):
"""Returns a slice of the input tensor along multiple axes."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
'ends' : 'end',
'starts' : 'begin'})
    # ONNX slice provides slicing over multiple axes. MXNet's slice_axis handles one
    # axis at a time, so we chain one slice_axis operator per axis.
begin = new_attrs.get('begin')
end = new_attrs.get('end')
axes = new_attrs.get('axis', tuple(range(len(begin))))
slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
    if len(axes) > 1:
        # the first axis is already sliced above, so skip it here to avoid slicing it twice
        for i, axis in enumerate(axes[1:], start=1):
            slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
return slice_op, new_attrs, inputs
def transpose(attrs, inputs, proto_obj):
"""Transpose the input array."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'perm' : 'axes'})
return 'transpose', new_attrs, inputs
def squeeze(attrs, inputs, proto_obj):
"""Remove single-dimensional entries from the shape of a tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis'})
return 'squeeze', new_attrs, inputs
def unsqueeze(attrs, inputs, cls):
"""Inserts a new axis of size 1 into the array shape"""
# MXNet can only add one axis at a time.
mxnet_op = inputs[0]
for axis in attrs["axes"]:
mxnet_op = symbol.expand_dims(mxnet_op, axis=axis)
return mxnet_op, attrs, inputs
def flatten(attrs, inputs, proto_obj):
"""Flattens the input array into a 2-D array by collapsing the higher dimensions."""
    # MXNet's Flatten does not support an axis attribute; it always behaves as axis=1
if 'axis' in attrs and attrs['axis'] != 1:
raise RuntimeError("Flatten operator only supports axis=1")
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
return 'Flatten', new_attrs, inputs
def clip(attrs, inputs, proto_obj):
"""Clips (limits) the values in an array."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min',
'max' : 'a_max'})
if 'a_max' not in new_attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_max' : np.inf})
if 'a_min' not in new_attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_min' : -np.inf})
return 'clip', new_attrs, inputs
def gather(attrs, inputs, proto_obj):
"""Gather elements from an input array along the given axis."""
return 'take', attrs, inputs
#Powers
def reciprocal(attrs, inputs, proto_obj):
"""Returns the reciprocal of the argument, element-wise."""
return 'reciprocal', attrs, inputs
def squareroot(attrs, inputs, proto_obj):
"""Returns element-wise square-root value of the input."""
return 'sqrt', attrs, inputs
def power(attrs, inputs, proto_obj):
"""Returns element-wise result of base element raised to powers from exp element."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'exponent':'exp'})
if 'broadcast' in attrs:
new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
if attrs['broadcast'] == 1:
return 'broadcast_power', new_attrs, inputs
else:
mxnet_op = symbol.pow(inputs[0], inputs[1])
return mxnet_op, new_attrs, inputs
mxnet_op = symbol.broadcast_power(inputs[0], inputs[1])
return mxnet_op, new_attrs, inputs
def exponent(attrs, inputs, proto_obj):
"""Elementwise exponent of input array."""
return 'exp', attrs, inputs
def _cos(attrs, inputs, proto_obj):
"""Elementwise cosine of input array."""
return 'cos', attrs, inputs
def _sin(attrs, inputs, proto_obj):
"""Elementwise sine of input array."""
return 'sin', attrs, inputs
def _tan(attrs, inputs, proto_obj):
"""Elementwise tan of input array."""
return 'tan', attrs, inputs
def arccos(attrs, inputs, proto_obj):
"""Elementwise inverse cos of input array."""
return 'arccos', attrs, inputs
def arcsin(attrs, inputs, proto_obj):
"""Elementwise inverse sin of input array."""
return 'arcsin', attrs, inputs
def arctan(attrs, inputs, proto_obj):
"""Elementwise inverse tan of input array."""
return 'arctan', attrs, inputs
def _log(attrs, inputs, proto_obj):
"""Elementwise log of input array."""
return 'log', attrs, inputs
# Reduce Functions
def reduce_max(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by maximum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'max', new_attrs, inputs
def reduce_mean(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by mean value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'mean', new_attrs, inputs
def reduce_min(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by minimum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'min', new_attrs, inputs
def reduce_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'sum', new_attrs, inputs
def reduce_prod(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by product value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'prod', new_attrs, inputs
def reduce_log_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs
def reduce_log_sum_exp(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum exp value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
exp_op = symbol.exp(inputs[0])
sum_op = symbol.sum(exp_op, axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs
def reduce_sum_square(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum square value"""
square_op = symbol.square(inputs[0])
sum_op = symbol.sum(square_op, axis=attrs.get('axes'),
keepdims=attrs.get('keepdims'))
return sum_op, attrs, inputs
def reduce_l1(attrs, inputs, proto_obj):
"""Reduce input tensor by l1 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'ord' : 1})
return 'norm', new_attrs, inputs
def shape(attrs, inputs, proto_obj):
"""Returns shape of input array."""
return 'shape_array', attrs, inputs
def size(attrs, inputs, proto_obj):
"""Returns array containing size of data."""
return "size_array", attrs, inputs
def reduce_l2(attrs, inputs, proto_obj):
"""Reduce input tensor by l2 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'norm', new_attrs, inputs
def avg_pooling(attrs, inputs, proto_obj):
""" Average pooling"""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
'pads': 'pad',
})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'pooling_convention': 'valid'
})
new_op = translation_utils._fix_pooling('avg', inputs, new_attrs)
return new_op, new_attrs, inputs
def lp_pooling(attrs, inputs, proto_obj):
"""LP Pooling"""
p_value = attrs.get('p', 2)
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
'pads': 'pad'
})
new_attrs = translation_utils._remove_attributes(new_attrs, ['p'])
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'pooling_convention': 'valid',
'p_value': p_value
})
new_op = translation_utils._fix_pooling('lp', inputs, new_attrs)
return new_op, new_attrs, inputs
def max_pooling(attrs, inputs, proto_obj):
""" Average pooling"""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
'pads': 'pad',
})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'pooling_convention': 'valid'
})
new_op = translation_utils._fix_pooling('max', inputs, new_attrs)
return new_op, new_attrs, inputs
def max_roi_pooling(attrs, inputs, proto_obj):
"""Max ROI Pooling."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'pooled_shape': 'pooled_size',
'spatial_scale': 'spatial_scale'
})
return 'ROIPooling', new_attrs, inputs
def depthtospace(attrs, inputs, proto_obj):
"""Rearranges data from depth into blocks of spatial data."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'})
return "depth_to_space", new_attrs, inputs
def spacetodepth(attrs, inputs, proto_obj):
"""Rearranges blocks of spatial data into depth."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'})
return "space_to_depth", new_attrs, inputs
def hardmax(attrs, inputs, proto_obj):
"""Returns batched one-hot vectors."""
input_tensor_data = proto_obj.model_metadata.get('input_tensor_data')[0]
input_shape = input_tensor_data[1]
axis = int(attrs.get('axis', 1))
axis = axis if axis >= 0 else len(input_shape) + axis
if axis == len(input_shape) - 1:
amax = symbol.argmax(inputs[0], axis=-1)
one_hot = symbol.one_hot(amax, depth=input_shape[-1])
return one_hot, attrs, inputs
# since reshape doesn't take a tensor for shape,
    # computing with np.prod. This needs to be changed
    # to use mx.sym.prod() when mx.sym.reshape() is fixed.
# (https://github.com/apache/incubator-mxnet/issues/10789)
new_shape = (int(np.prod(input_shape[:axis])),
int(np.prod(input_shape[axis:])))
reshape_op = symbol.reshape(inputs[0], new_shape)
amax = symbol.argmax(reshape_op, axis=-1)
one_hot = symbol.one_hot(amax, depth=new_shape[-1])
hardmax_op = symbol.reshape(one_hot, input_shape)
return hardmax_op, attrs, inputs
def lpnormalization(attrs, inputs, proto_obj):
"""ONNX does not have eps attribute, so cannot map it to L2normalization in MXNet
without that, it works as norm operator discussion in PR:
https://github.com/onnx/onnx/pull/1330"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'p': 'ord'})
axis = int(attrs.get("axis", -1))
new_attrs.update(axis=axis)
return 'norm', new_attrs, inputs
| apache-2.0 |
AnotherIvan/calibre | src/calibre/ebooks/lrf/lrfparser.py | 15 | 7322 | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
''''''
import sys, array, os, re, codecs, logging
from calibre import setup_cli_handlers
from calibre.utils.config import OptionParser
from calibre.utils.filenames import ascii_filename
from calibre.ebooks.lrf.meta import LRFMetaFile
from calibre.ebooks.lrf.objects import get_object, PageTree, StyleObject, \
Font, Text, TOCObject, BookAttr, ruby_tags
class LRFDocument(LRFMetaFile):
class temp(object): pass
def __init__(self, stream):
LRFMetaFile.__init__(self, stream)
self.scramble_key = self.xor_key
self.page_trees = []
self.font_map = {}
self.image_map = {}
self.toc = ''
self.keep_parsing = True
def parse(self):
self._parse_objects()
self.metadata = LRFDocument.temp()
for a in ('title', 'title_reading', 'author', 'author_reading', 'book_id',
'classification', 'free_text', 'publisher', 'label', 'category'):
setattr(self.metadata, a, getattr(self, a))
self.doc_info = LRFDocument.temp()
for a in ('thumbnail', 'language', 'creator', 'producer', 'page'):
setattr(self.doc_info, a, getattr(self, a))
self.doc_info.thumbnail_extension = self.thumbail_extension()
self.device_info = LRFDocument.temp()
for a in ('dpi', 'width', 'height'):
setattr(self.device_info, a, getattr(self, a))
def _parse_objects(self):
self.objects = {}
self._file.seek(self.object_index_offset)
obj_array = array.array("I", self._file.read(4*4*self.number_of_objects))
if ord(array.array("i",[1]).tostring()[0])==0: #big-endian
obj_array.byteswap()
for i in range(self.number_of_objects):
if not self.keep_parsing:
break
objid, objoff, objsize = obj_array[i*4:i*4+3]
self._parse_object(objid, objoff, objsize)
for obj in self.objects.values():
if not self.keep_parsing:
break
if hasattr(obj, 'initialize'):
obj.initialize()
def _parse_object(self, objid, objoff, objsize):
obj = get_object(self, self._file, objid, objoff, objsize, self.scramble_key)
self.objects[objid] = obj
if isinstance(obj, PageTree):
self.page_trees.append(obj)
elif isinstance(obj, TOCObject):
self.toc = obj
elif isinstance(obj, BookAttr):
self.ruby_tags = {}
for h in ruby_tags.values():
attr = h[0]
if hasattr(obj, attr):
self.ruby_tags[attr] = getattr(obj, attr)
def __iter__(self):
for pt in self.page_trees:
yield pt
def write_files(self):
for obj in self.image_map.values() + self.font_map.values():
open(obj.file, 'wb').write(obj.stream)
def to_xml(self, write_files=True):
bookinfo = u'<BookInformation>\n<Info version="1.1">\n<BookInfo>\n'
bookinfo += u'<Title reading="%s">%s</Title>\n'%(self.metadata.title_reading, self.metadata.title)
bookinfo += u'<Author reading="%s">%s</Author>\n'%(self.metadata.author_reading, self.metadata.author)
bookinfo += u'<BookID>%s</BookID>\n'%(self.metadata.book_id,)
bookinfo += u'<Publisher reading="">%s</Publisher>\n'%(self.metadata.publisher,)
bookinfo += u'<Label reading="">%s</Label>\n'%(self.metadata.label,)
bookinfo += u'<Category reading="">%s</Category>\n'%(self.metadata.category,)
bookinfo += u'<Classification reading="">%s</Classification>\n'%(self.metadata.classification,)
bookinfo += u'<FreeText reading="">%s</FreeText>\n</BookInfo>\n<DocInfo>\n'%(self.metadata.free_text,)
th = self.doc_info.thumbnail
if th:
prefix = ascii_filename(self.metadata.title)
bookinfo += u'<CThumbnail file="%s" />\n'%(prefix+'_thumbnail.'+self.doc_info.thumbnail_extension,)
if write_files:
open(prefix+'_thumbnail.'+self.doc_info.thumbnail_extension, 'wb').write(th)
bookinfo += u'<Language reading="">%s</Language>\n'%(self.doc_info.language,)
bookinfo += u'<Creator reading="">%s</Creator>\n'%(self.doc_info.creator,)
bookinfo += u'<Producer reading="">%s</Producer>\n'%(self.doc_info.producer,)
bookinfo += u'<SumPage>%s</SumPage>\n</DocInfo>\n</Info>\n%s</BookInformation>\n'%(self.doc_info.page,self.toc)
pages = u''
done_main = False
pt_id = -1
for page_tree in self:
if not done_main:
done_main = True
pages += u'<Main>\n'
close = u'</Main>\n'
pt_id = page_tree.id
else:
pages += u'<PageTree objid="%d">\n'%(page_tree.id,)
close = u'</PageTree>\n'
for page in page_tree:
pages += unicode(page)
pages += close
traversed_objects = [int(i) for i in re.findall(r'objid="(\w+)"', pages)] + [pt_id]
objects = u'\n<Objects>\n'
styles = u'\n<Style>\n'
for obj in self.objects:
obj = self.objects[obj]
if obj.id in traversed_objects:
continue
if isinstance(obj, (Font, Text, TOCObject)):
continue
if isinstance(obj, StyleObject):
styles += unicode(obj)
else:
objects += unicode(obj)
styles += '</Style>\n'
objects += '</Objects>\n'
if write_files:
self.write_files()
return '<BBeBXylog version="1.0">\n' + bookinfo + pages + styles + objects + '</BBeBXylog>'
def option_parser():
parser = OptionParser(usage=_('%prog book.lrf\nConvert an LRF file into an LRS (XML UTF-8 encoded) file'))
parser.add_option('--output', '-o', default=None, help=_('Output LRS file'), dest='out')
parser.add_option('--dont-output-resources', default=True, action='store_false',
help=_('Do not save embedded image and font files to disk'),
dest='output_resources')
parser.add_option('--verbose', default=False, action='store_true', dest='verbose', help=_('Be more verbose'))
return parser
def main(args=sys.argv, logger=None):
parser = option_parser()
opts, args = parser.parse_args(args)
if logger is None:
level = logging.DEBUG if opts.verbose else logging.INFO
logger = logging.getLogger('lrf2lrs')
setup_cli_handlers(logger, level)
if len(args) != 2:
parser.print_help()
return 1
if opts.out is None:
opts.out = os.path.join(os.path.dirname(args[1]), os.path.splitext(os.path.basename(args[1]))[0]+".lrs")
o = codecs.open(os.path.abspath(os.path.expanduser(opts.out)), 'wb', 'utf-8')
o.write(u'<?xml version="1.0" encoding="UTF-8"?>\n')
logger.info(_('Parsing LRF...'))
d = LRFDocument(open(args[1], 'rb'))
d.parse()
logger.info(_('Creating XML...'))
o.write(d.to_xml(write_files=opts.output_resources))
logger.info(_('LRS written to ')+opts.out)
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
pratapvardhan/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
dwitvliet/CATMAID | django/applications/catmaid/control/link.py | 1 | 6452 | import json
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from catmaid.models import UserRole, Project, Relation, Treenode, Connector, \
TreenodeConnector, ClassInstance
from catmaid.control.authentication import requires_user_role, can_edit_or_fail
@requires_user_role(UserRole.Annotate)
def create_link(request, project_id=None):
""" Create a link, currently only a presynaptic_to or postsynaptic_to relationship
between a treenode and a connector.
"""
from_id = int(request.POST.get('from_id', 0))
to_id = int(request.POST.get('to_id', 0))
link_type = request.POST.get('link_type', 'none')
try:
project = Project.objects.get(id=project_id)
relation = Relation.objects.get(project=project, relation_name=link_type)
from_treenode = Treenode.objects.get(id=from_id)
to_connector = Connector.objects.get(id=to_id, project=project)
links = TreenodeConnector.objects.filter(
connector=to_id,
treenode=from_id,
relation=relation.id)
except ObjectDoesNotExist as e:
return HttpResponse(json.dumps({'error': e.message}))
if links.count() > 0:
return HttpResponse(json.dumps({'error': "A relation '%s' between these two elements already exists!" % link_type}))
related_skeleton_count = ClassInstance.objects.filter(project=project, id=from_treenode.skeleton.id).count()
if related_skeleton_count > 1:
# Can never happen. What motivated this check for an error of this kind? Would imply that a treenode belongs to more than one skeleton, which was possible when skeletons owned treendoes via element_of relations rather than by the skeleton_id column.
return HttpResponse(json.dumps({'error': 'Multiple rows for treenode with ID #%s found' % from_id}))
elif related_skeleton_count == 0:
return HttpResponse(json.dumps({'error': 'Failed to retrieve skeleton id of treenode #%s' % from_id}))
if link_type == 'presynaptic_to':
# Enforce only one presynaptic link
presyn_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation)
if (presyn_links.count() != 0):
return HttpResponse(json.dumps({'error': 'Connector %s does not have zero presynaptic connections.' % to_id}))
# The object returned in case of success
result = {}
if link_type == 'postsynaptic_to':
# Warn if there is already a link from the source skeleton to the
# target skeleton. This can happen and is not necessarely wrong, but
# worth to double check, because it is likely a mistake.
post_links_to_skeleton = TreenodeConnector.objects.filter(project=project,
connector=to_connector, relation=relation, skeleton_id=from_treenode.skeleton_id).count()
if post_links_to_skeleton == 1:
result['warning'] = 'There is already one post-synaptic ' \
'connection to the target skeleton'
elif post_links_to_skeleton > 1:
result['warning'] = 'There are already %s post-synaptic ' \
'connections to the target skeleton' % post_links_to_skeleton
# Enforce only synaptic links
gapjunction_links = TreenodeConnector.objects.filter(project=project, connector=to_connector,
relation__relation_name='gapjunction_with')
if (gapjunction_links.count() != 0):
return HttpResponse(json.dumps({'error': 'Connector %s cannot have both a gap junction and a postsynaptic node.' % to_id}))
if link_type == 'gapjunction_with':
# Enforce only two gap junction links
gapjunction_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation=relation)
synapse_links = TreenodeConnector.objects.filter(project=project, connector=to_connector, relation__relation_name__endswith='synaptic_to')
if (gapjunction_links.count() > 1):
return HttpResponse(json.dumps({'error': 'Connector %s can only have two gap junction connections.' % to_id}))
if (synapse_links.count() != 0):
return HttpResponse(json.dumps({'error': 'Connector %s is part of a synapse, and gap junction can not be added.' % to_id}))
# Enforce same relations across all linked connectors; only new postsynaptic links are valid
if any([to_connector.children.exists(), to_connector.parent]) and link_type != 'postsynaptic_to':
return HttpResponse(json.dumps({'error': 'Cannot add %s connection to a linked connector.' % link_type}))
TreenodeConnector(
user=request.user,
project=project,
relation=relation,
treenode=from_treenode, # treenode_id = from_id
skeleton=from_treenode.skeleton, # treenode.skeleton_id where treenode.id = from_id
connector=to_connector # connector_id = to_id
).save()
result['message'] = 'success'
return HttpResponse(json.dumps(result), content_type='application/json')
@requires_user_role(UserRole.Annotate)
def delete_link(request, project_id=None):
connector_id = int(request.POST.get('connector_id', 0))
treenode_id = int(request.POST.get('treenode_id', 0))
links = TreenodeConnector.objects.filter(
connector=connector_id,
treenode=treenode_id)
if links.count() == 0:
return HttpResponse(json.dumps({'error': 'Failed to delete connector #%s from geometry domain.' % connector_id}))
# Enforce same relations across all linked connectors; only removal of postsynaptic links are valid
try:
to_connector = Connector.objects.get(id=connector_id, project=project_id)
link_type = links[0].relation.relation_name
except ObjectDoesNotExist as e:
return HttpResponse(json.dumps({'error': e.message}))
if any([to_connector.children.exists(), to_connector.parent]) and link_type != 'postsynaptic_to':
return HttpResponse(json.dumps({'error': 'Cannot remove %s connection to a linked connector.' % link_type}))
# Could be done by filtering above when obtaining the links,
# but then one cannot distinguish between the link not existing
# and the user_id not matching or not being superuser.
can_edit_or_fail(request.user, links[0].id, 'treenode_connector')
links[0].delete()
return HttpResponse(json.dumps({'result': 'Removed treenode to connector link'}))
| gpl-3.0 |
nhenezi/kuma | vendor/packages/sqlalchemy/examples/large_collection/large_collection.py | 7 | 3294 |
from sqlalchemy import (MetaData, Table, Column, Integer, String, ForeignKey,
create_engine)
from sqlalchemy.orm import (mapper, relationship, sessionmaker)
meta = MetaData()
org_table = Table('organizations', meta,
Column('org_id', Integer, primary_key=True),
Column('org_name', String(50), nullable=False, key='name'),
mysql_engine='InnoDB')
member_table = Table('members', meta,
Column('member_id', Integer, primary_key=True),
Column('member_name', String(50), nullable=False, key='name'),
Column('org_id', Integer, ForeignKey('organizations.org_id', ondelete="CASCADE")),
mysql_engine='InnoDB')
class Organization(object):
def __init__(self, name):
self.name = name
class Member(object):
def __init__(self, name):
self.name = name
mapper(Organization, org_table, properties = {
'members' : relationship(Member,
# Organization.members will be a Query object - no loading
# of the entire collection occurs unless requested
lazy="dynamic",
# Member objects "belong" to their parent, are deleted when
# removed from the collection
cascade="all, delete-orphan",
# "delete, delete-orphan" cascade does not load in objects on delete,
# allows ON DELETE CASCADE to handle it.
# this only works with a database that supports ON DELETE CASCADE -
# *not* sqlite or MySQL with MyISAM
passive_deletes=True,
)
})
mapper(Member, member_table)
if __name__ == '__main__':
engine = create_engine("mysql://scott:tiger@localhost/test", echo=True)
meta.create_all(engine)
# expire_on_commit=False means the session contents
# will not get invalidated after commit.
sess = sessionmaker(engine, expire_on_commit=False)()
# create org with some members
org = Organization('org one')
org.members.append(Member('member one'))
org.members.append(Member('member two'))
org.members.append(Member('member three'))
sess.add(org)
print "-------------------------\nflush one - save org + 3 members\n"
sess.commit()
# the 'members' collection is a Query. it issues
# SQL as needed to load subsets of the collection.
print "-------------------------\nload subset of members\n"
members = org.members.filter(member_table.c.name.like('%member t%')).all()
print members
# new Members can be appended without any
# SQL being emitted to load the full collection
org.members.append(Member('member four'))
org.members.append(Member('member five'))
org.members.append(Member('member six'))
print "-------------------------\nflush two - save 3 more members\n"
sess.commit()
# delete the object. Using ON DELETE CASCADE
# SQL is only emitted for the head row - the Member rows
# disappear automatically without the need for additional SQL.
sess.delete(org)
print "-------------------------\nflush three - delete org, delete members in one statement\n"
sess.commit()
print "-------------------------\nno Member rows should remain:\n"
print sess.query(Member).count()
print "------------------------\ndone. dropping tables."
meta.drop_all(engine) | mpl-2.0 |
jlachowski/django-transmeta | transmeta/management/commands/sync_transmeta_db.py | 3 | 12022 | """
Detect new translatable fields in all models and sync database structure.
You will need to execute this command in two cases:
1. When you add new languages to settings.LANGUAGES.
 2. When you add new translatable fields to your models.
"""
import re
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import connection, transaction
from django.db import backend
from django.db.models import get_models
from django.db.models.fields import FieldDoesNotExist
from transmeta import (mandatory_language, get_real_fieldname,
get_languages, get_all_translatable_fields)
VALUE_DEFAULT = 'WITHOUT VALUE'
def ask_for_confirmation(sql_sentences, model_full_name, assume_yes):
print ('\nSQL to synchronize "%s" schema:' % model_full_name)
for sentence in sql_sentences:
print (' %s' % sentence)
if assume_yes:
print ('\nAre you sure that you want to execute the previous SQL: (y/n) [n]: YES')
return True
while True:
prompt = '\nAre you sure that you want to execute the previous SQL: (y/n) [n]: '
answer = raw_input(prompt).strip()
if answer == '':
return False
elif answer not in ('y', 'n', 'yes', 'no'):
print ('Please answer yes or no')
elif answer == 'y' or answer == 'yes':
return True
else:
return False
def print_db_change_langs(db_change_langs, field_name, model_name):
    print ('\nThese languages can change in "%s" field from "%s" model: %s' % \
(field_name, model_name, ", ".join(db_change_langs)))
class Command(BaseCommand):
help = "Detect new translatable fields or new available languages and sync database structure"
option_list = BaseCommand.option_list + (
make_option('-y', '--yes', action='store_true', dest='assume_yes',
help="Assume YES on all queries"),
make_option('-d', '--default', dest='default_language',
help="Language code of your default language"),
)
def handle(self, *args, **options):
""" command execution """
assume_yes = options.get('assume_yes', False)
default_language = options.get('default_language', None)
# set manual transaction management
transaction.commit_unless_managed()
transaction.enter_transaction_management()
transaction.managed(True)
self.cursor = connection.cursor()
self.introspection = connection.introspection
self.default_lang = default_language or mandatory_language()
all_models = get_models()
found_db_change_fields = False
for model in all_models:
if hasattr(model._meta, 'translatable_fields'):
model_full_name = '%s.%s' % (model._meta.app_label, model._meta.module_name)
translatable_fields = get_all_translatable_fields(model, column_in_current_table=True)
db_table = model._meta.db_table
for field_name in translatable_fields:
db_table_fields = self.get_table_fields(db_table)
db_change_langs = list(set(list(self.get_db_change_languages(field_name, db_table_fields)) + [self.default_lang]))
if db_change_langs:
sql_sentences = self.get_sync_sql(field_name, db_change_langs, model, db_table_fields)
if sql_sentences:
found_db_change_fields = True
print_db_change_langs(db_change_langs, field_name, model_full_name)
execute_sql = ask_for_confirmation(sql_sentences, model_full_name, assume_yes)
if execute_sql:
print ('Executing SQL...')
for sentence in sql_sentences:
self.cursor.execute(sentence)
# commit
transaction.commit()
print ('Done')
else:
print ('SQL not executed')
if transaction.is_dirty():
transaction.commit()
transaction.leave_transaction_management()
if not found_db_change_fields:
print ('\nNo new translatable fields detected')
if default_language:
variable = 'TRANSMETA_DEFAULT_LANGUAGE'
has_transmeta_default_language = getattr(settings, variable, False)
if not has_transmeta_default_language:
variable = 'LANGUAGE_CODE'
if getattr(settings, variable) != default_language:
print (('\n\nYou should change in your settings '
'the %s variable to "%s"' % (variable, default_language)))
def get_table_fields(self, db_table):
""" get table fields from schema """
db_table_desc = self.introspection.get_table_description(self.cursor, db_table)
return [t[0] for t in db_table_desc]
def get_field_required_in_db(self, db_table, field_name, value_not_implemented=False):
table_fields = self.introspection.get_table_description(self.cursor, db_table)
for f in table_fields:
if f[0] == field_name:
is_null = f[-1]
if is_null is None: # Not Implemented
return value_not_implemented
return not is_null
return False
def get_db_change_languages(self, field_name, db_table_fields):
""" get only db changes fields """
for lang_code, lang_name in get_languages():
if get_real_fieldname(field_name, lang_code) not in db_table_fields:
yield lang_code
for db_table_field in db_table_fields:
pattern = re.compile('^%s_(?P<lang>\w{2})$' % field_name)
m = pattern.match(db_table_field)
if not m:
continue
lang = m.group('lang')
yield lang
def was_translatable_before(self, field_name, db_table_fields):
""" check if field_name was translatable before syncing schema """
if field_name in db_table_fields:
# this implies field was never translatable before, data is in this field
return False
else:
return True
def get_default_field(self, field_name, model):
for lang_code, lang_name in get_languages():
field_name_i18n = get_real_fieldname(field_name, lang_code)
f = model._meta.get_field(field_name_i18n)
if not f.null:
return f
try:
return model._meta.get_field(field_name)
except FieldDoesNotExist:
return None
def get_value_default(self):
return getattr(settings, 'TRANSMETA_VALUE_DEFAULT', VALUE_DEFAULT)
def get_type_of_db_field(self, field_name, model):
field = self.get_default_field(field_name, model)
if not field:
field = model._meta.get_field(get_real_fieldname(field_name))
try:
col_type = field.db_type(connection)
except TypeError: # old django
col_type = field.db_type()
return col_type
def get_sync_sql(self, field_name, db_change_langs, model, db_table_fields):
""" returns SQL needed for sync schema for a new translatable field """
qn = connection.ops.quote_name
style = no_style()
sql_output = []
db_table = model._meta.db_table
was_translatable_before = self.was_translatable_before(field_name, db_table_fields)
default_f = self.get_default_field(field_name, model)
default_f_required = default_f and self.get_field_required_in_db(db_table,
default_f.name,
value_not_implemented=False)
for lang in db_change_langs:
new_field = get_real_fieldname(field_name, lang)
try:
f = model._meta.get_field(new_field)
col_type = self.get_type_of_db_field(field_name, model)
field_column = f.column
except FieldDoesNotExist: # columns in db, removed the settings.LANGUGES
field_column = new_field
col_type = self.get_type_of_db_field(field_name, model)
field_sql = [style.SQL_FIELD(qn(field_column)), style.SQL_COLTYPE(col_type)]
alter_colum_set = 'ALTER COLUMN %s SET' % qn(field_column)
if default_f:
alter_colum_drop = 'ALTER COLUMN %s DROP' % qn(field_column)
not_null = style.SQL_KEYWORD('NOT NULL')
if 'mysql' in backend.__name__:
alter_colum_set = 'MODIFY %s %s' % (qn(field_column), col_type)
not_null = style.SQL_KEYWORD('NULL')
if default_f:
alter_colum_drop = 'MODIFY %s %s' % (qn(field_column), col_type)
# column creation
if not new_field in db_table_fields:
sql_output.append("ALTER TABLE %s ADD COLUMN %s" % (qn(db_table), ' '.join(field_sql)))
if lang == self.default_lang and not was_translatable_before:
# data copy from old field (only for default language)
sql_output.append("UPDATE %s SET %s = %s" % (qn(db_table), \
qn(field_column), qn(field_name)))
if not f.null:
# changing to NOT NULL after having data copied
sql_output.append("ALTER TABLE %s %s %s" % \
(qn(db_table), alter_colum_set, \
style.SQL_KEYWORD('NOT NULL')))
elif default_f and not default_f.null:
if lang == self.default_lang:
f_required = self.get_field_required_in_db(db_table,
field_column,
value_not_implemented=False)
if default_f.name == new_field and default_f_required:
continue
if not f_required:
# data copy from old field (only for default language)
sql_output.append(("UPDATE %(db_table)s SET %(f_colum)s = '%(value_default)s' "
"WHERE %(f_colum)s is %(null)s or %(f_colum)s = '' " %
{'db_table': qn(db_table),
'f_colum': qn(field_column),
'value_default': self.get_value_default(),
'null': style.SQL_KEYWORD('NULL'),
}))
# changing to NOT NULL after having data copied
sql_output.append("ALTER TABLE %s %s %s" % \
(qn(db_table), alter_colum_set, \
style.SQL_KEYWORD('NOT NULL')))
else:
f_required = self.get_field_required_in_db(db_table,
field_column,
value_not_implemented=True)
if f_required:
sql_output.append(("ALTER TABLE %s %s %s" %
(qn(db_table), alter_colum_drop, not_null)))
if not was_translatable_before:
# we drop field only if field was no translatable before
sql_output.append("ALTER TABLE %s DROP COLUMN %s" % (qn(db_table), qn(field_name)))
return sql_output
| lgpl-3.0 |
manankalra/Twitter-Sentiment-Analysis | main/sentiment/tweepy_demo/tweep.py | 1 | 1099 | #!/usr/bin/env python
"""
tweepy(Twitter API) demo
"""
__author__ = "Manan Kalra"
__email__ = "[email protected]"
from tweepy import Stream, OAuthHandler
from tweepy.streaming import StreamListener
import time
# Add your own
consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
class listener(StreamListener):
def on_data(self, raw_data):
try:
# print(raw_data)
tweet = raw_data.split(",\"text\":")[1].split(",\"source\"")[0]
print(tweet)
save_time = str(time.time()) + "::" + tweet
save_file = open('tweetDB.csv', 'a')
save_file.write(save_time)
save_file.write("\n")
save_file.close()
return True
except BaseException:
print("Failed")
def on_error(self, status_code):
print(status_code)
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["<anything: noun/verb/adverb/...>"])
| mit |
bop/foundation | lib/python2.7/site-packages/django/contrib/auth/tokens.py | 96 | 2583 | from datetime import date
from django.conf import settings
from django.utils.http import int_to_base36, base36_to_int
from django.utils.crypto import constant_time_compare, salted_hmac
class PasswordResetTokenGenerator(object):
"""
Strategy object used to generate and check tokens for the password
reset mechanism.
"""
def make_token(self, user):
"""
Returns a token that can be used once to do a password reset
for the given user.
"""
return self._make_token_with_timestamp(user, self._num_days(self._today()))
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
# timestamp is number of days since 2001-1-1. Converted to
# base 36, this gives us a 3 digit string until about 2121
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state
# that is sure to change (the password salt will change as soon as
# the password is set, at least for current Django auth, and
# last_login will also change), we produce a hash that will be
# invalid as soon as it is used.
# We limit the hash to 20 chars to keep URL short
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
# Ensure results are consistent across DB backends
login_timestamp = user.last_login.replace(microsecond=0, tzinfo=None)
value = (unicode(user.id) + user.password +
unicode(login_timestamp) + unicode(timestamp))
hash = salted_hmac(key_salt, value).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _num_days(self, dt):
return (dt - date(2001, 1, 1)).days
def _today(self):
# Used for mocking in tests
return date.today()
default_token_generator = PasswordResetTokenGenerator()
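# Usage sketch (an illustrative note, not part of this module): make_token() yields a
# token bound to the user's password hash and last_login, and check_token() accepts it
# until the password changes, the user logs in again, or PASSWORD_RESET_TIMEOUT_DAYS
# elapses.
#
#   token = default_token_generator.make_token(user)
#   assert default_token_generator.check_token(user, token)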
| gpl-2.0 |
aaronplasek/CorEx | test_corex.py | 2 | 4334 | # Run tests with nosetests
import corex
import numpy as np
from functools import partial, update_wrapper
verbose = False
seed = 3
def generate_data(n_samples=100, group_sizes=[2], dim_hidden=2, missing=0):
Y_true = [np.random.randint(0, dim_hidden, n_samples) for _ in group_sizes]
X = np.hstack([np.repeat(Y_true[i][:,np.newaxis], size, axis=1) for i, size in enumerate(group_sizes)])
clusters = [i for i in range(len(group_sizes)) for _ in range(group_sizes[i])]
tcs = map(lambda z: (z-1)*np.log(dim_hidden), group_sizes)
X = np.where(np.random.random(X.shape) >= missing, X, -1)
return X, Y_true, clusters, tcs
def generate_noisy_data(n_samples=100, group_sizes=[2], erasure_p=0):
# Implement an erasure channel with erasure probability erasure_p
# The capacity of a single such channel is 1-erasure_p,
# So if we have group_size < 1/(1-p) , Shannon's bound forbids perfect recovery
# Or, 1 - 1/g < p
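    # Worked numbers for the tests at the bottom of this file (illustrative only):
    # with group_sizes=[200] the hidden label carries 1 bit and each observed column
    # is an erasure channel of capacity (1 - erasure_p), so
    #   erasure_p = 1 - 3/200  ->  total capacity 200 * (3/200) = 3 bits (> 1 bit needed)
    #   erasure_p = 1 - 1/200  ->  total capacity 200 * (1/200) = 1 bit  (exactly the rate)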
dim_hidden = 3
Y_true = [np.random.randint(0, 2, n_samples) for _ in group_sizes]
X = np.hstack([np.repeat(Y_true[i][:,np.newaxis], size, axis=1) for i, size in enumerate(group_sizes)])
X = np.where(np.random.random(X.shape) < erasure_p, 2, X) # Erasure channel
clusters = [i for i in range(len(group_sizes)) for _ in range(group_sizes[i])]
tcs = map(lambda z: (z-1)*np.log(2), group_sizes)
return X, Y_true, clusters, tcs
def check_correct(clusters, tcs, Y_true, X, corex):
assert np.array_equal(corex.transform(X), corex.labels) # Correctness of transform
assert np.array_equal(corex.clusters, clusters), str(zip(corex.clusters, clusters)) # Check connections
for j, tc in enumerate(tcs):
assert np.abs(corex.tcs[j]-tc)/tc < 0.1, "Values %f, %f" %(corex.tcs[j], tc) # TC relative error is small
assert len(set(map(tuple, zip(corex.labels.T[j], Y_true[j])))) == len(set(Y_true[j])), \
zip(corex.labels.T[j], Y_true[j]) # One-to-one correspondence of labels
def test_corex_all():
n_samples = 100
for group_sizes in [[2], [3, 2]]:
for dim_hidden in [2, 3]:
np.random.seed(seed)
X, Y_true, clusters, tcs = generate_data(n_samples=n_samples, group_sizes=group_sizes, dim_hidden=dim_hidden)
methods = [
corex.Corex(n_hidden=len(group_sizes), dim_hidden=dim_hidden, missing_values=-1, seed=seed, verbose=verbose).fit(X)
]
for i, method in enumerate(methods):
f = partial(check_correct, clusters, method.tcs, Y_true, X, method)
update_wrapper(f, check_correct)
f.description = 'method: ' + ['base', 'gaussian', 'discrete', 'discrete NT', 'gaussian NT', 'beta NT'][i] + \
', groups:' + str(group_sizes) + ', dim_hidden:' + str(dim_hidden) + ', seed: '+str(seed)
yield (f, )
def test_missing_values():
n_samples = 100
dim_hidden = 2
missing = 0.1
group_sizes = [10, 7] # Chance of entire row missing smaller than missing^n
np.random.seed(seed)
X, Y_true, clusters, tcs = generate_data(n_samples=n_samples, group_sizes=group_sizes,
dim_hidden=dim_hidden, missing=missing)
methods = [
corex.Corex(n_hidden=len(group_sizes), dim_hidden=dim_hidden, missing_values=-1, seed=seed, verbose=verbose).fit(X)
]
for i, method in enumerate(methods):
f = partial(check_correct, clusters, method.tcs, Y_true, X, method)
update_wrapper(f, check_correct)
f.description = 'missing values, '+ ['base', 'gaussian', 'discrete', 'discrete NT', 'gaussian NT'][i] + ', seed: '+str(seed)
yield (f, )
def test_near_shannon_limit():
X, Y_true, clusters, tcs = generate_noisy_data(n_samples=1000, group_sizes=[200], erasure_p=1.-3./200)
out = corex.Corex(n_hidden=1, seed=seed, verbose=verbose).fit(X)
assert max(np.mean(Y_true==out.labels.T), 1-np.mean(Y_true==out.labels.T)) > 0.95 # rate = 3*capacity, near perfect
X, Y_true, clusters, tcs = generate_noisy_data(n_samples=1000, group_sizes=[200], erasure_p=1.-1./200)
out = corex.Corex(n_hidden=1, seed=seed, verbose=verbose).fit(X)
assert max(np.mean(Y_true==out.labels.T), 1-np.mean(Y_true==out.labels.T)) < 0.9 # rate=capacity, not perfect | gpl-2.0 |
morningman/palo | gensrc/script/palo_builtins_functions.py | 2 | 31879 | # Modifications copyright (C) 2017, Baidu.com, Inc.
# Copyright 2017 The Apache Software Foundation
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This is a list of all the functions that are not auto-generated.
# It contains all the meta data that describes the function.
# The format is:
# [sql aliases], <return_type>, [<args>], <backend symbol>,
# With an optional
# <prepare symbol>, <close symbol>
#
# 'sql aliases' are the function names that can be used from sql. There must be at least
# one per function.
#
# The symbol can be empty for functions that are not yet implemented or are special-cased
# in Expr::CreateExpr() (i.e., functions that are implemented via a custom Expr class
# rather than a single function).
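#
# As a purely illustrative sketch (hypothetical names, not real backend symbols),
# an entry that uses the optional prepare/close symbols would be laid out like:
#   [['my_fn'], 'INT', ['INT', 'INT'],
#    '<mangled my_fn symbol>',
#    '<mangled my_fn_prepare symbol>',
#    '<mangled my_fn_close symbol>'],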
visible_functions = [
# Bit and Byte functions
# For functions corresponding to builtin operators, we can reuse the implementations
[['bitand'], 'TINYINT', ['TINYINT', 'TINYINT'],
'_ZN4palo9Operators32bitand_tiny_int_val_tiny_int_valEPN8palo_udf'
'15FunctionContextERKNS1_10TinyIntValES6_'],
[['bitand'], 'SMALLINT', ['SMALLINT', 'SMALLINT'],
'_ZN4palo9Operators34bitand_small_int_val_small_int_valEPN8palo_udf'
'15FunctionContextERKNS1_11SmallIntValES6_'],
[['bitand'], 'INT', ['INT', 'INT'],
'_ZN4palo9Operators22bitand_int_val_int_valEPN8palo_udf'
'15FunctionContextERKNS1_6IntValES6_'],
[['bitand'], 'BIGINT', ['BIGINT', 'BIGINT'],
'_ZN4palo9Operators30bitand_big_int_val_big_int_valEPN8palo_udf'
'15FunctionContextERKNS1_9BigIntValES6_'],
[['bitand'], 'LARGEINT', ['LARGEINT', 'LARGEINT'],
'_ZN4palo9Operators34bitand_large_int_val_large_int_valEPN8palo_udf'
'15FunctionContextERKNS1_11LargeIntValES6_'],
[['bitor'], 'TINYINT', ['TINYINT', 'TINYINT'],
'_ZN4palo9Operators31bitor_tiny_int_val_tiny_int_valEPN8palo_udf'
'15FunctionContextERKNS1_10TinyIntValES6_'],
[['bitor'], 'SMALLINT', ['SMALLINT', 'SMALLINT'],
'_ZN4palo9Operators33bitor_small_int_val_small_int_valEPN8palo_udf'
'15FunctionContextERKNS1_11SmallIntValES6_'],
[['bitor'], 'INT', ['INT', 'INT'],
'_ZN4palo9Operators21bitor_int_val_int_valEPN8palo_udf'
'15FunctionContextERKNS1_6IntValES6_'],
[['bitor'], 'BIGINT', ['BIGINT', 'BIGINT'],
'_ZN4palo9Operators29bitor_big_int_val_big_int_valEPN8palo_udf'
'15FunctionContextERKNS1_9BigIntValES6_'],
[['bitor'], 'LARGEINT', ['LARGEINT', 'LARGEINT'],
'_ZN4palo9Operators33bitor_large_int_val_large_int_valEPN8palo_udf'
'15FunctionContextERKNS1_11LargeIntValES6_'],
[['bitxor'], 'TINYINT', ['TINYINT', 'TINYINT'],
'_ZN4palo9Operators32bitxor_tiny_int_val_tiny_int_valEPN8palo_udf'
'15FunctionContextERKNS1_10TinyIntValES6_'],
[['bitxor'], 'SMALLINT', ['SMALLINT', 'SMALLINT'],
'_ZN4palo9Operators34bitxor_small_int_val_small_int_valEPN8palo_udf'
'15FunctionContextERKNS1_11SmallIntValES6_'],
[['bitxor'], 'INT', ['INT', 'INT'],
'_ZN4palo9Operators22bitxor_int_val_int_valEPN8palo_udf'
'15FunctionContextERKNS1_6IntValES6_'],
[['bitxor'], 'BIGINT', ['BIGINT', 'BIGINT'],
'_ZN4palo9Operators30bitxor_big_int_val_big_int_valEPN8palo_udf'
'15FunctionContextERKNS1_9BigIntValES6_'],
[['bitxor'], 'LARGEINT', ['LARGEINT', 'LARGEINT'],
'_ZN4palo9Operators34bitxor_large_int_val_large_int_valEPN8palo_udf'
'15FunctionContextERKNS1_11LargeIntValES6_'],
[['bitnot'], 'TINYINT', ['TINYINT'],
'_ZN4palo9Operators19bitnot_tiny_int_valEPN8palo_udf'
'15FunctionContextERKNS1_10TinyIntValE'],
[['bitnot'], 'SMALLINT', ['SMALLINT'],
'_ZN4palo9Operators20bitnot_small_int_valEPN8palo_udf'
'15FunctionContextERKNS1_11SmallIntValE'],
[['bitnot'], 'INT', ['INT'],
'_ZN4palo9Operators14bitnot_int_valEPN8palo_udf'
'15FunctionContextERKNS1_6IntValE'],
[['bitnot'], 'BIGINT', ['BIGINT'],
'_ZN4palo9Operators18bitnot_big_int_valEPN8palo_udf'
'15FunctionContextERKNS1_9BigIntValE'],
[['bitnot'], 'LARGEINT', ['LARGEINT'],
'_ZN4palo9Operators20bitnot_large_int_valEPN8palo_udf'
'15FunctionContextERKNS1_11LargeIntValE'],
# Timestamp functions
[['unix_timestamp'], 'INT', [],
'_ZN4palo18TimestampFunctions7to_unixEPN8palo_udf15FunctionContextE'],
[['unix_timestamp'], 'INT', ['DATETIME'],
'_ZN4palo18TimestampFunctions7to_unixEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'],
[['unix_timestamp'], 'INT', ['VARCHAR', 'VARCHAR'],
'_ZN4palo18TimestampFunctions7to_unixEPN8palo_udf15FunctionContextERKNS1_9StringValES6_'],
[['from_unixtime'], 'VARCHAR', ['INT'],
'_ZN4palo18TimestampFunctions9from_unixEPN8palo_udf15FunctionContextERKNS1_6IntValE'],
[['from_unixtime'], 'VARCHAR', ['INT', 'VARCHAR'],
'_ZN4palo18TimestampFunctions9from_unixEPN8palo_udf'
'15FunctionContextERKNS1_6IntValERKNS1_9StringValE'],
[['now', 'current_timestamp'], 'DATETIME', [],
'_ZN4palo18TimestampFunctions3nowEPN8palo_udf15FunctionContextE'],
[['curtime', 'current_time'], 'DATETIME', [],
'_ZN4palo18TimestampFunctions7curtimeEPN8palo_udf15FunctionContextE'],
[['timestamp'], 'DATETIME', ['DATETIME'],
'_ZN4palo18TimestampFunctions9timestampEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'],
[['from_days'], 'DATE', ['INT'],
'_ZN4palo18TimestampFunctions9from_daysEPN8palo_udf15FunctionContextERKNS1_6IntValE'],
[['to_days'], 'INT', ['DATE'],
'_ZN4palo18TimestampFunctions7to_daysEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'],
[['year'], 'INT', ['DATETIME'],
'_ZN4palo18TimestampFunctions4yearEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'],
[['month'], 'INT', ['DATETIME'],
'_ZN4palo18TimestampFunctions5monthEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'],
[['quarter'], 'INT', ['DATETIME'],
'_ZN4palo18TimestampFunctions7quarterEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'],
[['day', 'dayofmonth'], 'INT', ['DATETIME'],
'_ZN4palo18TimestampFunctions12day_of_monthEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValE'],
[['dayofyear'], 'INT', ['DATETIME'],
'_ZN4palo18TimestampFunctions11day_of_yearEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValE'],
[['weekofyear'], 'INT', ['DATETIME'],
'_ZN4palo18TimestampFunctions12week_of_yearEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValE'],
[['hour'], 'INT', ['DATETIME'],
'_ZN4palo18TimestampFunctions4hourEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'],
[['minute'], 'INT', ['DATETIME'],
'_ZN4palo18TimestampFunctions6minuteEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'],
[['second'], 'INT', ['DATETIME'],
'_ZN4palo18TimestampFunctions6secondEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'],
[['years_add'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions9years_addEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['years_sub'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions9years_subEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['months_add', 'add_months'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions10months_addEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['months_sub'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions10months_subEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['weeks_add'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions9weeks_addEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['weeks_sub'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions9weeks_subEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['days_add', 'date_add', 'adddate'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions8days_addEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['days_sub', 'date_sub', 'subdate'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions8days_subEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['hours_add'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions9hours_addEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['hours_sub'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions9hours_subEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['minutes_add'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions11minutes_addEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['minutes_sub'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions11minutes_subEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['seconds_add'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions11seconds_addEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['seconds_sub'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions11seconds_subEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['microseconds_add'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions10micros_addEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['microseconds_sub'], 'DATETIME', ['DATETIME', 'INT'],
'_ZN4palo18TimestampFunctions10micros_subEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_6IntValE'],
[['datediff'], 'INT', ['DATETIME', 'DATETIME'],
'_ZN4palo18TimestampFunctions9date_diffEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValES6_'],
[['timediff'], 'DATETIME', ['DATETIME', 'DATETIME'],
'_ZN4palo18TimestampFunctions9time_diffEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValES6_'],
[['str_to_date'], 'DATETIME', ['VARCHAR', 'VARCHAR'],
'_ZN4palo18TimestampFunctions11str_to_dateEPN8palo_udf'
'15FunctionContextERKNS1_9StringValES6_'],
[['date_format'], 'VARCHAR', ['DATETIME', 'VARCHAR'],
'_ZN4palo18TimestampFunctions11date_formatEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValERKNS1_9StringValE'],
[['date', 'to_date'], 'DATE', ['DATETIME'],
'_ZN4palo18TimestampFunctions7to_dateEPN8palo_udf15FunctionContextERKNS1_11DateTimeValE'],
[['dayname'], 'VARCHAR', ['DATETIME'],
'_ZN4palo18TimestampFunctions8day_nameEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValE'],
[['monthname'], 'VARCHAR', ['DATETIME'],
'_ZN4palo18TimestampFunctions10month_nameEPN8palo_udf'
'15FunctionContextERKNS1_11DateTimeValE'],
# Math builtin functions
[['pi'], 'DOUBLE', [],
'_ZN4palo13MathFunctions2piEPN8palo_udf15FunctionContextE'],
[['e'], 'DOUBLE', [],
'_ZN4palo13MathFunctions1eEPN8palo_udf15FunctionContextE'],
[['abs'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions3absEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['sign'], 'FLOAT', ['DOUBLE'],
'_ZN4palo13MathFunctions4signEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['sin'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions3sinEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['asin'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions4asinEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['cos'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions3cosEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['acos'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions4acosEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['tan'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions3tanEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['atan'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions4atanEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['ceil', 'ceiling', 'dceil'], 'BIGINT', ['DOUBLE'],
'_ZN4palo13MathFunctions4ceilEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['floor', 'dfloor'], 'BIGINT', ['DOUBLE'],
'_ZN4palo13MathFunctions5floorEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['round', 'dround'], 'BIGINT', ['DOUBLE'],
'_ZN4palo13MathFunctions5roundEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['round', 'dround'], 'DOUBLE', ['DOUBLE', 'INT'],
'_ZN4palo13MathFunctions11round_up_toEPN8palo_udf'
'15FunctionContextERKNS1_9DoubleValERKNS1_6IntValE'],
[['truncate'], 'DOUBLE', ['DOUBLE', 'INT'],
'_ZN4palo13MathFunctions8truncateEPN8palo_udf'
'15FunctionContextERKNS1_9DoubleValERKNS1_6IntValE'],
[['ln', 'dlog1'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions2lnEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['log'], 'DOUBLE', ['DOUBLE', 'DOUBLE'],
'_ZN4palo13MathFunctions3logEPN8palo_udf15FunctionContextERKNS1_9DoubleValES6_'],
[['log2'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions4log2EPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['log10', 'dlog10'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions5log10EPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['exp', 'dexp'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions3expEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['radians'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions7radiansEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['degrees'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions7degreesEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['sqrt', 'dsqrt'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions4sqrtEPN8palo_udf15FunctionContextERKNS1_9DoubleValE'],
[['pow', 'power', 'dpow', 'fpow'], 'DOUBLE', ['DOUBLE', 'DOUBLE'],
'_ZN4palo13MathFunctions3powEPN8palo_udf15FunctionContextERKNS1_9DoubleValES6_'],
[['rand', 'random'], 'DOUBLE', [],
'_ZN4palo13MathFunctions4randEPN8palo_udf15FunctionContextE',
'_ZN4palo13MathFunctions12rand_prepareEPN8palo_udf'
'15FunctionContextENS2_18FunctionStateScopeE'],
[['rand', 'random'], 'DOUBLE', ['BIGINT'],
'_ZN4palo13MathFunctions9rand_seedEPN8palo_udf15FunctionContextERKNS1_9BigIntValE',
'_ZN4palo13MathFunctions12rand_prepareEPN8palo_udf'
'15FunctionContextENS2_18FunctionStateScopeE'],
[['bin'], 'VARCHAR', ['BIGINT'],
'_ZN4palo13MathFunctions3binEPN8palo_udf15FunctionContextERKNS1_9BigIntValE'],
[['hex'], 'VARCHAR', ['BIGINT'],
'_ZN4palo13MathFunctions7hex_intEPN8palo_udf15FunctionContextERKNS1_9BigIntValE'],
[['hex'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo13MathFunctions10hex_stringEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['unhex'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo13MathFunctions5unhexEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['conv'], 'VARCHAR', ['BIGINT', 'TINYINT', 'TINYINT'],
'_ZN4palo13MathFunctions8conv_intEPN8palo_udf'
'15FunctionContextERKNS1_9BigIntValERKNS1_10TinyIntValES9_'],
[['conv'], 'VARCHAR', ['VARCHAR', 'TINYINT', 'TINYINT'],
'_ZN4palo13MathFunctions11conv_stringEPN8palo_udf'
'15FunctionContextERKNS1_9StringValERKNS1_10TinyIntValES9_'],
[['pmod'], 'BIGINT', ['BIGINT', 'BIGINT'],
'_ZN4palo13MathFunctions11pmod_bigintEPN8palo_udf'
'15FunctionContextERKNS1_9BigIntValES6_'],
[['pmod'], 'DOUBLE', ['DOUBLE', 'DOUBLE'],
'_ZN4palo13MathFunctions11pmod_doubleEPN8palo_udf'
'15FunctionContextERKNS1_9DoubleValES6_'],
[['mod'], 'TINYINT', ['TINYINT', 'TINYINT'],
'_ZN4palo9Operators29mod_tiny_int_val_tiny_int_valEPN8palo_udf'
'15FunctionContextERKNS1_10TinyIntValES6_'],
[['mod'], 'SMALLINT', ['SMALLINT', 'SMALLINT'],
'_ZN4palo9Operators31mod_small_int_val_small_int_valEPN8palo_udf'
'15FunctionContextERKNS1_11SmallIntValES6_'],
[['mod'], 'INT', ['INT', 'INT'],
'_ZN4palo9Operators19mod_int_val_int_valEPN8palo_udf'
'15FunctionContextERKNS1_6IntValES6_'],
[['mod'], 'BIGINT', ['BIGINT', 'BIGINT'],
'_ZN4palo9Operators27mod_big_int_val_big_int_valEPN8palo_udf'
'15FunctionContextERKNS1_9BigIntValES6_'],
[['mod'], 'LARGEINT', ['LARGEINT', 'LARGEINT'],
'_ZN4palo9Operators31mod_large_int_val_large_int_valEPN8palo_udf'
'15FunctionContextERKNS1_11LargeIntValES6_'],
[['mod'], 'DECIMAL', ['DECIMAL', 'DECIMAL'],
'_ZN4palo16DecimalOperators27mod_decimal_val_decimal_valEPN8palo_udf'
'15FunctionContextERKNS1_10DecimalValES6_'],
[['mod', 'fmod'], 'FLOAT', ['FLOAT', 'FLOAT'],
'_ZN4palo13MathFunctions10fmod_floatEPN8palo_udf15FunctionContextERKNS1_8FloatValES6_'],
[['mod', 'fmod'], 'DOUBLE', ['DOUBLE', 'DOUBLE'],
'_ZN4palo13MathFunctions11fmod_doubleEPN8palo_udf15FunctionContextERKNS1_9DoubleValES6_'],
[['positive'], 'BIGINT', ['BIGINT'],
'_ZN4palo13MathFunctions15positive_bigintEPN8palo_udf'
'15FunctionContextERKNS1_9BigIntValE'],
[['positive'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions15positive_doubleEPN8palo_udf'
'15FunctionContextERKNS1_9DoubleValE'],
[['positive'], 'DECIMAL', ['DECIMAL'],
'_ZN4palo13MathFunctions16positive_decimalEPN8palo_udf'
'15FunctionContextERKNS1_10DecimalValE'],
[['negative'], 'BIGINT', ['BIGINT'],
'_ZN4palo13MathFunctions15negative_bigintEPN8palo_udf'
'15FunctionContextERKNS1_9BigIntValE'],
[['negative'], 'DOUBLE', ['DOUBLE'],
'_ZN4palo13MathFunctions15negative_doubleEPN8palo_udf'
'15FunctionContextERKNS1_9DoubleValE'],
[['negative'], 'DECIMAL', ['DECIMAL'],
'_ZN4palo13MathFunctions16negative_decimalEPN8palo_udf'
'15FunctionContextERKNS1_10DecimalValE'],
[['least'], 'TINYINT', ['TINYINT', '...'],
'_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_10TinyIntValE'],
[['least'], 'SMALLINT', ['SMALLINT', '...'],
'_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_11SmallIntValE'],
[['least'], 'INT', ['INT', '...'],
'_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_6IntValE'],
[['least'], 'BIGINT', ['BIGINT', '...'],
'_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_9BigIntValE'],
[['least'], 'LARGEINT', ['LARGEINT', '...'],
'_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_11LargeIntValE'],
[['least'], 'FLOAT', ['FLOAT', '...'],
'_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_8FloatValE'],
[['least'], 'DOUBLE', ['DOUBLE', '...'],
'_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_9DoubleValE'],
[['least'], 'VARCHAR', ['VARCHAR', '...'],
'_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_9StringValE'],
[['least'], 'DATETIME', ['DATETIME', '...'],
'_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_11DateTimeValE'],
[['least'], 'DECIMAL', ['DECIMAL', '...'],
'_ZN4palo13MathFunctions5leastEPN8palo_udf15FunctionContextEiPKNS1_10DecimalValE'],
[['greatest'], 'TINYINT', ['TINYINT', '...'],
'_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_10TinyIntValE'],
[['greatest'], 'SMALLINT', ['SMALLINT', '...'],
'_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_11SmallIntValE'],
[['greatest'], 'INT', ['INT', '...'],
'_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_6IntValE'],
[['greatest'], 'BIGINT', ['BIGINT', '...'],
'_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_9BigIntValE'],
[['greatest'], 'LARGEINT', ['LARGEINT', '...'],
'_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_11LargeIntValE'],
[['greatest'], 'FLOAT', ['FLOAT', '...'],
'_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_8FloatValE'],
[['greatest'], 'DOUBLE', ['DOUBLE', '...'],
'_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_9DoubleValE'],
[['greatest'], 'VARCHAR', ['VARCHAR', '...'],
'_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_9StringValE'],
[['greatest'], 'DATETIME', ['DATETIME', '...'],
'_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_11DateTimeValE'],
[['greatest'], 'DECIMAL', ['DECIMAL', '...'],
'_ZN4palo13MathFunctions8greatestEPN8palo_udf15FunctionContextEiPKNS1_10DecimalValE'],
# Conditional Functions
# Some of these have empty symbols because the BE special-cases them based on the
# function name
[['if'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN', 'BOOLEAN'], ''],
[['if'], 'TINYINT', ['BOOLEAN', 'TINYINT', 'TINYINT'], ''],
[['if'], 'SMALLINT', ['BOOLEAN', 'SMALLINT', 'SMALLINT'], ''],
[['if'], 'INT', ['BOOLEAN', 'INT', 'INT'], ''],
[['if'], 'BIGINT', ['BOOLEAN', 'BIGINT', 'BIGINT'], ''],
[['if'], 'LARGEINT', ['BOOLEAN', 'LARGEINT', 'LARGEINT'], ''],
[['if'], 'FLOAT', ['BOOLEAN', 'FLOAT', 'FLOAT'], ''],
[['if'], 'DOUBLE', ['BOOLEAN', 'DOUBLE', 'DOUBLE'], ''],
[['if'], 'VARCHAR', ['BOOLEAN', 'VARCHAR', 'VARCHAR'], ''],
[['if'], 'DATETIME', ['BOOLEAN', 'DATETIME', 'DATETIME'], ''],
[['if'], 'DECIMAL', ['BOOLEAN', 'DECIMAL', 'DECIMAL'], ''],
[['nullif'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], ''],
[['nullif'], 'TINYINT', ['TINYINT', 'TINYINT'], ''],
[['nullif'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], ''],
[['nullif'], 'INT', ['INT', 'INT'], ''],
[['nullif'], 'BIGINT', ['BIGINT', 'BIGINT'], ''],
[['nullif'], 'LARGEINT', ['LARGEINT', 'LARGEINT'], ''],
[['nullif'], 'FLOAT', ['FLOAT', 'FLOAT'], ''],
[['nullif'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], ''],
[['nullif'], 'VARCHAR', ['VARCHAR', 'VARCHAR'], ''],
[['nullif'], 'DATETIME', ['DATETIME', 'DATETIME'], ''],
[['nullif'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], ''],
[['ifnull'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], ''],
[['ifnull'], 'TINYINT', ['TINYINT', 'TINYINT'], ''],
[['ifnull'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], ''],
[['ifnull'], 'INT', ['INT', 'INT'], ''],
[['ifnull'], 'BIGINT', ['BIGINT', 'BIGINT'], ''],
[['ifnull'], 'LARGEINT', ['LARGEINT', 'LARGEINT'], ''],
[['ifnull'], 'FLOAT', ['FLOAT', 'FLOAT'], ''],
[['ifnull'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], ''],
[['ifnull'], 'VARCHAR', ['VARCHAR', 'VARCHAR'], ''],
[['ifnull'], 'DATETIME', ['DATETIME', 'DATETIME'], ''],
[['ifnull'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], ''],
[['coalesce'], 'BOOLEAN', ['BOOLEAN', '...'], ''],
[['coalesce'], 'TINYINT', ['TINYINT', '...'], ''],
[['coalesce'], 'SMALLINT', ['SMALLINT', '...'], ''],
[['coalesce'], 'INT', ['INT', '...'], ''],
[['coalesce'], 'BIGINT', ['BIGINT', '...'], ''],
[['coalesce'], 'LARGEINT', ['LARGEINT', '...'], ''],
[['coalesce'], 'FLOAT', ['FLOAT', '...'], ''],
[['coalesce'], 'DOUBLE', ['DOUBLE', '...'], ''],
[['coalesce'], 'VARCHAR', ['VARCHAR', '...'], ''],
[['coalesce'], 'DATETIME', ['DATETIME', '...'], ''],
[['coalesce'], 'DECIMAL', ['DECIMAL', '...'], ''],
# String builtin functions
[['substr', 'substring'], 'VARCHAR', ['VARCHAR', 'INT'],
'_ZN4palo15StringFunctions9substringEPN'
'8palo_udf15FunctionContextERKNS1_9StringValERKNS1_6IntValE'],
[['substr', 'substring'], 'VARCHAR', ['VARCHAR', 'INT', 'INT'],
'_ZN4palo15StringFunctions9substringEPN'
'8palo_udf15FunctionContextERKNS1_9StringValERKNS1_6IntValES9_'],
[['strleft'], 'VARCHAR', ['VARCHAR', 'INT'],
'_ZN4palo15StringFunctions4leftEPN8palo_udf'
'15FunctionContextERKNS1_9StringValERKNS1_6IntValE'],
[['strright'], 'VARCHAR', ['VARCHAR', 'INT'],
'_ZN4palo15StringFunctions5rightEPN8palo_udf'
'15FunctionContextERKNS1_9StringValERKNS1_6IntValE'],
[['space'], 'VARCHAR', ['INT'],
'_ZN4palo15StringFunctions5spaceEPN8palo_udf15FunctionContextERKNS1_6IntValE'],
[['repeat'], 'VARCHAR', ['VARCHAR', 'INT'],
'_ZN4palo15StringFunctions6repeatEPN8palo_udf'
'15FunctionContextERKNS1_9StringValERKNS1_6IntValE'],
[['lpad'], 'VARCHAR', ['VARCHAR', 'INT', 'VARCHAR'],
'_ZN4palo15StringFunctions4lpadEPN8palo_udf'
'15FunctionContextERKNS1_9StringValERKNS1_6IntValES6_'],
[['rpad'], 'VARCHAR', ['VARCHAR', 'INT', 'VARCHAR'],
'_ZN4palo15StringFunctions4rpadEPN8palo_udf'
'15FunctionContextERKNS1_9StringValERKNS1_6IntValES6_'],
[['length'], 'INT', ['VARCHAR'],
'_ZN4palo15StringFunctions6lengthEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['lower', 'lcase'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo15StringFunctions5lowerEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['upper', 'ucase'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo15StringFunctions5upperEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['reverse'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo15StringFunctions7reverseEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['trim'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo15StringFunctions4trimEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['ltrim'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo15StringFunctions5ltrimEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['rtrim'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo15StringFunctions5rtrimEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['ascii'], 'INT', ['VARCHAR'],
'_ZN4palo15StringFunctions5asciiEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['instr'], 'INT', ['VARCHAR', 'VARCHAR'],
'_ZN4palo15StringFunctions5instrEPN8palo_udf15FunctionContextERKNS1_9StringValES6_'],
[['locate'], 'INT', ['VARCHAR', 'VARCHAR'],
'_ZN4palo15StringFunctions6locateEPN8palo_udf15FunctionContextERKNS1_9StringValES6_'],
[['locate'], 'INT', ['VARCHAR', 'VARCHAR', 'INT'],
'_ZN4palo15StringFunctions10locate_posEPN8palo_udf'
'15FunctionContextERKNS1_9StringValES6_RKNS1_6IntValE'],
[['regexp_extract'], 'VARCHAR', ['VARCHAR', 'VARCHAR', 'BIGINT'],
'_ZN4palo15StringFunctions14regexp_extractEPN8palo_udf'
'15FunctionContextERKNS1_9StringValES6_RKNS1_9BigIntValE',
'_ZN4palo15StringFunctions14regexp_prepareEPN8palo_udf'
'15FunctionContextENS2_18FunctionStateScopeE',
'_ZN4palo15StringFunctions12regexp_closeEPN8palo_udf'
'15FunctionContextENS2_18FunctionStateScopeE'],
[['regexp_replace'], 'VARCHAR', ['VARCHAR', 'VARCHAR', 'VARCHAR'],
'_ZN4palo15StringFunctions14regexp_replaceEPN8palo_udf'
'15FunctionContextERKNS1_9StringValES6_S6_',
'_ZN4palo15StringFunctions14regexp_prepareEPN8palo_udf'
'15FunctionContextENS2_18FunctionStateScopeE',
'_ZN4palo15StringFunctions12regexp_closeEPN8palo_udf'
'15FunctionContextENS2_18FunctionStateScopeE'],
[['concat'], 'VARCHAR', ['VARCHAR', '...'],
'_ZN4palo15StringFunctions6concatEPN8palo_udf15FunctionContextEiPKNS1_9StringValE'],
[['concat_ws'], 'VARCHAR', ['VARCHAR', 'VARCHAR', '...'],
'_ZN4palo15StringFunctions9concat_wsEPN8palo_udf'
'15FunctionContextERKNS1_9StringValEiPS5_'],
[['find_in_set'], 'INT', ['VARCHAR', 'VARCHAR'],
'_ZN4palo15StringFunctions11find_in_setEPN8palo_udf'
'15FunctionContextERKNS1_9StringValES6_'],
[['parse_url'], 'VARCHAR', ['VARCHAR', 'VARCHAR'],
'_ZN4palo15StringFunctions9parse_urlEPN8palo_udf'
'15FunctionContextERKNS1_9StringValES6_',
'_ZN4palo15StringFunctions17parse_url_prepareEPN8palo_udf'
'15FunctionContextENS2_18FunctionStateScopeE',
'_ZN4palo15StringFunctions15parse_url_closeEPN8palo_udf'
'15FunctionContextENS2_18FunctionStateScopeE'],
[['parse_url'], 'VARCHAR', ['VARCHAR', 'VARCHAR', 'VARCHAR'],
'_ZN4palo15StringFunctions13parse_url_keyEPN8palo_udf'
'15FunctionContextERKNS1_9StringValES6_S6_',
'_ZN4palo15StringFunctions17parse_url_prepareEPN8palo_udf'
'15FunctionContextENS2_18FunctionStateScopeE',
'_ZN4palo15StringFunctions15parse_url_closeEPN8palo_udf'
'15FunctionContextENS2_18FunctionStateScopeE'],
# Utility functions
[['sleep'], 'BOOLEAN', ['INT'],
'_ZN4palo16UtilityFunctions5sleepEPN8palo_udf15FunctionContextERKNS1_6IntValE'],
[['version'], 'VARCHAR', [],
'_ZN4palo16UtilityFunctions7versionEPN8palo_udf15FunctionContextE'],
# Json functions
[['get_json_int'], 'INT', ['VARCHAR', 'VARCHAR'],
'_ZN4palo13JsonFunctions12get_json_intEPN8palo_udf15FunctionContextERKNS1_9StringValES6_'],
[['get_json_double'], 'DOUBLE', ['VARCHAR', 'VARCHAR'],
'_ZN4palo13JsonFunctions15get_json_doubleEPN8palo_udf'
'15FunctionContextERKNS1_9StringValES6_'],
[['get_json_string'], 'VARCHAR', ['VARCHAR', 'VARCHAR'],
'_ZN4palo13JsonFunctions15get_json_stringEPN8palo_udf'
'15FunctionContextERKNS1_9StringValES6_'],
    # HLL functions
[['hll_cardinality'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo16HllHashFunctions15hll_cardinalityEPN8palo_udf'
'15FunctionContextERKNS1_9StringValE'],
[['hll_hash'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo16HllHashFunctions8hll_hashEPN8palo_udf15FunctionContextERKNS1_9StringValE'],
    # AES and base64 functions
[['from_base64'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo19EncryptionFunctions11from_base64EPN8palo_udf'
'15FunctionContextERKNS1_9StringValE'],
[['to_base64'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo19EncryptionFunctions9to_base64EPN8palo_udf'
'15FunctionContextERKNS1_9StringValE'],
    # for compatibility with MySQL
[['md5'], 'VARCHAR', ['VARCHAR'],
'_ZN4palo19EncryptionFunctions3md5EPN8palo_udf15FunctionContextERKNS1_9StringValE'],
[['md5sum'], 'VARCHAR', ['VARCHAR', '...'],
'_ZN4palo19EncryptionFunctions6md5sumEPN8palo_udf15FunctionContextEiPKNS1_9StringValE']
]
invisible_functions = [
]
| apache-2.0 |
bluemini/kuma | kuma/wiki/tests/test_views.py | 2 | 168231 | # -*- coding: utf-8 -*-
import base64
import datetime
import json
import time
import mock
from nose.tools import eq_, ok_
from nose.plugins.attrib import attr
from pyquery import PyQuery as pq
from urlparse import urlparse
from django.conf import settings
from django.contrib.sites.models import Site
from django.core import mail
from django.db.models import Q
from django.test.client import (FakePayload, encode_multipart,
BOUNDARY, CONTENT_TYPE_RE, MULTIPART_CONTENT)
from django.test.utils import override_settings
from django.http import Http404
from django.utils.encoding import smart_str
from constance import config
from constance.test import override_config
from jingo.helpers import urlparams
from waffle.models import Flag, Switch
from kuma.attachments.models import Attachment
from kuma.attachments.utils import make_test_file
from kuma.authkeys.models import Key
from kuma.core.cache import memcache as cache
from kuma.core.models import IPBan
from kuma.core.urlresolvers import reverse
from kuma.users.tests import UserTestCase, user
from ..content import get_seo_description
from ..events import EditDocumentEvent
from ..forms import MIDAIR_COLLISION
from ..models import (Document, Revision, RevisionIP, DocumentZone,
DocumentTag, DocumentDeletionLog)
from ..views.document import _get_seo_parent_title
from . import (doc_rev, document, new_document_data, revision,
normalize_html, create_template_test_users,
make_translation, WikiTestCase, FakeResponse)
class RedirectTests(UserTestCase, WikiTestCase):
"""Tests for the REDIRECT wiki directive"""
localizing_client = True
def test_redirect_suppression(self):
"""The document view shouldn't redirect when passed redirect=no."""
redirect, _ = doc_rev('REDIRECT <a class="redirect" '
'href="/en-US/docs/blah">smoo</a>')
url = redirect.get_absolute_url() + '?redirect=no'
response = self.client.get(url, follow=True)
self.assertContains(response, 'REDIRECT ')
def test_redirects_only_internal(self):
"""Ensures redirects cannot be used to link to other sites"""
redirect, _ = doc_rev('REDIRECT <a class="redirect" '
'href="//davidwalsh.name">DWB</a>')
url = redirect.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, 'DWB')
def test_redirects_only_internal_2(self):
"""Ensures redirects cannot be used to link to other sites"""
redirect, _ = doc_rev('REDIRECT <a class="redirect" '
'href="http://davidwalsh.name">DWB</a>')
url = redirect.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, 'DWB')
def test_self_redirect_suppression(self):
"""The document view shouldn't redirect to itself."""
slug = 'redirdoc'
html = ('REDIRECT <a class="redirect" href="/en-US/docs/%s">smoo</a>' %
slug)
doc = document(title='blah', slug=slug, html=html, save=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
revision(document=doc, content=html, is_approved=True, save=True)
response = self.client.get(doc.get_absolute_url(), follow=True)
eq_(200, response.status_code)
response_html = pq(response.content)
article_body = response_html.find('#wikiArticle').html()
self.assertHTMLEqual(html, article_body)
class LocaleRedirectTests(UserTestCase, WikiTestCase):
"""Tests for fallbacks to en-US and such for slug lookups."""
# Some of these may fail or be invalid if your WIKI_DEFAULT_LANGUAGE is de.
localizing_client = True
def test_fallback_to_translation(self):
"""If a slug isn't found in the requested locale but is in the default
locale and if there is a translation of that default-locale document to
the requested locale, the translation should be served."""
en_doc, de_doc = self._create_en_and_de_docs()
response = self.client.get(reverse('wiki.document',
args=(en_doc.slug,),
locale='de'),
follow=True)
self.assertRedirects(response, de_doc.get_absolute_url())
def test_fallback_with_query_params(self):
"""The query parameters should be passed along to the redirect."""
en_doc, de_doc = self._create_en_and_de_docs()
url = reverse('wiki.document', args=[en_doc.slug], locale='de')
response = self.client.get(url + '?x=y&x=z', follow=True)
self.assertRedirects(response, de_doc.get_absolute_url() + '?x=y&x=z')
def test_redirect_with_no_slug(self):
"""Bug 775241: Fix exception in redirect for URL with ui-locale"""
loc = settings.WIKI_DEFAULT_LANGUAGE
url = '/%s/docs/%s/' % (loc, loc)
try:
self.client.get(url, follow=True)
except Http404, e:
pass
except Exception as e:
self.fail("The only exception should be a 404, not this: %s" % e)
def _create_en_and_de_docs(self):
en = settings.WIKI_DEFAULT_LANGUAGE
en_doc = document(locale=en, slug='english-slug', save=True)
de_doc = document(locale='de', parent=en_doc, save=True)
revision(document=de_doc, is_approved=True, save=True)
return en_doc, de_doc
class ViewTests(UserTestCase, WikiTestCase):
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
localizing_client = True
@attr('bug875349')
def test_json_view(self):
expected_tags = sorted(['foo', 'bar', 'baz'])
expected_review_tags = sorted(['tech', 'editorial'])
doc = Document.objects.get(pk=1)
doc.tags.set(*expected_tags)
doc.current_revision.review_tags.set(*expected_review_tags)
url = reverse('wiki.json', locale=settings.WIKI_DEFAULT_LANGUAGE)
resp = self.client.get(url, {'title': 'an article title'})
eq_(200, resp.status_code)
data = json.loads(resp.content)
eq_('article-title', data['slug'])
result_tags = sorted([str(x) for x in data['tags']])
eq_(expected_tags, result_tags)
result_review_tags = sorted([str(x) for x in data['review_tags']])
eq_(expected_review_tags, result_review_tags)
url = reverse('wiki.json_slug', args=('article-title',),
locale=settings.WIKI_DEFAULT_LANGUAGE)
Switch.objects.create(name='application_ACAO', active=True)
resp = self.client.get(url)
ok_('Access-Control-Allow-Origin' in resp)
eq_('*', resp['Access-Control-Allow-Origin'])
eq_(200, resp.status_code)
data = json.loads(resp.content)
eq_('an article title', data['title'])
ok_('translations' in data)
result_tags = sorted([str(x) for x in data['tags']])
eq_(expected_tags, result_tags)
result_review_tags = sorted([str(x) for x in data['review_tags']])
eq_(expected_review_tags, result_review_tags)
def test_history_view(self):
slug = 'history-view-test-doc'
html = 'history view test doc'
doc = document(title='History view test doc', slug=slug,
html=html, save=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
for i in xrange(1, 51):
revision(document=doc, content=html,
comment='Revision %s' % i,
is_approved=True, save=True)
url = reverse('wiki.document_revisions', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
resp = self.client.get(url)
eq_(200, resp.status_code)
all_url = urlparams(reverse('wiki.document_revisions', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE),
limit='all')
resp = self.client.get(all_url)
eq_(403, resp.status_code)
self.client.login(username='testuser', password='testpass')
resp = self.client.get(all_url)
eq_(200, resp.status_code)
def test_toc_view(self):
slug = 'toc_test_doc'
html = '<h2>Head 2</h2><h3>Head 3</h3>'
doc = document(title='blah', slug=slug, html=html, save=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
revision(document=doc, content=html, is_approved=True, save=True)
url = reverse('wiki.toc', args=[slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
Switch.objects.create(name='application_ACAO', active=True)
resp = self.client.get(url)
ok_('Access-Control-Allow-Origin' in resp)
eq_('*', resp['Access-Control-Allow-Origin'])
self.assertHTMLEqual(
resp.content, '<ol><li><a href="#Head_2" rel="internal">Head 2</a>'
'<ol><li><a href="#Head_3" rel="internal">Head 3</a>'
'</ol></li></ol>')
@attr('bug875349')
def test_children_view(self):
test_content = '<p>Test <a href="http://example.com">Summary</a></p>'
def _make_doc(title, slug, parent=None, is_redir=False):
doc = document(title=title,
slug=slug,
save=True,
is_redirect=is_redir)
if is_redir:
content = 'REDIRECT <a class="redirect" href="/en-US/blah">Blah</a>'
else:
content = test_content
revision(document=doc,
content=test_content,
summary=get_seo_description(
test_content,
strip_markup=False),
save=True)
doc.html = content
if parent:
doc.parent_topic = parent
doc.save()
return doc
root_doc = _make_doc('Root', 'Root')
child_doc_1 = _make_doc('Child 1', 'Root/Child_1', root_doc)
_make_doc('Grandchild 1', 'Root/Child_1/Grandchild_1', child_doc_1)
grandchild_doc_2 = _make_doc('Grandchild 2',
'Root/Child_1/Grandchild_2',
child_doc_1)
_make_doc('Great Grandchild 1',
'Root/Child_1/Grandchild_2/Great_Grand_Child_1',
grandchild_doc_2)
_make_doc('Child 2', 'Root/Child_2', root_doc)
_make_doc('Child 3', 'Root/Child_3', root_doc, True)
Switch.objects.create(name='application_ACAO', active=True)
for expand in (True, False):
url = reverse('wiki.children', args=['Root'],
locale=settings.WIKI_DEFAULT_LANGUAGE)
if expand:
url = '%s?expand' % url
resp = self.client.get(url)
ok_('Access-Control-Allow-Origin' in resp)
eq_('*', resp['Access-Control-Allow-Origin'])
json_obj = json.loads(resp.content)
# Basic structure creation testing
eq_(json_obj['slug'], 'Root')
if not expand:
ok_('summary' not in json_obj)
else:
eq_(json_obj['summary'],
'Test <a href="http://example.com">Summary</a>')
ok_('tags' in json_obj)
ok_('review_tags' in json_obj)
eq_(len(json_obj['subpages']), 2)
eq_(len(json_obj['subpages'][0]['subpages']), 2)
eq_(json_obj['subpages'][0]['subpages'][1]['title'],
'Grandchild 2')
# Depth parameter testing
def _depth_test(depth, aught):
url = reverse('wiki.children', args=['Root'],
locale=settings.WIKI_DEFAULT_LANGUAGE) + '?depth=' + str(depth)
resp = self.client.get(url)
json_obj = json.loads(resp.content)
eq_(len(json_obj['subpages'][0]['subpages'][1]['subpages']), aught)
_depth_test(2, 0)
_depth_test(3, 1)
_depth_test(6, 1)
# Sorting test
sort_root_doc = _make_doc('Sort Root', 'Sort_Root')
_make_doc('B Child', 'Sort_Root/B_Child', sort_root_doc)
_make_doc('A Child', 'Sort_Root/A_Child', sort_root_doc)
resp = self.client.get(reverse('wiki.children', args=['Sort_Root'],
locale=settings.WIKI_DEFAULT_LANGUAGE))
json_obj = json.loads(resp.content)
eq_(json_obj['subpages'][0]['title'], 'A Child')
# Test if we are serving an error json if document does not exist
no_doc_url = reverse('wiki.children', args=['nonexistentDocument'],
locale=settings.WIKI_DEFAULT_LANGUAGE)
resp = self.client.get(no_doc_url)
result = json.loads(resp.content)
eq_(result, {'error': 'Document does not exist.'})
def test_summary_view(self):
"""The ?summary option should restrict document view to summary"""
d, r = doc_rev("""
<p>Foo bar <a href="http://example.com">baz</a></p>
<p>Quux xyzzy</p>
""")
resp = self.client.get('%s?raw&summary' % d.get_absolute_url())
eq_(resp.content, 'Foo bar <a href="http://example.com">baz</a>')
@override_settings(CELERY_ALWAYS_EAGER=True)
@mock.patch('waffle.flag_is_active')
@mock.patch('kuma.wiki.jobs.DocumentContributorsJob.get')
def test_footer_contributors(self, get_contributors, flag_is_active):
get_contributors.return_value = [
{'id': 1, 'username': 'ringo', 'email': '[email protected]'},
{'id': 2, 'username': 'john', 'email': '[email protected]'},
]
flag_is_active.return_value = True
d, r = doc_rev('some content')
resp = self.client.get(d.get_absolute_url())
page = pq(resp.content)
contributors = (page.find(":contains('Contributors to this page')")
.parent())
# just checking if the contributor link is rendered
eq_(len(contributors.find('a')), 2)
def test_revision_view_bleached_content(self):
"""Bug 821988: Revision content should be cleaned with bleach"""
d, r = doc_rev("""
<a href="#" onload=alert(3)>Hahaha</a>
<svg><svg onload=alert(3);>
""")
resp = self.client.get(r.get_absolute_url())
page = pq(resp.content)
ct = page.find('#wikiArticle').html()
ok_('<svg>' not in ct)
ok_('<a href="#">Hahaha</a>' in ct)
def test_raw_css_view(self):
"""The raw source for a document can be requested"""
self.client.login(username='admin', password='testpass')
doc = document(title='Template:CustomSampleCSS',
slug='Template:CustomSampleCSS',
save=True)
revision(
save=True,
is_approved=True,
document=doc,
content="""
/* CSS here */
body {
padding: 0;
margin: 0;
}
svg:not(:root) {
display:block;
}
""")
response = self.client.get('%s?raw=true' %
reverse('wiki.document', args=[doc.slug]))
ok_('text/css' in response['Content-Type'])
class PermissionTests(UserTestCase, WikiTestCase):
localizing_client = True
def setUp(self):
"""Set up the permissions, groups, and users needed for the tests"""
super(PermissionTests, self).setUp()
self.perms, self.groups, self.users, self.superuser = (
create_template_test_users())
def test_template_revert_permission(self):
locale = 'en-US'
slug = 'Template:test-revert-perm'
doc = document(save=True, slug=slug, title=slug, locale=locale)
rev = revision(save=True, document=doc)
# Revision template should not show revert button
url = reverse('wiki.revision', args=([doc.slug, rev.id]))
resp = self.client.get(url)
ok_('Revert' not in resp.content)
# Revert POST should give permission denied to user without perm
username = self.users['none'].username
self.client.login(username=username, password='testpass')
url = reverse('wiki.revert_document',
args=([doc.slug, rev.id]))
resp = self.client.post(url, {'comment': 'test'})
eq_(403, resp.status_code)
# Revert POST should give success to user with perm
username = self.users['change'].username
self.client.login(username=username, password='testpass')
url = reverse('wiki.revert_document',
args=([doc.slug, rev.id]))
resp = self.client.post(url, {'comment': 'test'}, follow=True)
eq_(200, resp.status_code)
def test_template_permissions(self):
msg = ('edit', 'create')
for is_add in (True, False):
slug_trials = (
('test_for_%s', (
(True, self.superuser),
(True, self.users['none']),
(True, self.users['all']),
(True, self.users['add']),
(True, self.users['change']),
)),
('Template:test_for_%s', (
(True, self.superuser),
(False, self.users['none']),
(True, self.users['all']),
(is_add, self.users['add']),
(not is_add, self.users['change']),
))
)
for slug_tmpl, trials in slug_trials:
for expected, tmp_user in trials:
username = tmp_user.username
slug = slug_tmpl % username
locale = settings.WIKI_DEFAULT_LANGUAGE
Document.objects.all().filter(slug=slug).delete()
if not is_add:
doc = document(save=True, slug=slug, title=slug,
locale=locale)
revision(save=True, document=doc)
self.client.login(username=username, password='testpass')
data = new_document_data()
slug = slug_tmpl % username
data.update({"title": slug, "slug": slug})
if is_add:
url = reverse('wiki.create', locale=locale)
resp = self.client.post(url, data, follow=False)
else:
data['form'] = 'rev'
url = reverse('wiki.edit', args=(slug,), locale=locale)
resp = self.client.post(url, data, follow=False)
if expected:
eq_(302, resp.status_code,
"%s should be able to %s %s" %
                            (username, msg[is_add], slug))
Document.objects.filter(slug=slug).delete()
else:
eq_(403, resp.status_code,
"%s should not be able to %s %s" %
                            (username, msg[is_add], slug))
class ConditionalGetTests(UserTestCase, WikiTestCase):
"""Tests for conditional GET on document view"""
localizing_client = True
def test_last_modified(self):
"""Ensure the last-modified stamp of a document is cached"""
doc, rev = doc_rev()
get_url = reverse('wiki.document',
args=[doc.slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
# There should be a last-modified date cached for this document already
cache_key = doc.last_modified_cache_key
ok_(cache.get(cache_key))
# Now, try a request, and ensure that the last-modified header is
# present.
response = self.client.get(get_url, follow=False)
ok_(response.has_header('last-modified'))
last_mod = response['last-modified']
# Try another request, using If-Modified-Since. This should be a 304
response = self.client.get(get_url, follow=False,
HTTP_IF_MODIFIED_SINCE=last_mod)
eq_(304, response.status_code)
# Finally, ensure that the last-modified was cached.
cached_last_mod = cache.get(cache_key)
eq_(doc.modified.strftime('%s'), cached_last_mod)
# Let the clock tick, so the last-modified will change on edit.
time.sleep(1.0)
# Edit the document, ensure the last-modified has been invalidated.
revision(document=doc, content="New edits", save=True)
ok_(cache.get(cache_key) != cached_last_mod)
        # This should no longer be a 304; the last-modified in the response and
        # cache should have changed.
response = self.client.get(get_url, follow=False,
HTTP_IF_MODIFIED_SINCE=last_mod)
eq_(200, response.status_code)
ok_(last_mod != response['last-modified'])
ok_(cached_last_mod != cache.get(cache_key))
def test_deletion_clears_last_modified(self):
"""Deleting a page clears any last-modified caching"""
# Setup mostly the same as previous test, to get a doc and set
# last-modified info.
doc, rev = doc_rev()
self.url = reverse('wiki.document',
args=[doc.slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
cache_key = doc.last_modified_cache_key
last_mod = cache.get(cache_key)
ok_(last_mod) # exists already because pre-filled
self.client.get(self.url, follow=False)
ok_(cache.get(cache_key) == last_mod)
# Now delete the doc and make sure there's no longer
# last-modified data in the cache for it afterward.
doc.delete()
ok_(not cache.get(cache_key))
def test_deleted_doc_returns_404(self):
"""Requesting a deleted doc returns 404"""
doc, rev = doc_rev()
doc.delete()
DocumentDeletionLog.objects.create(locale=doc.locale, slug=doc.slug,
user=rev.creator, reason="test")
response = self.client.get(doc.get_absolute_url(), follow=False)
eq_(404, response.status_code)
class ReadOnlyTests(UserTestCase, WikiTestCase):
"""Tests readonly scenarios"""
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
localizing_client = True
def setUp(self):
super(ReadOnlyTests, self).setUp()
self.d, r = doc_rev()
self.edit_url = reverse('wiki.edit', args=[self.d.slug])
def test_everyone(self):
""" kumaediting: everyone, kumabanned: none """
self.kumaediting_flag.everyone = True
self.kumaediting_flag.save()
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.edit_url)
eq_(200, resp.status_code)
def test_superusers_only(self):
""" kumaediting: superusers, kumabanned: none """
self.kumaediting_flag.everyone = None
self.kumaediting_flag.superusers = True
self.kumaediting_flag.save()
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.edit_url)
eq_(403, resp.status_code)
ok_('The wiki is in read-only mode.' in resp.content)
self.client.logout()
self.client.login(username='admin', password='testpass')
resp = self.client.get(self.edit_url)
eq_(200, resp.status_code)
def test_banned_users(self):
""" kumaediting: everyone, kumabanned: testuser2 """
self.kumaediting_flag.everyone = True
self.kumaediting_flag.save()
# ban testuser2
kumabanned = Flag.objects.create(name='kumabanned')
kumabanned.users = self.user_model.objects.filter(username='testuser2')
kumabanned.save()
# testuser can still access
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.edit_url)
eq_(200, resp.status_code)
self.client.logout()
# testuser2 cannot
self.client.login(username='testuser2', password='testpass')
resp = self.client.get(self.edit_url)
eq_(403, resp.status_code)
ok_('Your profile has been banned from making edits.' in resp.content)
# ban testuser01 and testuser2
kumabanned.users = self.user_model.objects.filter(
Q(username='testuser2') | Q(username='testuser01'))
kumabanned.save()
# testuser can still access
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.edit_url)
eq_(200, resp.status_code)
self.client.logout()
# testuser2 cannot access
self.client.login(username='testuser2', password='testpass')
resp = self.client.get(self.edit_url)
eq_(403, resp.status_code)
ok_('Your profile has been banned from making edits.' in resp.content)
# testuser01 cannot access
self.client.login(username='testuser01', password='testpass')
resp = self.client.get(self.edit_url)
eq_(403, resp.status_code)
ok_('Your profile has been banned from making edits.' in resp.content)
class BannedIPTests(UserTestCase, WikiTestCase):
"""Tests readonly scenarios"""
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
localizing_client = True
def setUp(self):
super(BannedIPTests, self).setUp()
self.ip = '127.0.0.1'
self.ip_ban = IPBan.objects.create(ip=self.ip)
self.doc, rev = doc_rev()
self.edit_url = reverse('wiki.edit',
args=[self.doc.slug])
def tearDown(self):
cache.clear()
def test_banned_ip_cant_get_edit(self):
self.client.login(username='testuser', password='testpass')
response = self.client.get(self.edit_url, REMOTE_ADDR=self.ip)
eq_(403, response.status_code)
def test_banned_ip_cant_post_edit(self):
self.client.login(username='testuser', password='testpass')
response = self.client.get(self.edit_url, REMOTE_ADDR=self.ip)
eq_(403, response.status_code)
def test_banned_ip_can_still_get_articles(self):
response = self.client.get(self.doc.get_absolute_url(),
REMOTE_ADDR=self.ip)
eq_(200, response.status_code)
class KumascriptIntegrationTests(UserTestCase, WikiTestCase):
"""
Tests for usage of the kumascript service.
Note that these tests really just check whether or not the service was
used, and are not integration tests meant to exercise the real service.
"""
localizing_client = True
def setUp(self):
super(KumascriptIntegrationTests, self).setUp()
self.d, self.r = doc_rev()
self.r.content = "TEST CONTENT"
self.r.save()
self.d.tags.set('foo', 'bar', 'baz')
self.url = reverse('wiki.document',
args=(self.d.slug,),
locale=self.d.locale)
# TODO: upgrade mock to 0.8.0 so we can do this.
# self.mock_kumascript_get = (
# mock.patch('kuma.wiki.kumascript.get'))
# self.mock_kumascript_get.return_value = self.d.html
def tearDown(self):
super(KumascriptIntegrationTests, self).tearDown()
# TODO: upgrade mock to 0.8.0 so we can do this.
# self.mock_kumascript_get.stop()
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_basic_view(self, mock_kumascript_get):
"""When kumascript timeout is non-zero, the service should be used"""
mock_kumascript_get.return_value = (self.d.html, None)
self.client.get(self.url, follow=False)
ok_(mock_kumascript_get.called,
"kumascript should have been used")
@override_config(KUMASCRIPT_TIMEOUT=0.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_disabled(self, mock_kumascript_get):
"""When disabled, the kumascript service should not be used"""
mock_kumascript_get.return_value = (self.d.html, None)
self.client.get(self.url, follow=False)
ok_(not mock_kumascript_get.called,
"kumascript not should have been used")
@override_config(KUMASCRIPT_TIMEOUT=0.0)
@mock.patch('kuma.wiki.kumascript.get')
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_disabled_rendering(self, mock_kumascript_get):
"""When disabled, the kumascript service should not be used
in rendering"""
mock_kumascript_get.return_value = (self.d.html, None)
self.d.schedule_rendering('max-age=0')
ok_(not mock_kumascript_get.called,
"kumascript not should have been used")
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_nomacros(self, mock_kumascript_get):
mock_kumascript_get.return_value = (self.d.html, None)
self.client.get('%s?nomacros' % self.url, follow=False)
ok_(not mock_kumascript_get.called,
"kumascript should not have been used")
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_raw(self, mock_kumascript_get):
mock_kumascript_get.return_value = (self.d.html, None)
self.client.get('%s?raw' % self.url, follow=False)
ok_(not mock_kumascript_get.called,
"kumascript should not have been used")
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_raw_macros(self, mock_kumascript_get):
mock_kumascript_get.return_value = (self.d.html, None)
        self.client.get('%s?raw&macros' % self.url, follow=False)
ok_(mock_kumascript_get.called,
"kumascript should have been used")
@override_config(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=1234)
@mock.patch('requests.get')
def test_ua_max_age_zero(self, mock_requests_get):
"""Authenticated users can request a zero max-age for kumascript"""
trap = {}
def my_requests_get(url, headers=None, timeout=None):
trap['headers'] = headers
return FakeResponse(status_code=200,
headers={}, text='HELLO WORLD')
mock_requests_get.side_effect = my_requests_get
self.client.get(self.url, follow=False,
HTTP_CACHE_CONTROL='no-cache')
eq_('max-age=1234', trap['headers']['Cache-Control'])
self.client.login(username='admin', password='testpass')
self.client.get(self.url, follow=False,
                        HTTP_CACHE_CONTROL='max-age=0')
        eq_('max-age=0', trap['headers']['Cache-Control'])
@override_config(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=1234)
@mock.patch('requests.get')
def test_ua_no_cache(self, mock_requests_get):
"""Authenticated users can request no-cache for kumascript"""
trap = {}
def my_requests_get(url, headers=None, timeout=None):
trap['headers'] = headers
return FakeResponse(status_code=200,
headers={}, text='HELLO WORLD')
mock_requests_get.side_effect = my_requests_get
self.client.get(self.url, follow=False,
HTTP_CACHE_CONTROL='no-cache')
eq_('max-age=1234', trap['headers']['Cache-Control'])
self.client.login(username='admin', password='testpass')
self.client.get(self.url, follow=False,
HTTP_CACHE_CONTROL='no-cache')
eq_('no-cache', trap['headers']['Cache-Control'])
@override_config(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=1234)
@mock.patch('requests.get')
def test_conditional_get(self, mock_requests_get):
"""Ensure conditional GET in requests to kumascript work as expected"""
expected_etag = "8675309JENNY"
expected_modified = "Wed, 14 Mar 2012 22:29:17 GMT"
expected_content = "HELLO THERE, WORLD"
trap = dict(req_cnt=0)
def my_requests_get(url, headers=None, timeout=None):
trap['req_cnt'] += 1
trap['headers'] = headers
if trap['req_cnt'] in [1, 2]:
return FakeResponse(
status_code=200, text=expected_content,
headers={
"etag": expected_etag,
"last-modified": expected_modified,
"age": 456
})
else:
return FakeResponse(
status_code=304, text='',
headers={
"etag": expected_etag,
"last-modified": expected_modified,
"age": 123
})
mock_requests_get.side_effect = my_requests_get
# First request to let the view cache etag / last-modified
response = self.client.get(self.url)
# Clear rendered_html to force another request.
self.d.rendered_html = ''
self.d.save()
# Second request to verify the view sends them back
response = self.client.get(self.url)
eq_(expected_etag, trap['headers']['If-None-Match'])
eq_(expected_modified, trap['headers']['If-Modified-Since'])
# Third request to verify content was cached and served on a 304
response = self.client.get(self.url)
ok_(expected_content in response.content)
@override_config(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=600)
@mock.patch('requests.get')
def test_error_reporting(self, mock_requests_get):
"""Kumascript reports errors in HTTP headers, Kuma should display"""
# Make sure we have enough log messages to ensure there are more than
# 10 lines of Base64 in headers. This ensures that there'll be a
# failure if the view sorts FireLogger sequence number alphabetically
# instead of numerically.
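        # (Illustrative: string sorting puts '10' between '1' and '2', e.g.
        # sorted(['seq-1', 'seq-10', 'seq-2']) == ['seq-1', 'seq-10', 'seq-2'],
        # so with more than ten header lines an alphabetical sort would
        # reassemble them out of order.)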
expected_errors = {
"logs": [
{"level": "debug",
"message": "Message #1",
"args": ['TestError', {}, {'name': 'SomeMacro', 'token': {'args': 'arguments here'}}],
"time": "12:32:03 GMT-0400 (EDT)",
"timestamp": "1331829123101000"},
{"level": "warning",
"message": "Message #2",
"args": ['TestError', {}, {'name': 'SomeMacro2'}],
"time": "12:33:58 GMT-0400 (EDT)",
"timestamp": "1331829238052000"},
{"level": "info",
"message": "Message #3",
"args": ['TestError'],
"time": "12:34:22 GMT-0400 (EDT)",
"timestamp": "1331829262403000"},
{"level": "debug",
"message": "Message #4",
"time": "12:32:03 GMT-0400 (EDT)",
"timestamp": "1331829123101000"},
{"level": "warning",
"message": "Message #5",
"time": "12:33:58 GMT-0400 (EDT)",
"timestamp": "1331829238052000"},
{"level": "info",
"message": "Message #6",
"time": "12:34:22 GMT-0400 (EDT)",
"timestamp": "1331829262403000"},
]
}
# Pack it up, get ready to ship it out.
d_json = json.dumps(expected_errors)
d_b64 = base64.encodestring(d_json)
d_lines = [x for x in d_b64.split("\n") if x]
# Headers are case-insensitive, so let's just drive that point home
p = ['firelogger', 'FIRELOGGER', 'FireLogger']
fl_uid = 8675309
headers_out = {}
for i in range(0, len(d_lines)):
headers_out['%s-%s-%s' % (p[i % len(p)], fl_uid, i)] = d_lines[i]
# Now, trap the request from the view.
trap = {}
def my_requests_get(url, headers=None, timeout=None):
trap['headers'] = headers
return FakeResponse(
status_code=200,
text='HELLO WORLD',
headers=headers_out
)
mock_requests_get.side_effect = my_requests_get
# Finally, fire off the request to the view and ensure that the log
# messages were received and displayed on the page. But, only for a
# logged in user.
self.client.login(username='admin', password='testpass')
response = self.client.get(self.url)
eq_(trap['headers']['X-FireLogger'], '1.2')
for error in expected_errors['logs']:
ok_(error['message'] in response.content)
eq_(response.status_code, 200)
@override_config(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=600)
@mock.patch('requests.post')
def test_preview_nonascii(self, mock_post):
"""POSTing non-ascii to kumascript should encode to utf8"""
content = u'Français'
trap = {}
def my_post(url, timeout=None, headers=None, data=None):
trap['data'] = data
return FakeResponse(status_code=200, headers={},
text=content.encode('utf8'))
mock_post.side_effect = my_post
self.client.login(username='admin', password='testpass')
self.client.post(reverse('wiki.preview'), {'content': content})
try:
trap['data'].decode('utf8')
except UnicodeDecodeError:
self.fail("Data wasn't posted as utf8")
@attr('bug1197971')
@override_config(KUMASCRIPT_TIMEOUT=1.0,
KUMASCRIPT_MAX_AGE=600)
@mock.patch('kuma.wiki.kumascript.post')
def test_dont_render_previews_for_deferred_docs(self, mock_post):
"""
When a user previews a document with deferred rendering,
we want to force the preview to skip the kumascript POST,
so that big previews can't use up too many kumascript connections.
"""
self.d.defer_rendering = True
self.d.save()
def should_not_post(*args, **kwargs):
self.fail("Preview doc with deferred rendering should not "
"post to KumaScript.")
mock_post.side_effect = should_not_post
self.client.login(username='admin', password='testpass')
self.client.post(reverse('wiki.preview'), {'doc_id': self.d.id})
class DocumentSEOTests(UserTestCase, WikiTestCase):
"""Tests for the document seo logic"""
localizing_client = True
@attr('bug1190212')
def test_get_seo_parent_doesnt_throw_404(self):
slug_dict = {'seo_root': 'Root/Does/Not/Exist'}
try:
_get_seo_parent_title(slug_dict, 'bn-BD')
except Http404:
self.fail('Missing parent should not cause 404 from '
'_get_seo_parent_title')
def test_seo_title(self):
self.client.login(username='admin', password='testpass')
# Utility to make a quick doc
def _make_doc(title, aught_titles, slug):
doc = document(save=True, slug=slug, title=title,
locale=settings.WIKI_DEFAULT_LANGUAGE)
revision(save=True, document=doc)
response = self.client.get(reverse('wiki.document', args=[slug],
locale=settings.WIKI_DEFAULT_LANGUAGE))
page = pq(response.content)
ok_(page.find('title').text() in aught_titles)
# Test nested document titles
_make_doc('One', ['One | MDN'], 'one')
_make_doc('Two', ['Two - One | MDN'], 'one/two')
_make_doc('Three', ['Three - One | MDN'], 'one/two/three')
_make_doc(u'Special Φ Char',
[u'Special \u03a6 Char - One | MDN',
u'Special \xce\xa6 Char - One | MDN'],
'one/two/special_char')
# Additional tests for /Web/* changes
_make_doc('Firefox OS', ['Firefox OS | MDN'], 'firefox_os')
_make_doc('Email App', ['Email App - Firefox OS | MDN'],
'firefox_os/email_app')
_make_doc('Web', ['Web | MDN'], 'Web')
_make_doc('HTML', ['HTML | MDN'], 'Web/html')
_make_doc('Fieldset', ['Fieldset - HTML | MDN'], 'Web/html/fieldset')
_make_doc('Legend', ['Legend - HTML | MDN'],
'Web/html/fieldset/legend')
def test_seo_script(self):
self.client.login(username='admin', password='testpass')
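        # Helper: create a doc with the given content, then check that the
        # generated meta description matches the expected SEO preview.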
def make_page_and_compare_seo(slug, content, aught_preview):
# Create the doc
data = new_document_data()
data.update({'title': 'blah', 'slug': slug, 'content': content})
response = self.client.post(reverse('wiki.create',
locale=settings.WIKI_DEFAULT_LANGUAGE),
data)
eq_(302, response.status_code)
# Connect to newly created page
response = self.client.get(reverse('wiki.document', args=[slug],
locale=settings.WIKI_DEFAULT_LANGUAGE))
page = pq(response.content)
meta_content = page.find('meta[name=description]').attr('content')
eq_(str(meta_content).decode('utf-8'),
str(aught_preview).decode('utf-8'))
# Test pages - very basic
good = 'This is the content which should be chosen, man.'
make_page_and_compare_seo('one', '<p>' + good + '</p>', good)
# No content, no seo
make_page_and_compare_seo('two', 'blahblahblahblah<br />', None)
# No summary, no seo
make_page_and_compare_seo('three', '<div><p>You cant see me</p></div>',
None)
# Warning paragraph ignored
make_page_and_compare_seo('four',
'<div class="geckoVersion">'
'<p>No no no</p></div><p>yes yes yes</p>',
'yes yes yes')
# Warning paragraph ignored, first one chosen if multiple matches
make_page_and_compare_seo('five',
'<div class="geckoVersion"><p>No no no</p>'
'</div><p>yes yes yes</p>'
'<p>ignore ignore ignore</p>',
'yes yes yes')
# Don't take legacy crumbs
make_page_and_compare_seo('six', u'<p>« CSS</p><p>I am me!</p>',
'I am me!')
# Take the seoSummary class'd element
make_page_and_compare_seo('seven',
u'<p>I could be taken</p>'
'<p class="seoSummary">I should be though</p>',
'I should be though')
# Two summaries append
make_page_and_compare_seo('eight',
u'<p>I could be taken</p>'
'<p class="seoSummary">a</p>'
'<p class="seoSummary">b</p>',
'a b')
# No brackets
make_page_and_compare_seo('nine',
u'<p>I <em>am</em> awesome.'
' <a href="blah">A link</a> is also <cool></p>',
u'I am awesome. A link is also cool')
class DocumentEditingTests(UserTestCase, WikiTestCase):
"""Tests for the document-editing view"""
localizing_client = True
def test_noindex_post(self):
self.client.login(username='admin', password='testpass')
# Go to new document page to ensure no-index header works
response = self.client.get(reverse('wiki.create', args=[],
locale=settings.WIKI_DEFAULT_LANGUAGE))
eq_(response['X-Robots-Tag'], 'noindex')
@attr('bug821986')
def test_editor_safety_filter(self):
"""Safety filter should be applied before rendering editor"""
self.client.login(username='admin', password='testpass')
r = revision(save=True, content="""
<svg><circle onload=confirm(3)>
""")
args = [r.document.slug]
urls = (
reverse('wiki.edit', args=args),
'%s?tolocale=%s' % (reverse('wiki.translate', args=args), 'fr')
)
for url in urls:
page = pq(self.client.get(url).content)
editor_src = page.find('#id_content').text()
ok_('onload' not in editor_src)
def test_create_on_404(self):
self.client.login(username='admin', password='testpass')
# Create the parent page.
d, r = doc_rev()
# Establish attribs of child page.
locale = settings.WIKI_DEFAULT_LANGUAGE
local_slug = 'Some_New_Title'
slug = '%s/%s' % (d.slug, local_slug)
url = reverse('wiki.document', args=[slug], locale=locale)
# Ensure redirect to create new page on attempt to visit non-existent
# child page.
resp = self.client.get(url)
eq_(302, resp.status_code)
ok_('docs/new' in resp['Location'])
ok_('?slug=%s' % local_slug in resp['Location'])
# Ensure real 404 for visit to non-existent page with params common to
# kumascript and raw content API.
for p_name in ('raw', 'include', 'nocreate'):
sub_url = '%s?%s=1' % (url, p_name)
resp = self.client.get(sub_url)
eq_(404, resp.status_code)
# Ensure root level documents work, not just children
response = self.client.get(reverse('wiki.document',
args=['noExist'], locale=locale))
eq_(302, response.status_code)
response = self.client.get(reverse('wiki.document',
args=['Template:NoExist'],
locale=locale))
eq_(302, response.status_code)
def test_new_document_comment(self):
"""Creating a new document with a revision comment saves the comment"""
self.client.login(username='admin', password='testpass')
comment = 'I am the revision comment'
slug = 'Test-doc-comment'
loc = settings.WIKI_DEFAULT_LANGUAGE
# Create a new doc.
data = new_document_data()
data.update({'slug': slug, 'comment': comment})
self.client.post(reverse('wiki.create'), data)
doc = Document.objects.get(slug=slug, locale=loc)
eq_(comment, doc.current_revision.comment)
@attr('toc')
def test_toc_initial(self):
self.client.login(username='admin', password='testpass')
resp = self.client.get(reverse('wiki.create'))
eq_(200, resp.status_code)
page = pq(resp.content)
toc_select = page.find('#id_toc_depth')
toc_options = toc_select.find('option')
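        # The ToC depth select should have the H4 depth option selected by
        # default for a new document.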
for option in toc_options:
opt_element = pq(option)
found_selected = False
if opt_element.attr('selected'):
found_selected = True
eq_(str(Revision.TOC_DEPTH_H4), opt_element.attr('value'))
if not found_selected:
raise AssertionError("No ToC depth initially selected.")
@attr('retitle')
def test_retitling_solo_doc(self):
""" Editing just title of non-parent doc:
* Changes title
* Doesn't cause errors
* Doesn't create redirect
"""
# Not testing slug changes separately; the model tests cover those plus
# slug+title changes. If title changes work in the view, the rest
# should also.
self.client.login(username='admin', password='testpass')
new_title = 'Some New Title'
d, r = doc_rev()
old_title = d.title
data = new_document_data()
data.update({'title': new_title,
'form': 'rev'})
data['slug'] = ''
url = reverse('wiki.edit', args=[d.slug])
self.client.post(url, data)
eq_(new_title,
Document.objects.get(slug=d.slug, locale=d.locale).title)
try:
Document.objects.get(title=old_title)
self.fail("Should not find doc by old title after retitling.")
except Document.DoesNotExist:
pass
@attr('retitle')
def test_retitling_parent_doc(self):
""" Editing just title of parent doc:
* Changes title
* Doesn't cause errors
* Doesn't create redirect
"""
# Not testing slug changes separately; the model tests cover those plus
# slug+title changes. If title changes work in the view, the rest
# should also.
self.client.login(username='admin', password='testpass')
# create parent doc & rev along with child doc & rev
d = document(title='parent', save=True)
revision(document=d, content='parent', save=True)
d2 = document(title='child', parent_topic=d, save=True)
revision(document=d2, content='child', save=True)
old_title = d.title
new_title = 'Some New Title'
data = new_document_data()
data.update({'title': new_title,
'form': 'rev'})
data['slug'] = ''
url = reverse('wiki.edit', args=[d.slug])
self.client.post(url, data)
eq_(new_title,
Document.objects.get(slug=d.slug, locale=d.locale).title)
try:
Document.objects.get(title=old_title)
self.fail("Should not find doc by old title after retitling.")
except Document.DoesNotExist:
pass
def test_slug_change_ignored_for_iframe(self):
"""When the title of an article is edited in an iframe, the change is
ignored."""
self.client.login(username='admin', password='testpass')
new_slug = 'some_new_slug'
d, r = doc_rev()
old_slug = d.slug
data = new_document_data()
data.update({'title': d.title,
'slug': new_slug,
'form': 'rev'})
self.client.post('%s?iframe=1' % reverse('wiki.edit',
args=[d.slug]), data)
eq_(old_slug, Document.objects.get(slug=d.slug,
locale=d.locale).slug)
assert "REDIRECT" not in Document.objects.get(slug=old_slug).html
@attr('clobber')
def test_slug_collision_errors(self):
"""When an attempt is made to retitle an article and another with that
title already exists, there should be form errors"""
self.client.login(username='admin', password='testpass')
exist_slug = "existing-doc"
# Create a new doc.
data = new_document_data()
data.update({"slug": exist_slug})
resp = self.client.post(reverse('wiki.create'), data)
eq_(302, resp.status_code)
# Create another new doc.
data = new_document_data()
data.update({"slug": 'some-new-title'})
resp = self.client.post(reverse('wiki.create'), data)
eq_(302, resp.status_code)
# Now, post an update with duplicate slug
data.update({
'form': 'rev',
'slug': exist_slug
})
resp = self.client.post(reverse('wiki.edit',
args=['some-new-title']), data)
eq_(200, resp.status_code)
p = pq(resp.content)
ok_(p.find('.errorlist').length > 0)
ok_(p.find('.errorlist a[href="#id_slug"]').length > 0)
@attr('clobber')
def test_redirect_can_be_clobbered(self):
"""When an attempt is made to retitle an article, and another article
with that title exists but is a redirect, there should be no errors and
the redirect should be replaced."""
self.client.login(username='admin', password='testpass')
exist_title = "Existing doc"
exist_slug = "existing-doc"
changed_title = 'Changed title'
changed_slug = 'changed-title'
# Create a new doc.
data = new_document_data()
data.update({"title": exist_title, "slug": exist_slug})
resp = self.client.post(reverse('wiki.create'), data)
eq_(302, resp.status_code)
# Change title and slug
data.update({'form': 'rev',
'title': changed_title,
'slug': changed_slug})
resp = self.client.post(reverse('wiki.edit',
args=[exist_slug]),
data)
eq_(302, resp.status_code)
# Change title and slug back to originals, clobbering the redirect
data.update({'form': 'rev',
'title': exist_title,
'slug': exist_slug})
resp = self.client.post(reverse('wiki.edit',
args=[changed_slug]),
data)
eq_(302, resp.status_code)
def test_invalid_slug(self):
"""Slugs cannot contain "$", but can contain "/"."""
self.client.login(username='admin', password='testpass')
data = new_document_data()
data['title'] = 'valid slug'
data['slug'] = 'valid'
response = self.client.post(reverse('wiki.create'), data)
self.assertRedirects(response,
reverse('wiki.document', args=[data['slug']],
locale=settings.WIKI_DEFAULT_LANGUAGE))
new_url = reverse('wiki.create')
invalid_slugs = [
'va/lid', # slashes
'inva$lid', # dollar signs
'inva?lid', # question marks
'inva%lid', # percentage sign
'"invalid\'', # quotes
'in valid', # whitespace
]
for invalid_slug in invalid_slugs:
data['title'] = 'invalid with %s' % invalid_slug
data['slug'] = invalid_slug
response = self.client.post(new_url, data)
self.assertContains(response, 'The slug provided is not valid.')
def test_invalid_reserved_term_slug(self):
"""Slugs should not collide with reserved URL patterns"""
self.client.login(username='admin', password='testpass')
data = new_document_data()
# TODO: This is info derived from urls.py, but unsure how to DRY it
reserved_slugs = (
'ckeditor_config.js',
'watch-ready-for-review',
'unwatch-ready-for-review',
'watch-approved',
'unwatch-approved',
'.json',
'new',
'all',
'preview-wiki-content',
'category/10',
'needs-review/technical',
'needs-review/',
'feeds/atom/all/',
'feeds/atom/needs-review/technical',
'feeds/atom/needs-review/',
'tag/tasty-pie'
)
for term in reserved_slugs:
data['title'] = 'invalid with %s' % term
data['slug'] = term
response = self.client.post(reverse('wiki.create'), data)
self.assertContains(response, 'The slug provided is not valid.')
def test_slug_revamp(self):
self.client.login(username='admin', password='testpass')
def _createAndRunTests(slug):
# Create some vars
locale = settings.WIKI_DEFAULT_LANGUAGE
foreign_locale = 'es'
new_doc_url = reverse('wiki.create')
invalid_slug = "some/thing"
invalid_slugs = [
"some/thing",
"some?thing",
"some thing",
"some%thing",
"$omething",
]
child_slug = 'kiddy'
grandchild_slug = 'grandkiddy'
# Create the document data
doc_data = new_document_data()
doc_data['title'] = slug + ' Doc'
doc_data['slug'] = slug
doc_data['content'] = 'This is the content'
doc_data['is_localizable'] = True
""" NEW DOCUMENT CREATION, CHILD CREATION """
# Create the document, validate it exists
response = self.client.post(new_doc_url, doc_data)
eq_(302, response.status_code) # 302 = good, forward to new page
ok_(slug in response['Location'])
self.assertRedirects(response, reverse('wiki.document',
locale=locale, args=[slug]))
doc_url = reverse('wiki.document', locale=locale, args=[slug])
eq_(self.client.get(doc_url).status_code, 200)
doc = Document.objects.get(locale=locale, slug=slug)
eq_(doc.slug, slug)
eq_(0, len(Document.objects.filter(title=doc_data['title'] + 'Redirect')))
# Create child document data
child_data = new_document_data()
child_data['title'] = slug + ' Child Doc'
child_data['slug'] = invalid_slug
child_data['content'] = 'This is the content'
child_data['is_localizable'] = True
# Attempt to create the child with invalid slug, validate it fails
def test_invalid_slug(inv_slug, url, data, doc):
data['slug'] = inv_slug
response = self.client.post(url, data)
page = pq(response.content)
eq_(200, response.status_code) # 200 = bad, invalid data
# Slug doesn't add parent
eq_(inv_slug, page.find('input[name=slug]')[0].value)
eq_(doc.get_absolute_url(),
page.find('.metadataDisplay').attr('href'))
self.assertContains(response,
'The slug provided is not valid.')
for invalid_slug in invalid_slugs:
test_invalid_slug(invalid_slug,
new_doc_url + '?parent=' + str(doc.id),
child_data, doc)
# Attempt to create the child with *valid* slug,
# should succeed and redirect
child_data['slug'] = child_slug
full_child_slug = slug + '/' + child_data['slug']
response = self.client.post(new_doc_url + '?parent=' + str(doc.id),
child_data)
eq_(302, response.status_code)
self.assertRedirects(response, reverse('wiki.document',
locale=locale,
args=[full_child_slug]))
child_doc = Document.objects.get(locale=locale,
slug=full_child_slug)
eq_(child_doc.slug, full_child_slug)
eq_(0, len(Document.objects.filter(
title=child_data['title'] + ' Redirect 1',
locale=locale)))
# Create grandchild data
grandchild_data = new_document_data()
grandchild_data['title'] = slug + ' Grandchild Doc'
grandchild_data['slug'] = invalid_slug
grandchild_data['content'] = 'This is the content'
grandchild_data['is_localizable'] = True
# Attempt to create the child with invalid slug, validate it fails
response = self.client.post(
new_doc_url + '?parent=' + str(child_doc.id), grandchild_data)
page = pq(response.content)
eq_(200, response.status_code) # 200 = bad, invalid data
# Slug doesn't add parent
eq_(invalid_slug, page.find('input[name=slug]')[0].value)
eq_(child_doc.get_absolute_url(),
page.find('.metadataDisplay').attr('href'))
self.assertContains(response, 'The slug provided is not valid.')
# Attempt to create the child with *valid* slug,
# should succeed and redirect
grandchild_data['slug'] = grandchild_slug
full_grandchild_slug = (full_child_slug + '/' +
grandchild_data['slug'])
response = self.client.post(
new_doc_url + '?parent=' + str(child_doc.id),
grandchild_data)
eq_(302, response.status_code)
self.assertRedirects(response,
reverse('wiki.document', locale=locale,
args=[full_grandchild_slug]))
grandchild_doc = Document.objects.get(locale=locale,
slug=full_grandchild_slug)
eq_(grandchild_doc.slug, full_grandchild_slug)
missing_title = grandchild_data['title'] + ' Redirect 1'
eq_(0, len(Document.objects.filter(title=missing_title,
locale=locale)))
def _run_edit_tests(edit_slug, edit_data, edit_doc,
edit_parent_path):
"""EDIT DOCUMENT TESTING"""
# Load "Edit" page for the root doc, ensure no "/" in the slug
# Also ensure the 'parent' link is not present
response = self.client.get(reverse('wiki.edit',
args=[edit_doc.slug], locale=locale))
eq_(200, response.status_code)
page = pq(response.content)
eq_(edit_data['slug'], page.find('input[name=slug]')[0].value)
eq_(edit_parent_path,
page.find('.metadataDisplay').attr('href'))
# Attempt an invalid edit of the root,
# ensure the slug stays the same (i.e. no parent prepending)
def test_invalid_slug_edit(inv_slug, url, data):
data['slug'] = inv_slug
data['form'] = 'rev'
response = self.client.post(url, data)
eq_(200, response.status_code) # 200 = bad, invalid data
page = pq(response.content)
# Slug doesn't add parent
eq_(inv_slug, page.find('input[name=slug]')[0].value)
eq_(edit_parent_path,
page.find('.metadataDisplay').attr('href'))
self.assertContains(response,
'The slug provided is not valid.')
# Ensure no redirect
redirect_title = data['title'] + ' Redirect 1'
eq_(0, len(Document.objects.filter(title=redirect_title,
locale=locale)))
# Push a valid edit, without changing the slug
edit_data['slug'] = edit_slug
edit_data['form'] = 'rev'
response = self.client.post(reverse('wiki.edit',
args=[edit_doc.slug],
locale=locale),
edit_data)
eq_(302, response.status_code)
# Ensure no redirect
redirect_title = edit_data['title'] + ' Redirect 1'
eq_(0, len(Document.objects.filter(title=redirect_title,
locale=locale)))
self.assertRedirects(response,
reverse('wiki.document',
locale=locale,
args=[edit_doc.slug]))
def _run_translate_tests(translate_slug, translate_data,
translate_doc):
"""TRANSLATION DOCUMENT TESTING"""
foreign_url = (reverse('wiki.translate',
args=[translate_doc.slug],
locale=locale) +
'?tolocale=' + foreign_locale)
foreign_doc_url = reverse('wiki.document',
args=[translate_doc.slug],
locale=foreign_locale)
# Verify translate page form is populated correctly
response = self.client.get(foreign_url)
eq_(200, response.status_code)
page = pq(response.content)
eq_(translate_data['slug'],
page.find('input[name=slug]')[0].value)
# Attempt an invalid edit of the root
# ensure the slug stays the same (i.e. no parent prepending)
def test_invalid_slug_translate(inv_slug, url, data):
data['slug'] = inv_slug
data['form'] = 'both'
response = self.client.post(url, data)
eq_(200, response.status_code) # 200 = bad, invalid data
page = pq(response.content)
# Slug doesn't add parent
eq_(inv_slug, page.find('input[name=slug]')[0].value)
self.assertContains(response,
'The slug provided is not valid.')
# Ensure no redirect
eq_(0, len(Document.objects.filter(title=data['title'] +
' Redirect 1',
locale=foreign_locale)))
# Push a valid translation
translate_data['slug'] = translate_slug
translate_data['form'] = 'both'
response = self.client.post(foreign_url, translate_data)
eq_(302, response.status_code)
# Ensure no redirect
redirect_title = translate_data['title'] + ' Redirect 1'
eq_(0, len(Document.objects.filter(title=redirect_title,
locale=foreign_locale)))
self.assertRedirects(response, foreign_doc_url)
return Document.objects.get(locale=foreign_locale,
slug=translate_doc.slug)
_run_translate_tests(slug, doc_data, doc)
_run_translate_tests(child_slug, child_data, child_doc)
_run_translate_tests(grandchild_slug, grandchild_data,
grandchild_doc)
def _run_translate_edit_tests(edit_slug, edit_data, edit_doc):
"""TEST BASIC EDIT OF TRANSLATION"""
# Hit the initial URL
response = self.client.get(reverse('wiki.edit',
args=[edit_doc.slug],
locale=foreign_locale))
eq_(200, response.status_code)
page = pq(response.content)
eq_(edit_data['slug'], page.find('input[name=slug]')[0].value)
# Attempt an invalid edit of the root, ensure the slug stays
# the same (i.e. no parent prepending)
edit_data['slug'] = invalid_slug
edit_data['form'] = 'both'
response = self.client.post(reverse('wiki.edit',
args=[edit_doc.slug],
locale=foreign_locale),
edit_data)
eq_(200, response.status_code) # 200 = bad, invalid data
page = pq(response.content)
# Slug doesn't add parent
eq_(invalid_slug, page.find('input[name=slug]')[0].value)
self.assertContains(response, page.find('ul.errorlist li'
' a[href="#id_slug"]').
text())
# Ensure no redirect
eq_(0, len(Document.objects.filter(title=edit_data['title'] +
' Redirect 1',
locale=foreign_locale)))
# Push a valid edit, without changing the slug
edit_data['slug'] = edit_slug
response = self.client.post(reverse('wiki.edit',
args=[edit_doc.slug],
locale=foreign_locale),
edit_data)
eq_(302, response.status_code)
# Ensure no redirect
eq_(0, len(Document.objects.filter(title=edit_data['title'] +
' Redirect 1',
locale=foreign_locale)))
self.assertRedirects(response, reverse('wiki.document',
locale=foreign_locale,
args=[edit_doc.slug]))
""" TEST EDITING SLUGS AND TRANSLATIONS """
def _run_slug_edit_tests(edit_slug, edit_data, edit_doc, loc):
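                """SLUG EDIT TESTING: change only the slug, expect one redirect"""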
edit_data['slug'] = edit_data['slug'] + '_Updated'
edit_data['form'] = 'rev'
response = self.client.post(reverse('wiki.edit',
args=[edit_doc.slug],
locale=loc),
edit_data)
eq_(302, response.status_code)
                # HACK: the es doc's redirect gets a localized 'Redirigen 1'
                # title when its slug is updated, so match on ' Redir' below
                # Ensure *1* redirect
eq_(1,
len(Document.objects.filter(
title__contains=edit_data['title'] + ' Redir',
locale=loc)))
self.assertRedirects(response,
reverse('wiki.document',
locale=loc,
args=[edit_doc.slug.replace(
edit_slug,
edit_data['slug'])]))
# Run all of the tests
_createAndRunTests("parent")
        # Test that slugs with the same "specific" slug but at different levels
        # in the hierarchy are validated properly upon submission
# Create base doc
parent_doc = document(title='Length',
slug='length',
is_localizable=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
parent_doc.save()
r = revision(document=parent_doc)
r.save()
# Create child, try to use same slug, should work
child_data = new_document_data()
child_data['title'] = 'Child Length'
child_data['slug'] = 'length'
child_data['content'] = 'This is the content'
child_data['is_localizable'] = True
child_url = (reverse('wiki.create') +
'?parent=' +
str(parent_doc.id))
response = self.client.post(child_url, child_data)
eq_(302, response.status_code)
self.assertRedirects(response,
reverse('wiki.document',
args=['length/length'],
locale=settings.WIKI_DEFAULT_LANGUAGE))
# Editing "length/length" document doesn't cause errors
child_data['form'] = 'rev'
child_data['slug'] = ''
edit_url = reverse('wiki.edit', args=['length/length'],
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.post(edit_url, child_data)
eq_(302, response.status_code)
self.assertRedirects(response, reverse('wiki.document',
args=['length/length'],
locale=settings.WIKI_DEFAULT_LANGUAGE))
# Creating a new translation of "length" and "length/length"
# doesn't cause errors
child_data['form'] = 'both'
child_data['slug'] = 'length'
translate_url = reverse('wiki.document', args=[child_data['slug']],
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.post(translate_url + '$translate?tolocale=es',
child_data)
eq_(302, response.status_code)
self.assertRedirects(response, reverse('wiki.document',
args=[child_data['slug']],
locale='es'))
translate_url = reverse('wiki.document', args=['length/length'],
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.post(translate_url + '$translate?tolocale=es',
child_data)
eq_(302, response.status_code)
slug = 'length/' + child_data['slug']
self.assertRedirects(response, reverse('wiki.document',
args=[slug],
locale='es'))
def test_translate_keeps_topical_parent(self):
self.client.login(username='admin', password='testpass')
en_doc, de_doc = make_translation()
en_child_doc = document(parent_topic=en_doc, slug='en-child',
save=True)
en_child_rev = revision(document=en_child_doc, save=True)
de_child_doc = document(parent_topic=de_doc, locale='de',
slug='de-child', parent=en_child_doc,
save=True)
revision(document=de_child_doc, save=True)
post_data = {}
post_data['slug'] = de_child_doc.slug
post_data['title'] = 'New title'
post_data['form'] = 'both'
post_data['content'] = 'New translation'
post_data['tolocale'] = 'de'
post_data['toc_depth'] = 0
post_data['based_on'] = en_child_rev.id
post_data['parent_id'] = en_child_doc.id
translate_url = reverse('wiki.edit',
args=[de_child_doc.slug],
locale='de')
self.client.post(translate_url, post_data)
de_child_doc = Document.objects.get(locale='de', slug='de-child')
eq_(en_child_doc, de_child_doc.parent)
eq_(de_doc, de_child_doc.parent_topic)
eq_('New translation', de_child_doc.current_revision.content)
def test_translate_keeps_toc_depth(self):
self.client.login(username='admin', password='testpass')
locale = settings.WIKI_DEFAULT_LANGUAGE
original_slug = 'eng-doc'
foreign_locale = 'es'
foreign_slug = 'es-doc'
en_doc = document(title='Eng Doc', slug=original_slug,
is_localizable=True, locale=locale)
en_doc.save()
r = revision(document=en_doc, toc_depth=1)
r.save()
post_data = new_document_data()
post_data['title'] = 'ES Doc'
post_data['slug'] = foreign_slug
post_data['content'] = 'This is the content'
post_data['is_localizable'] = True
post_data['form'] = 'both'
post_data['toc_depth'] = r.toc_depth
translate_url = reverse('wiki.document', args=[original_slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
translate_url += '$translate?tolocale=' + foreign_locale
response = self.client.post(translate_url, post_data)
self.assertRedirects(response, reverse('wiki.document',
args=[foreign_slug],
locale=foreign_locale))
es_d = Document.objects.get(locale=foreign_locale, slug=foreign_slug)
eq_(r.toc_depth, es_d.current_revision.toc_depth)
@override_config(KUMASCRIPT_TIMEOUT=1.0)
def test_translate_rebuilds_source_json(self):
self.client.login(username='admin', password='testpass')
# Create an English original and a Spanish translation.
en_slug = 'en-doc'
es_locale = 'es'
es_slug = 'es-doc'
en_doc = document(title='EN Doc',
slug=en_slug,
is_localizable=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
en_doc.save()
en_doc.render()
en_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=en_slug)
json.loads(en_doc.json)
r = revision(document=en_doc)
r.save()
translation_data = new_document_data()
translation_data['title'] = 'ES Doc'
translation_data['slug'] = es_slug
translation_data['content'] = 'This is the content'
translation_data['is_localizable'] = False
translation_data['form'] = 'both'
translate_url = reverse('wiki.document', args=[en_slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
translate_url += '$translate?tolocale=' + es_locale
response = self.client.post(translate_url, translation_data)
# Sanity to make sure the translate succeeded.
self.assertRedirects(response, reverse('wiki.document',
args=[es_slug],
locale=es_locale))
es_doc = Document.objects.get(locale=es_locale,
slug=es_slug)
es_doc.render()
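        # Rendering the translation should regenerate the English document's
        # JSON so its 'translations' list picks up the new Spanish doc.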
new_en_json = json.loads(Document.objects.get(pk=en_doc.pk).json)
ok_('translations' in new_en_json)
ok_(translation_data['title'] in [t['title'] for t in
new_en_json['translations']])
es_translation_json = [t for t in new_en_json['translations'] if
t['title'] == translation_data['title']][0]
eq_(es_translation_json['last_edit'],
es_doc.current_revision.created.isoformat())
def test_slug_translate(self):
"""Editing a translated doc keeps the correct slug"""
self.client.login(username='admin', password='testpass')
# Settings
original_slug = 'eng-doc'
child_slug = 'child-eng-doc'
foreign_locale = 'es'
foreign_slug = 'es-doc'
foreign_child_slug = 'child-es-doc'
# Create the one-level English Doc
en_doc = document(title='Eng Doc',
slug=original_slug,
is_localizable=True,
locale=settings.WIKI_DEFAULT_LANGUAGE)
en_doc.save()
r = revision(document=en_doc)
r.save()
# Translate to ES
parent_data = new_document_data()
parent_data['title'] = 'ES Doc'
parent_data['slug'] = foreign_slug
parent_data['content'] = 'This is the content'
parent_data['is_localizable'] = True
parent_data['form'] = 'both'
translate_url = reverse('wiki.document', args=[original_slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
translate_url += '$translate?tolocale=' + foreign_locale
response = self.client.post(translate_url, parent_data)
self.assertRedirects(response, reverse('wiki.document',
args=[foreign_slug],
locale=foreign_locale))
        # Go to edit the translation, ensure the slug is correct
response = self.client.get(reverse('wiki.edit',
args=[foreign_slug],
locale=foreign_locale))
page = pq(response.content)
eq_(page.find('input[name=slug]')[0].value, foreign_slug)
# Create an English child now
en_doc = document(title='Child Eng Doc',
slug=original_slug + '/' + child_slug,
is_localizable=True,
locale=settings.WIKI_DEFAULT_LANGUAGE,
parent_topic=en_doc)
en_doc.save()
r = revision(document=en_doc)
r.save()
# Translate to ES
child_data = new_document_data()
child_data['title'] = 'ES Child Doc'
child_data['slug'] = foreign_child_slug
child_data['content'] = 'This is the content'
child_data['is_localizable'] = True
child_data['form'] = 'both'
translate_url = reverse('wiki.document',
args=[original_slug + '/' + child_slug],
locale=settings.WIKI_DEFAULT_LANGUAGE)
translate_url += '$translate?tolocale=' + foreign_locale
response = self.client.post(translate_url, child_data)
slug = foreign_slug + '/' + child_data['slug']
self.assertRedirects(response, reverse('wiki.document',
args=[slug],
locale=foreign_locale))
def test_clone(self):
self.client.login(username='admin', password='testpass')
slug = None
title = None
content = '<p>Hello!</p>'
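        # Cloning should prefill the editor content from the source document
        # while leaving the title and slug fields blank.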
test_revision = revision(save=True, title=title, slug=slug,
content=content)
document = test_revision.document
response = self.client.get(reverse('wiki.create',
args=[],
locale=settings.WIKI_DEFAULT_LANGUAGE) + '?clone=' + str(document.id))
page = pq(response.content)
eq_(page.find('input[name=title]')[0].value, title)
eq_(page.find('input[name=slug]')[0].value, slug)
self.assertHTMLEqual(page.find('textarea[name=content]')[0].value, content)
def test_localized_based_on(self):
"""Editing a localized article 'based on' an older revision of the
localization is OK."""
self.client.login(username='admin', password='testpass')
en_r = revision(save=True)
fr_d = document(parent=en_r.document, locale='fr', save=True)
fr_r = revision(document=fr_d, based_on=en_r, save=True)
url = reverse('wiki.new_revision_based_on',
locale='fr', args=(fr_d.slug, fr_r.pk,))
response = self.client.get(url)
input = pq(response.content)('#id_based_on')[0]
eq_(int(input.value), en_r.pk)
def test_restore_translation_source(self):
"""Edit a localized article without an English parent allows user to
set translation parent."""
# Create english doc
self.client.login(username='admin', password='testpass')
data = new_document_data()
self.client.post(reverse('wiki.create'), data)
en_d = Document.objects.get(locale=data['locale'], slug=data['slug'])
# Create french doc
data.update({'locale': 'fr',
'title': 'A Tést Articlé',
'content': "C'ést bon."})
self.client.post(reverse('wiki.create', locale='fr'), data)
fr_d = Document.objects.get(locale=data['locale'], slug=data['slug'])
# Check edit doc page for choose parent box
url = reverse('wiki.edit', args=[fr_d.slug], locale='fr')
response = self.client.get(url)
ok_(pq(response.content)('li.metadata-choose-parent'))
# Set the parent
data.update({'form': 'rev', 'parent_id': en_d.id})
resp = self.client.post(url, data)
eq_(302, resp.status_code)
ok_('fr/docs/a-test-article' in resp['Location'])
# Check the languages drop-down
resp = self.client.get(resp['Location'])
translations = pq(resp.content)('ul#translations li')
ok_('A Test Article' in translations.html())
ok_('English (US)' in translations.text())
def test_translation_source(self):
"""Allow users to change "translation source" settings"""
self.client.login(username='admin', password='testpass')
data = new_document_data()
self.client.post(reverse('wiki.create'), data)
parent = Document.objects.get(locale=data['locale'], slug=data['slug'])
data.update({'title': 'Another Test Article',
'content': "Yahoooo!",
'parent_id': parent.id})
self.client.post(reverse('wiki.create'), data)
child = Document.objects.get(locale=data['locale'], slug=data['slug'])
url = reverse('wiki.edit', args=[child.slug])
response = self.client.get(url)
content = pq(response.content)
ok_(content('li.metadata-choose-parent'))
ok_(str(parent.id) in content.html())
@attr('tags')
@mock.patch.object(Site.objects, 'get_current')
def test_document_tags(self, get_current):
"""Document tags can be edited through revisions"""
data = new_document_data()
locale = data['locale']
slug = data['slug']
path = slug
ts1 = ('JavaScript', 'AJAX', 'DOM')
ts2 = ('XML', 'JSON')
get_current.return_value.domain = 'su.mo.com'
self.client.login(username='admin', password='testpass')
def assert_tag_state(yes_tags, no_tags):
# Ensure the tags are found for the Documents
doc = Document.objects.get(locale=locale, slug=slug)
doc_tags = [x.name for x in doc.tags.all()]
for t in yes_tags:
ok_(t in doc_tags)
for t in no_tags:
ok_(t not in doc_tags)
# Ensure the tags are found in the Document view
response = self.client.get(reverse('wiki.document',
args=[doc.slug]), data)
page = pq(response.content)
            for t in yes_tags:
                eq_(1, page.find('.tags li a:contains("%s")' % t).length,
                    '%s should appear in document view tags' % t)
            for t in no_tags:
                eq_(0, page.find('.tags li a:contains("%s")' % t).length,
                    '%s should NOT appear in document view tags' % t)
# Check for the document slug (title in feeds) in the tag listing
for t in yes_tags:
response = self.client.get(reverse('wiki.tag', args=[t]))
self.assertContains(response, doc.slug, msg_prefix=t)
response = self.client.get(reverse('wiki.feeds.recent_documents',
args=['atom', t]))
self.assertContains(response, doc.title)
for t in no_tags:
response = self.client.get(reverse('wiki.tag', args=[t]))
ok_(doc.slug not in response.content.decode('utf-8'))
response = self.client.get(reverse('wiki.feeds.recent_documents',
args=['atom', t]))
self.assertNotContains(response, doc.title)
# Create a new doc with tags
data.update({'slug': slug, 'tags': ','.join(ts1)})
self.client.post(reverse('wiki.create'), data)
assert_tag_state(ts1, ts2)
# Now, update the tags.
data.update({'form': 'rev', 'tags': ', '.join(ts2)})
self.client.post(reverse('wiki.edit',
args=[path]), data)
assert_tag_state(ts2, ts1)
@attr('review_tags')
@mock.patch.object(Site.objects, 'get_current')
def test_review_tags(self, get_current):
"""Review tags can be managed on document revisions"""
get_current.return_value.domain = 'su.mo.com'
self.client.login(username='admin', password='testpass')
# Create a new doc with one review tag
data = new_document_data()
data.update({'review_tags': ['technical']})
response = self.client.post(reverse('wiki.create'), data)
# Ensure there's now a doc with that expected tag in its newest
# revision
doc = Document.objects.get(slug="a-test-article")
rev = doc.revisions.order_by('-id').all()[0]
review_tags = [x.name for x in rev.review_tags.all()]
eq_(['technical'], review_tags)
# Now, post an update with two tags
data.update({
'form': 'rev',
'review_tags': ['editorial', 'technical'],
})
response = self.client.post(reverse('wiki.edit',
args=[doc.slug]), data)
# Ensure the doc's newest revision has both tags.
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug="a-test-article")
rev = doc.revisions.order_by('-id').all()[0]
review_tags = [x.name for x in rev.review_tags.all()]
review_tags.sort()
eq_(['editorial', 'technical'], review_tags)
# Now, ensure that warning boxes appear for the review tags.
response = self.client.get(reverse('wiki.document',
args=[doc.slug]), data)
page = pq(response.content)
eq_(2, page.find('.warning.warning-review').length)
# Ensure the page appears on the listing pages
response = self.client.get(reverse('wiki.list_review'))
eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
response = self.client.get(reverse('wiki.list_review_tag',
args=('technical',)))
eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
response = self.client.get(reverse('wiki.list_review_tag',
args=('editorial',)))
eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
# Also, ensure that the page appears in the proper feeds
# HACK: Too lazy to parse the XML. Lazy lazy.
response = self.client.get(reverse('wiki.feeds.list_review',
args=('atom',)))
ok_('<entry><title>%s</title>' % doc.title in response.content)
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'technical', )))
ok_('<entry><title>%s</title>' % doc.title in response.content)
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'editorial', )))
ok_('<entry><title>%s</title>' % doc.title in response.content)
# Post an edit that removes one of the tags.
data.update({
'form': 'rev',
'review_tags': ['editorial', ]
})
response = self.client.post(reverse('wiki.edit',
args=[doc.slug]), data)
# Ensure only one of the tags' warning boxes appears, now.
response = self.client.get(reverse('wiki.document',
args=[doc.slug]), data)
page = pq(response.content)
eq_(1, page.find('.warning.warning-review').length)
# Ensure the page appears on the listing pages
response = self.client.get(reverse('wiki.list_review'))
eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
response = self.client.get(reverse('wiki.list_review_tag',
args=('technical',)))
eq_(0, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
response = self.client.get(reverse('wiki.list_review_tag',
args=('editorial',)))
eq_(1, pq(response.content).find("ul.document-list li a:contains('%s')" %
doc.title).length)
# Also, ensure that the page appears in the proper feeds
# HACK: Too lazy to parse the XML. Lazy lazy.
response = self.client.get(reverse('wiki.feeds.list_review',
args=('atom',)))
ok_('<entry><title>%s</title>' % doc.title in response.content)
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'technical', )))
ok_('<entry><title>%s</title>' % doc.title not in response.content)
response = self.client.get(reverse('wiki.feeds.list_review_tag',
args=('atom', 'editorial', )))
ok_('<entry><title>%s</title>' % doc.title in response.content)
@attr('review-tags')
def test_quick_review(self):
"""Test the quick-review button."""
self.client.login(username='admin', password='testpass')
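        # Each case starts from a doc carrying both review tags, posts the
        # given quick-review params, and expects only the listed tags to
        # remain on the newest revision.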
test_data = [
{
'params': {'approve_technical': 1},
'expected_tags': ['editorial'],
'name': 'technical',
'message_contains': ['Technical review completed.']
},
{
'params': {'approve_editorial': 1},
'expected_tags': ['technical'],
'name': 'editorial',
'message_contains': ['Editorial review completed.']
},
{
'params': {
'approve_technical': 1,
'approve_editorial': 1
},
'expected_tags': [],
'name': 'editorial-technical',
'message_contains': [
'Technical review completed.',
'Editorial review completed.',
]
}
]
for data_dict in test_data:
slug = 'test-quick-review-%s' % data_dict['name']
data = new_document_data()
data.update({'review_tags': ['editorial', 'technical'],
'slug': slug})
resp = self.client.post(reverse('wiki.create'), data)
doc = Document.objects.get(slug=slug)
rev = doc.revisions.order_by('-id').all()[0]
review_url = reverse('wiki.quick_review',
args=[doc.slug])
params = dict(data_dict['params'], revision_id=rev.id)
resp = self.client.post(review_url, params)
eq_(302, resp.status_code)
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
rev = doc.revisions.order_by('-id').all()[0]
review_tags = [x.name for x in rev.review_tags.all()]
review_tags.sort()
for expected_str in data_dict['message_contains']:
ok_(expected_str in rev.summary)
ok_(expected_str in rev.comment)
eq_(data_dict['expected_tags'], review_tags)
@attr('midair')
def test_edit_midair_collision(self):
self.client.login(username='admin', password='testpass')
# Post a new document.
data = new_document_data()
resp = self.client.post(reverse('wiki.create'), data)
doc = Document.objects.get(slug=data['slug'])
# Edit #1 starts...
resp = self.client.get(reverse('wiki.edit',
args=[doc.slug]))
page = pq(resp.content)
rev_id1 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 starts...
resp = self.client.get(reverse('wiki.edit',
args=[doc.slug]))
page = pq(resp.content)
rev_id2 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 submits successfully
data.update({
'form': 'rev',
'content': 'This edit got there first',
'current_rev': rev_id2
})
resp = self.client.post(reverse('wiki.edit',
args=[doc.slug]), data)
eq_(302, resp.status_code)
# Edit #1 submits, but receives a mid-aired notification
data.update({
'form': 'rev',
'content': 'This edit gets mid-aired',
'current_rev': rev_id1
})
resp = self.client.post(reverse('wiki.edit',
args=[doc.slug]), data)
eq_(200, resp.status_code)
ok_(unicode(MIDAIR_COLLISION).encode('utf-8') in resp.content,
"Midair collision message should appear")
@attr('toc')
def test_toc_toggle_off(self):
"""Toggling of table of contents in revisions"""
self.client.login(username='admin', password='testpass')
d, _ = doc_rev()
data = new_document_data()
ok_(Document.objects.get(slug=d.slug, locale=d.locale).show_toc)
data['form'] = 'rev'
data['toc_depth'] = 0
data['slug'] = d.slug
data['title'] = d.title
self.client.post(reverse('wiki.edit',
args=[d.slug]),
data)
doc = Document.objects.get(slug=d.slug, locale=d.locale)
eq_(0, doc.current_revision.toc_depth)
@attr('toc')
def test_toc_toggle_on(self):
"""Toggling of table of contents in revisions"""
self.client.login(username='admin', password='testpass')
d, r = doc_rev()
new_r = revision(document=d, content=r.content, toc_depth=0,
is_approved=True)
new_r.save()
ok_(not Document.objects.get(slug=d.slug, locale=d.locale).show_toc)
data = new_document_data()
data['form'] = 'rev'
data['slug'] = d.slug
data['title'] = d.title
self.client.post(reverse('wiki.edit',
args=[d.slug]),
data)
ok_(Document.objects.get(slug=d.slug, locale=d.locale).show_toc)
def test_parent_topic(self):
"""Selection of a parent topic when creating a document."""
self.client.login(username='admin', password='testpass')
d = document(title='HTML8')
d.save()
r = revision(document=d)
r.save()
data = new_document_data()
data['title'] = 'Replicated local storage'
data['parent_topic'] = d.id
resp = self.client.post(reverse('wiki.create'), data)
eq_(302, resp.status_code)
ok_(d.children.count() == 1)
ok_(d.children.all()[0].title == 'Replicated local storage')
def test_repair_breadcrumbs(self):
english_top = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English top',
save=True)
english_mid = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English mid',
parent_topic=english_top,
save=True)
english_bottom = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English bottom',
parent_topic=english_mid,
save=True)
french_top = document(locale='fr',
title='French top',
parent=english_top,
save=True)
french_mid = document(locale='fr',
title='French mid',
parent=english_mid,
parent_topic=english_mid,
save=True)
french_bottom = document(locale='fr',
title='French bottom',
parent=english_bottom,
parent_topic=english_bottom,
save=True)
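        # The French mid/bottom docs have parent_topics pointing into the
        # English tree; repair_breadcrumbs should re-link them to their
        # French counterparts.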
self.client.login(username='admin', password='testpass')
resp = self.client.get(reverse('wiki.repair_breadcrumbs',
args=[french_bottom.slug],
locale='fr'))
eq_(302, resp.status_code)
ok_(french_bottom.get_absolute_url() in resp['Location'])
french_bottom_fixed = Document.objects.get(locale='fr',
title=french_bottom.title)
eq_(french_mid.id, french_bottom_fixed.parent_topic.id)
eq_(french_top.id, french_bottom_fixed.parent_topic.parent_topic.id)
def test_translate_on_edit(self):
d1 = document(title="Doc1", locale=settings.WIKI_DEFAULT_LANGUAGE,
save=True)
revision(document=d1, save=True)
d2 = document(title="TransDoc1", locale='de', parent=d1, save=True)
revision(document=d2, save=True)
self.client.login(username='admin', password='testpass')
url = reverse('wiki.edit', args=(d2.slug,), locale=d2.locale)
resp = self.client.get(url)
eq_(200, resp.status_code)
def test_discard_location(self):
"""Testing that the 'discard' HREF goes to the correct place when it's
explicitely and implicitely set"""
self.client.login(username='admin', password='testpass')
def _create_doc(slug, locale):
doc = document(slug=slug, is_localizable=True, locale=locale)
doc.save()
r = revision(document=doc)
r.save()
return doc
# Test that the 'discard' button on an edit goes to the original page
doc = _create_doc('testdiscarddoc', settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(reverse('wiki.edit',
args=[doc.slug], locale=doc.locale))
eq_(pq(response.content).find('.btn-discard').attr('href'),
reverse('wiki.document', args=[doc.slug], locale=doc.locale))
        # Test that the 'discard' button on a new translation goes
        # to the en-US page
response = self.client.get(reverse('wiki.translate',
args=[doc.slug], locale=doc.locale) + '?tolocale=es')
eq_(pq(response.content).find('.btn-discard').attr('href'),
reverse('wiki.document', args=[doc.slug], locale=doc.locale))
# Test that the 'discard' button on an existing translation goes
# to the 'es' page
foreign_doc = _create_doc('testdiscarddoc', 'es')
response = self.client.get(reverse('wiki.edit',
args=[foreign_doc.slug],
locale=foreign_doc.locale))
eq_(pq(response.content).find('.btn-discard').attr('href'),
reverse('wiki.document', args=[foreign_doc.slug],
locale=foreign_doc.locale))
# Test new
response = self.client.get(reverse('wiki.create',
locale=settings.WIKI_DEFAULT_LANGUAGE))
eq_(pq(response.content).find('.btn-discard').attr('href'),
reverse('wiki.create',
locale=settings.WIKI_DEFAULT_LANGUAGE))
@override_config(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_revert(self, mock_kumascript_get):
self.client.login(username='admin', password='testpass')
mock_kumascript_get.return_value = (
'lorem ipsum dolor sit amet', None)
data = new_document_data()
data['title'] = 'A Test Article For Reverting'
data['slug'] = 'test-article-for-reverting'
response = self.client.post(reverse('wiki.create'), data)
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug='test-article-for-reverting')
rev = doc.revisions.order_by('-id').all()[0]
data['content'] = 'Not lorem ipsum anymore'
data['comment'] = 'Nobody likes Latin anyway'
response = self.client.post(reverse('wiki.edit',
args=[doc.slug]), data)
mock_kumascript_get.called = False
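        # Reset the mock's called flag so we can confirm the revert triggers
        # a fresh KumaScript render.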
response = self.client.post(reverse('wiki.revert_document',
args=[doc.slug, rev.id]),
{'revert': True, 'comment': 'Blah blah'})
ok_(mock_kumascript_get.called,
"kumascript should have been used")
ok_(302 == response.status_code)
rev = doc.revisions.order_by('-id').all()[0]
ok_('lorem ipsum dolor sit amet' == rev.content)
ok_('Blah blah' in rev.comment)
mock_kumascript_get.called = False
rev = doc.revisions.order_by('-id').all()[1]
response = self.client.post(reverse('wiki.revert_document',
args=[doc.slug, rev.id]),
{'revert': True})
ok_(302 == response.status_code)
rev = doc.revisions.order_by('-id').all()[0]
ok_(': ' not in rev.comment)
ok_(mock_kumascript_get.called,
"kumascript should have been used")
def test_revert_moved(self):
doc = document(slug='move-me', save=True)
rev = revision(document=doc, save=True)
prev_rev_id = rev.id
doc._move_tree('moved-doc')
self.client.login(username='admin', password='testpass')
resp = self.client.post(reverse('wiki.revert_document',
args=[doc.slug, prev_rev_id],
locale=doc.locale),
follow=True)
eq_(200, resp.status_code)
ok_("cannot revert a document that has been moved" in resp.content)
def test_store_revision_ip(self):
self.client.login(username='testuser', password='testpass')
data = new_document_data()
slug = 'test-article-for-storing-revision-ip'
data.update({'title': 'A Test Article For Storing Revision IP',
'slug': slug})
self.client.post(reverse('wiki.create'), data)
doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
data.update({'form': 'rev',
'content': 'This revision should NOT record IP',
'comment': 'This revision should NOT record IP'})
self.client.post(reverse('wiki.edit', args=[doc.slug]),
data)
eq_(0, RevisionIP.objects.all().count())
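        # With the 'store_revision_ips' switch active, the next edit should
        # record the client IP (the test client posts from 127.0.0.1).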
Switch.objects.create(name='store_revision_ips', active=True)
data.update({'content': 'Store the IP address for the revision.',
'comment': 'Store the IP address for the revision.'})
self.client.post(reverse('wiki.edit', args=[doc.slug]),
data)
eq_(1, RevisionIP.objects.all().count())
rev = doc.revisions.order_by('-id').all()[0]
rev_ip = RevisionIP.objects.get(revision=rev)
eq_('127.0.0.1', rev_ip.ip)
@mock.patch.object(Site.objects, 'get_current')
def test_email_for_first_edits(self, get_current):
get_current.return_value.domain = 'dev.mo.org'
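        # Only a user's first edit should trigger a notification email, so the
        # outbox should grow once per new editor rather than once per edit.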
self.client.login(username='testuser', password='testpass')
data = new_document_data()
slug = 'test-article-for-storing-revision-ip'
data.update({'title': 'A Test Article For First Edit Emails',
'slug': slug})
self.client.post(reverse('wiki.create'), data)
eq_(1, len(mail.outbox))
doc = Document.objects.get(
locale=settings.WIKI_DEFAULT_LANGUAGE, slug=slug)
data.update({'form': 'rev',
'content': 'This edit should not send an email',
'comment': 'This edit should not send an email'})
self.client.post(reverse('wiki.edit',
args=[doc.slug]),
data)
eq_(1, len(mail.outbox))
self.client.login(username='admin', password='testpass')
data.update({'content': 'Admin first edit should send an email',
'comment': 'Admin first edit should send an email'})
self.client.post(reverse('wiki.edit',
args=[doc.slug]),
data)
eq_(2, len(mail.outbox))
def _check_message_for_headers(message, username):
ok_("%s made their first edit" % username in message.subject)
eq_({'X-Kuma-Document-Url': "https://dev.mo.org%s" % doc.get_absolute_url(),
'X-Kuma-Editor-Username': username}, message.extra_headers)
testuser_message = mail.outbox[0]
admin_message = mail.outbox[1]
_check_message_for_headers(testuser_message, 'testuser')
_check_message_for_headers(admin_message, 'admin')
class DocumentWatchTests(UserTestCase, WikiTestCase):
"""Tests for un/subscribing to document edit notifications."""
localizing_client = True
def setUp(self):
super(DocumentWatchTests, self).setUp()
self.document, self.r = doc_rev()
self.client.login(username='testuser', password='testpass')
def test_watch_GET_405(self):
"""Watch document with HTTP GET results in 405."""
response = self.client.get(reverse('wiki.subscribe',
args=[self.document.slug]),
follow=True)
eq_(405, response.status_code)
def test_unwatch_GET_405(self):
"""Unwatch document with HTTP GET results in 405."""
response = self.client.get(reverse('wiki.subscribe',
args=[self.document.slug]),
follow=True)
eq_(405, response.status_code)
def test_watch_unwatch(self):
"""Watch and unwatch a document."""
user = self.user_model.objects.get(username='testuser')
# Subscribe
response = self.client.post(reverse('wiki.subscribe',
args=[self.document.slug]),
follow=True)
eq_(200, response.status_code)
assert EditDocumentEvent.is_notifying(user, self.document), \
'Watch was not created'
# Unsubscribe
response = self.client.post(reverse('wiki.subscribe',
args=[self.document.slug]),
follow=True)
eq_(200, response.status_code)
assert not EditDocumentEvent.is_notifying(user, self.document), \
'Watch was not destroyed'
class SectionEditingResourceTests(UserTestCase, WikiTestCase):
localizing_client = True
def test_raw_source(self):
"""The raw source for a document can be requested"""
self.client.login(username='admin', password='testpass')
d, r = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
expected = """
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
"""
Switch.objects.create(name='application_ACAO', active=True)
response = self.client.get('%s?raw=true' %
reverse('wiki.document', args=[d.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
ok_('Access-Control-Allow-Origin' in response)
eq_('*', response['Access-Control-Allow-Origin'])
eq_(normalize_html(expected),
normalize_html(response.content))
@attr('bug821986')
def test_raw_editor_safety_filter(self):
"""Safety filter should be applied before rendering editor"""
self.client.login(username='admin', password='testpass')
d, r = doc_rev("""
<p onload=alert(3)>FOO</p>
<svg><circle onload=confirm(3)>HI THERE</circle></svg>
""")
response = self.client.get('%s?raw=true' %
reverse('wiki.document', args=[d.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
ok_('<p onload=' not in response.content)
ok_('<circle onload=' not in response.content)
def test_raw_with_editing_links_source(self):
"""The raw source for a document can be requested, with section editing
links"""
self.client.login(username='admin', password='testpass')
d, r = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
expected = """
<h1 id="s1"><a class="edit-section" data-section-id="s1" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s1" href="/en-US/docs/%(slug)s$edit?section=s1&edit_links=true" title="Edit section">Edit</a>s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2"><a class="edit-section" data-section-id="s2" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s2" href="/en-US/docs/%(slug)s$edit?section=s2&edit_links=true" title="Edit section">Edit</a>s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3"><a class="edit-section" data-section-id="s3" data-section-src-url="/en-US/docs/%(slug)s?raw=true&section=s3" href="/en-US/docs/%(slug)s$edit?section=s3&edit_links=true" title="Edit section">Edit</a>s3</h1>
<p>test</p>
<p>test</p>
""" % {'slug': d.slug}
response = self.client.get('%s?raw=true&edit_links=true' %
reverse('wiki.document', args=[d.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(response.content))
def test_raw_section_source(self):
"""The raw source for a document section can be requested"""
self.client.login(username='admin', password='testpass')
d, r = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
expected = """
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
"""
response = self.client.get('%s?section=s2&raw=true' %
reverse('wiki.document',
args=[d.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(response.content))
@attr('midair')
@attr('rawsection')
def test_raw_section_edit(self):
self.client.login(username='admin', password='testpass')
d, r = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
replace = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
expected = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
response = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit',
args=[d.slug]),
{"form": "rev",
"slug": d.slug,
"content": replace},
follow=True,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(response.content))
expected = """
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>replace</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
"""
response = self.client.get('%s?raw=true' %
reverse('wiki.document',
args=[d.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(response.content))
@attr('midair')
def test_midair_section_merge(self):
"""If a page was changed while someone was editing, but the changes
didn't affect the specific section being edited, then ignore the midair
warning"""
self.client.login(username='admin', password='testpass')
doc, rev = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
replace_1 = """
<h1 id="replace1">replace1</h1>
<p>replace</p>
"""
replace_2 = """
<h1 id="replace2">replace2</h1>
<p>replace</p>
"""
expected = """
<h1 id="replace1">replace1</h1>
<p>replace</p>
<h1 id="replace2">replace2</h1>
<p>replace</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
"""
data = {
'form': 'rev',
'content': rev.content,
'slug': ''
}
# Edit #1 starts...
resp = self.client.get('%s?section=s1' %
reverse('wiki.edit',
args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id1 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 starts...
resp = self.client.get('%s?section=s2' %
reverse('wiki.edit',
args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id2 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 submits successfully
data.update({
'form': 'rev',
'content': replace_2,
'current_rev': rev_id2,
'slug': doc.slug
})
resp = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit',
args=[doc.slug]),
data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(302, resp.status_code)
# Edit #1 submits, but since it's a different section, there's no
# mid-air collision
data.update({
'form': 'rev',
'content': replace_1,
'current_rev': rev_id1
})
resp = self.client.post('%s?section=s1&raw=true' %
reverse('wiki.edit', args=[doc.slug]),
data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# No conflict, but we should get a 205 Reset as an indication that the
# page needs a refresh.
eq_(205, resp.status_code)
# Finally, make sure that all the edits landed
response = self.client.get('%s?raw=true' %
reverse('wiki.document',
args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(response.content))
# Also, ensure that the revision is slipped into the headers
eq_(unicode(Document.objects.get(slug=doc.slug, locale=doc.locale)
.current_revision.id),
unicode(response['x-kuma-revision']))
@attr('midair')
def test_midair_section_collision(self):
"""If both a revision and the edited section has changed, then a
section edit is a collision."""
self.client.login(username='admin', password='testpass')
doc, rev = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
replace_1 = """
<h1 id="s2">replace</h1>
<p>replace</p>
"""
replace_2 = """
<h1 id="s2">first replace</h1>
<p>first replace</p>
"""
data = {
'form': 'rev',
'content': rev.content
}
# Edit #1 starts...
resp = self.client.get('%s?section=s2' %
reverse('wiki.edit',
args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id1 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 starts...
resp = self.client.get('%s?section=s2' %
reverse('wiki.edit',
args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
page = pq(resp.content)
rev_id2 = page.find('input[name="current_rev"]').attr('value')
# Edit #2 submits successfully
data.update({
'form': 'rev',
'content': replace_2,
'slug': doc.slug,
'current_rev': rev_id2
})
resp = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit',
args=[doc.slug]),
data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(302, resp.status_code)
# Edit #1 submits, but since it's the same section, there's a collision
data.update({
'form': 'rev',
'content': replace_1,
'current_rev': rev_id1
})
resp = self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit',
args=[doc.slug]),
data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# With the raw API, we should get a 409 Conflict on collision.
eq_(409, resp.status_code)
def test_raw_include_option(self):
doc_src = u"""
<div class="noinclude">{{ XULRefAttr() }}</div>
<dl>
<dt>{{ XULAttr("maxlength") }}</dt>
<dd>Type: <em>integer</em></dd>
<dd>Przykłady 例 예제 示例</dd>
</dl>
<div class="noinclude">
<p>{{ languages( { "ja": "ja/XUL/Attribute/maxlength" } ) }}</p>
</div>
"""
doc, rev = doc_rev(doc_src)
expected = u"""
<dl>
<dt>{{ XULAttr("maxlength") }}</dt>
<dd>Type: <em>integer</em></dd>
<dd>Przykłady 例 예제 示例</dd>
</dl>
"""
resp = self.client.get('%s?raw&include' %
reverse('wiki.document', args=[doc.slug]),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(normalize_html(expected),
normalize_html(resp.content.decode('utf-8')))
def test_section_edit_toc(self):
"""show_toc is preserved in section editing."""
self.client.login(username='admin', password='testpass')
doc, rev = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
rev.toc_depth = 1
rev.save()
replace = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit', args=[doc.slug]),
{"form": "rev", "slug": doc.slug, "content": replace},
follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
changed = Document.objects.get(pk=doc.id).current_revision
ok_(rev.id != changed.id)
eq_(1, changed.toc_depth)
def test_section_edit_review_tags(self):
"""review tags are preserved in section editing."""
self.client.login(username='admin', password='testpass')
doc, rev = doc_rev("""
<h1 id="s1">s1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">s2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">s3</h1>
<p>test</p>
<p>test</p>
""")
tags_to_save = ['bar', 'foo']
rev.save()
rev.review_tags.set(*tags_to_save)
replace = """
<h1 id="s2">s2</h1>
<p>replace</p>
"""
self.client.post('%s?section=s2&raw=true' %
reverse('wiki.edit', args=[doc.slug]),
{"form": "rev", "slug": doc.slug, "content": replace},
follow=True, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
changed = Document.objects.get(pk=doc.id).current_revision
ok_(rev.id != changed.id)
eq_(set(tags_to_save),
set([t.name for t in changed.review_tags.all()]))
class MindTouchRedirectTests(UserTestCase, WikiTestCase):
"""
Test that we appropriately redirect old-style MindTouch URLs to
new-style kuma URLs.
"""
# A note on these tests: we could try to use assertRedirects on
# these, but for the most part we're just constructing a URL
# similar enough to the wiki app's own built-in redirects that
# it'll pick up the request and do what we want with it. But it
# may end up issuing its own redirects, which are tricky to sort
# out from the ones the legacy MindTouch handling will emit, so
# instead we just test that A) we did issue a redirect and B) the
# URL we constructed is enough for the document views to go on.
localizing_client = True
server_prefix = 'http://testserver/%s/docs' % settings.WIKI_DEFAULT_LANGUAGE
namespace_urls = (
# One for each namespace.
{'mindtouch': '/Help:Foo',
'kuma': '%s/Help:Foo' % server_prefix},
{'mindtouch': '/Help_talk:Foo',
'kuma': '%s/Help_talk:Foo' % server_prefix},
{'mindtouch': '/Project:En/MDC_editor_guide',
'kuma': '%s/Project:MDC_editor_guide' % server_prefix},
{'mindtouch': '/Project_talk:En/MDC_style_guide',
'kuma': '%s/Project_talk:MDC_style_guide' % server_prefix},
{'mindtouch': '/Special:Foo',
'kuma': '%s/Special:Foo' % server_prefix},
{'mindtouch': '/Talk:en/Foo',
'kuma': '%s/Talk:Foo' % server_prefix},
{'mindtouch': '/Template:Foo',
'kuma': '%s/Template:Foo' % server_prefix},
{'mindtouch': '/User:Foo',
'kuma': '%s/User:Foo' % server_prefix},
)
documents = (
{'title': 'XHTML', 'mt_locale': 'cn', 'kuma_locale': 'zh-CN',
'expected': '/zh-CN/docs/XHTML'},
{'title': 'JavaScript', 'mt_locale': 'zh_cn', 'kuma_locale': 'zh-CN',
'expected': '/zh-CN/docs/JavaScript'},
        {'title': 'XHTML6', 'mt_locale': 'zh_tw', 'kuma_locale': 'zh-TW',
'expected': '/zh-TW/docs/XHTML6'},
{'title': 'HTML7', 'mt_locale': 'fr', 'kuma_locale': 'fr',
'expected': '/fr/docs/HTML7'},
)
def test_namespace_urls(self):
new_doc = document()
new_doc.title = 'User:Foo'
new_doc.slug = 'User:Foo'
new_doc.save()
for namespace_test in self.namespace_urls:
resp = self.client.get(namespace_test['mindtouch'], follow=False)
eq_(301, resp.status_code)
eq_(namespace_test['kuma'], resp['Location'])
def test_trailing_slash(self):
d = document()
d.locale = 'zh-CN'
d.slug = 'foofoo'
d.title = 'FooFoo'
d.save()
mt_url = '/cn/%s/' % (d.slug,)
resp = self.client.get(mt_url)
eq_(301, resp.status_code)
eq_('http://testserver%s' % d.get_absolute_url(), resp['Location'])
def test_document_urls(self):
for doc in self.documents:
d = document()
d.title = doc['title']
d.slug = doc['title']
d.locale = doc['kuma_locale']
d.save()
mt_url = '/%s' % '/'.join([doc['mt_locale'], doc['title']])
resp = self.client.get(mt_url)
eq_(301, resp.status_code)
eq_('http://testserver%s' % doc['expected'], resp['Location'])
def test_view_param(self):
d = document()
d.locale = settings.WIKI_DEFAULT_LANGUAGE
d.slug = 'HTML/HTML5'
d.title = 'HTML 5'
d.save()
mt_url = '/en-US/%s?view=edit' % (d.slug,)
resp = self.client.get(mt_url)
eq_(301, resp.status_code)
expected_url = 'http://testserver%s$edit' % d.get_absolute_url()
eq_(expected_url, resp['Location'])
class AutosuggestDocumentsTests(WikiTestCase):
"""
    Test that we're properly filtering out the Redirects from the document list
"""
localizing_client = True
def test_autosuggest_no_term(self):
url = reverse('wiki.autosuggest_documents',
locale=settings.WIKI_DEFAULT_LANGUAGE)
resp = self.client.get(url)
eq_(400, resp.status_code)
def test_document_redirects(self):
# All contain "e", so that will be the search term
invalid_documents = (
{
'title': 'Something Redirect 8',
'html': 'REDIRECT <a class="redirect" href="/blah">Something Redirect</a>',
'is_redirect': 1
},
)
valid_documents = (
{'title': 'e 6', 'html': '<p>Blah text Redirect'},
{'title': 'e 7', 'html': 'AppleTalk'},
{'title': 'Response.Redirect'},
)
for doc in invalid_documents + valid_documents:
d = document()
d.title = doc['title']
if 'html' in doc:
d.html = doc['html']
if 'slug' in doc:
d.slug = doc['slug']
if 'is_redirect' in doc:
d.is_redirect = 1
d.save()
url = reverse('wiki.autosuggest_documents',
locale=settings.WIKI_DEFAULT_LANGUAGE) + '?term=e'
Switch.objects.create(name='application_ACAO', active=True)
resp = self.client.get(url)
ok_('Access-Control-Allow-Origin' in resp)
eq_('*', resp['Access-Control-Allow-Origin'])
eq_(200, resp.status_code)
data = json.loads(resp.content)
eq_(len(data), len(valid_documents))
# Ensure that the valid docs found are all in the valid list
for d in data:
found = False
for v in valid_documents:
if v['title'] in d['title']:
found = True
break
eq_(True, found)
def test_list_no_redirects(self):
Document.objects.all().delete()
invalid_documents = [
{
'title': 'Something Redirect 8',
'slug': 'xx',
'html': 'REDIRECT <a class="redirect" href="%s">yo</a>' % settings.SITE_URL
},
{
'title': 'My Template',
'slug': 'Template:Something',
'html': 'blah',
},
]
valid_documents = [
{'title': 'A Doc', 'slug': 'blah', 'html': 'Blah blah blah'}
]
for doc in invalid_documents + valid_documents:
document(save=True, slug=doc['slug'],
title=doc['title'], html=doc['html'])
resp = self.client.get(reverse('wiki.all_documents',
locale=settings.WIKI_DEFAULT_LANGUAGE))
eq_(len(valid_documents), len(pq(resp.content).find('.document-list li')))
class CodeSampleViewTests(UserTestCase, WikiTestCase):
localizing_client = True
@override_config(
KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/testserver')
def test_code_sample_1(self):
"""The raw source for a document can be requested"""
d, r = doc_rev("""
<p>This is a page. Deal with it.</p>
<div id="sample1" class="code-sample">
<pre class="brush: html">Some HTML</pre>
<pre class="brush: css">.some-css { color: red; }</pre>
<pre class="brush: js">window.alert("HI THERE")</pre>
</div>
<p>test</p>
""")
expecteds = (
'<style type="text/css">.some-css { color: red; }</style>',
'Some HTML',
'<script type="text/javascript">window.alert("HI THERE")</script>',
)
Switch.objects.create(name='application_ACAO', active=True)
response = self.client.get(reverse('wiki.code_sample',
args=[d.slug, 'sample1']),
HTTP_HOST='testserver')
ok_('Access-Control-Allow-Origin' in response)
eq_('*', response['Access-Control-Allow-Origin'])
eq_(200, response.status_code)
normalized = normalize_html(response.content)
# Content checks
ok_('<!DOCTYPE html>' in response.content)
for item in expecteds:
ok_(item in normalized)
@override_config(
KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/sampleserver')
def test_code_sample_host_restriction(self):
d, r = doc_rev("""
<p>This is a page. Deal with it.</p>
<div id="sample1" class="code-sample">
<pre class="brush: html">Some HTML</pre>
<pre class="brush: css">.some-css { color: red; }</pre>
<pre class="brush: js">window.alert("HI THERE")</pre>
</div>
<p>test</p>
""")
response = self.client.get(reverse('wiki.code_sample',
args=[d.slug, 'sample1']),
HTTP_HOST='testserver')
eq_(403, response.status_code)
response = self.client.get(reverse('wiki.code_sample',
args=[d.slug, 'sample1']),
HTTP_HOST='sampleserver')
eq_(200, response.status_code)
@override_config(
KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/sampleserver')
def test_code_sample_iframe_embed(self):
slug = 'test-code-embed'
embed_url = ('https://sampleserver/%s/docs/%s$samples/sample1' %
(settings.WIKI_DEFAULT_LANGUAGE, slug))
doc_src = """
<p>This is a page. Deal with it.</p>
<div id="sample1" class="code-sample">
<pre class="brush: html">Some HTML</pre>
<pre class="brush: css">.some-css { color: red; }</pre>
<pre class="brush: js">window.alert("HI THERE")</pre>
</div>
<iframe id="if1" src="%(embed_url)s"></iframe>
<iframe id="if2" src="http://testserver"></iframe>
<iframe id="if3" src="https://some.alien.site.com"></iframe>
<p>test</p>
""" % dict(embed_url=embed_url)
slug = 'test-code-doc'
d, r = doc_rev()
revision(save=True, document=d, title="Test code doc", slug=slug,
content=doc_src)
response = self.client.get(reverse('wiki.document', args=(d.slug,)))
eq_(200, response.status_code)
page = pq(response.content)
if1 = page.find('#if1')
eq_(if1.length, 1)
eq_(if1.attr('src'), embed_url)
if2 = page.find('#if2')
eq_(if2.length, 1)
eq_(if2.attr('src'), '')
if3 = page.find('#if3')
eq_(if3.length, 1)
eq_(if3.attr('src'), '')
class CodeSampleViewFileServingTests(UserTestCase, WikiTestCase):
@override_config(
KUMA_WIKI_IFRAME_ALLOWED_HOSTS='^https?\:\/\/testserver',
WIKI_ATTACHMENT_ALLOWED_TYPES='text/plain')
@override_settings(ATTACHMENT_HOST='testserver')
def test_code_sample_file_serving(self):
self.client.login(username='admin', password='testpass')
# first let's upload a file
file_for_upload = make_test_file(content='Something something unique')
post_data = {
'title': 'An uploaded file',
'description': 'A unique experience for your file serving needs.',
'comment': 'Yadda yadda yadda',
'file': file_for_upload,
}
response = self.client.post(reverse('attachments.new_attachment'),
data=post_data)
eq_(response.status_code, 302)
# then build the document and revision we need to test
attachment = Attachment.objects.get(title='An uploaded file')
filename = attachment.current_revision.filename()
url_css = 'url("files/%(attachment_id)s/%(filename)s")' % {
'attachment_id': attachment.id,
'filename': filename,
}
doc, rev = doc_rev("""
<p>This is a page. Deal with it.</p>
<div id="sample1" class="code-sample">
<pre class="brush: html">Some HTML</pre>
<pre class="brush: css">.some-css { background: %s }</pre>
<pre class="brush: js">window.alert("HI THERE")</pre>
</div>
<p>test</p>
""" % url_css)
        # then see if the code sample view has successfully found the sample
response = self.client.get(reverse('wiki.code_sample',
args=[doc.slug, 'sample1'],
locale='en-US'))
eq_(response.status_code, 200)
normalized = normalize_html(response.content)
ok_(url_css in normalized)
        # and then we check that the raw code sample file view redirects
        # to the attachment's main file serving URL
response = self.client.get(reverse('wiki.raw_code_sample_file',
args=[doc.slug,
'sample1',
attachment.id,
filename],
locale='en-US'))
eq_(response.status_code, 302)
eq_(response['Location'], attachment.get_file_url())
class DeferredRenderingViewTests(UserTestCase, WikiTestCase):
"""Tests for the deferred rendering system and interaction with views"""
localizing_client = True
def setUp(self):
super(DeferredRenderingViewTests, self).setUp()
self.rendered_content = 'HELLO RENDERED CONTENT'
self.raw_content = 'THIS IS RAW CONTENT'
self.d, self.r = doc_rev(self.raw_content)
# Disable TOC, makes content inspection easier.
self.r.toc_depth = 0
self.r.save()
self.d.html = self.raw_content
self.d.rendered_html = self.rendered_content
self.d.save()
self.url = reverse('wiki.document',
args=(self.d.slug,),
locale=self.d.locale)
config.KUMASCRIPT_TIMEOUT = 5.0
config.KUMASCRIPT_MAX_AGE = 600
def tearDown(self):
super(DeferredRenderingViewTests, self).tearDown()
config.KUMASCRIPT_TIMEOUT = 0
config.KUMASCRIPT_MAX_AGE = 0
@mock.patch('kuma.wiki.kumascript.get')
def test_rendered_content(self, mock_kumascript_get):
"""Document view should serve up rendered content when available"""
mock_kumascript_get.return_value = (self.rendered_content, None)
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
txt = p.find('#wikiArticle').text()
ok_(self.rendered_content in txt)
ok_(self.raw_content not in txt)
eq_(0, p.find('#doc-rendering-in-progress').length)
eq_(0, p.find('#doc-render-raw-fallback').length)
def test_rendering_in_progress_warning(self):
"""Document view should serve up rendered content when available"""
# Make the document look like there's a rendering in progress.
self.d.render_started_at = datetime.datetime.now()
self.d.save()
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
txt = p.find('#wikiArticle').text()
# Even though a rendering looks like it's in progress, ensure the
# last-known render is displayed.
ok_(self.rendered_content in txt)
ok_(self.raw_content not in txt)
eq_(0, p.find('#doc-rendering-in-progress').length)
# Only for logged-in users, ensure the render-in-progress warning is
# displayed.
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
eq_(1, p.find('#doc-rendering-in-progress').length)
@mock.patch('kuma.wiki.kumascript.get')
def test_raw_content_during_initial_render(self, mock_kumascript_get):
"""Raw content should be displayed during a document's initial
deferred rendering"""
mock_kumascript_get.return_value = (self.rendered_content, None)
# Make the document look like there's no rendered content, but that a
# rendering is in progress.
self.d.html = self.raw_content
self.d.rendered_html = ''
self.d.render_started_at = datetime.datetime.now()
self.d.save()
# Now, ensure that raw content is shown in the view.
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
txt = p.find('#wikiArticle').text()
ok_(self.rendered_content not in txt)
ok_(self.raw_content in txt)
eq_(0, p.find('#doc-render-raw-fallback').length)
# Only for logged-in users, ensure that a warning is displayed about
# the fallback
self.client.login(username='testuser', password='testpass')
resp = self.client.get(self.url, follow=False)
p = pq(resp.content)
eq_(1, p.find('#doc-render-raw-fallback').length)
@attr('schedule_rendering')
@mock.patch.object(Document, 'schedule_rendering')
@mock.patch('kuma.wiki.kumascript.get')
def test_schedule_rendering(self, mock_kumascript_get,
mock_document_schedule_rendering):
mock_kumascript_get.return_value = (self.rendered_content, None)
self.client.login(username='testuser', password='testpass')
data = new_document_data()
data.update({
'form': 'rev',
'content': 'This is an update',
})
edit_url = reverse('wiki.edit', args=[self.d.slug])
resp = self.client.post(edit_url, data)
eq_(302, resp.status_code)
ok_(mock_document_schedule_rendering.called)
mock_document_schedule_rendering.reset_mock()
data.update({
'form': 'both',
'content': 'This is a translation',
})
translate_url = (reverse('wiki.translate', args=[data['slug']],
locale=settings.WIKI_DEFAULT_LANGUAGE) + '?tolocale=fr')
response = self.client.post(translate_url, data)
eq_(302, response.status_code)
ok_(mock_document_schedule_rendering.called)
@mock.patch('kuma.wiki.kumascript.get')
@mock.patch('requests.post')
def test_alternate_bleach_whitelist(self, mock_requests_post,
mock_kumascript_get):
# Some test content with contentious tags.
test_content = """
<p id="foo">
<a style="position: absolute; border: 1px;" href="http://example.com">This is a test</a>
<textarea name="foo"></textarea>
</p>
"""
# Expected result filtered through old/current Bleach rules
expected_content_old = """
<p id="foo">
<a style="position: absolute; border: 1px;" href="http://example.com">This is a test</a>
<textarea name="foo"></textarea>
</p>
"""
# Expected result filtered through alternate whitelist
expected_content_new = """
<p id="foo">
<a style="border: 1px;" href="http://example.com">This is a test</a>
<textarea name="foo"></textarea>
</p>
"""
# Set up an alternate set of whitelists...
config.BLEACH_ALLOWED_TAGS = json.dumps([
"a", "p"
])
config.BLEACH_ALLOWED_ATTRIBUTES = json.dumps({
"a": ['href', 'style'],
"p": ['id']
})
config.BLEACH_ALLOWED_STYLES = json.dumps([
"border"
])
config.KUMASCRIPT_TIMEOUT = 100
# Rig up a mocked response from KumaScript GET method
mock_kumascript_get.return_value = (test_content, None)
# Rig up a mocked response from KumaScript POST service
# Digging a little deeper into the stack, so that the rest of
# kumascript.post processing happens.
from StringIO import StringIO
m_resp = mock.Mock()
m_resp.status_code = 200
m_resp.text = test_content
m_resp.read = StringIO(test_content).read
mock_requests_post.return_value = m_resp
d, r = doc_rev(test_content)
trials = (
(False, '', expected_content_old),
(False, '&bleach_new', expected_content_old),
(True, '', expected_content_old),
(True, '&bleach_new', expected_content_new),
)
for trial in trials:
do_login, param, expected = trial
if do_login:
self.client.login(username='testuser', password='testpass')
else:
self.client.logout()
            url = ('%s?raw&macros%s' % (
reverse('wiki.document', args=(d.slug,), locale=d.locale),
param))
resp = self.client.get(url, follow=True)
eq_(normalize_html(expected),
normalize_html(resp.content),
"Should match? %s %s %s %s" %
(do_login, param, expected, resp.content))
class APITests(UserTestCase, WikiTestCase):
localizing_client = True
def setUp(self):
super(APITests, self).setUp()
self.username = 'tester23'
self.password = 'trustno1'
self.email = '[email protected]'
self.user = user(username=self.username,
email=self.email,
password=self.password,
save=True)
self.key = Key(user=self.user, description='Test Key 1')
self.secret = self.key.generate_secret()
self.key_id = self.key.key
self.key.save()
auth = '%s:%s' % (self.key_id, self.secret)
self.basic_auth = 'Basic %s' % base64.encodestring(auth)
self.d, self.r = doc_rev("""
<h3 id="S1">Section 1</h3>
<p>This is a page. Deal with it.</p>
<h3 id="S2">Section 2</h3>
<p>This is a page. Deal with it.</p>
<h3 id="S3">Section 3</h3>
<p>This is a page. Deal with it.</p>
""")
self.r.tags = "foo, bar, baz"
self.r.review_tags.set('technical', 'editorial')
self.url = self.d.get_absolute_url()
def tearDown(self):
super(APITests, self).tearDown()
Document.objects.filter(current_revision__creator=self.user).delete()
Revision.objects.filter(creator=self.user).delete()
Key.objects.filter(user=self.user).delete()
self.user.delete()
def test_put_existing(self):
"""PUT API should allow overwrite of existing document content"""
data = dict(
summary="Look, I made an edit!",
content="""
<p>This is an edit to the page. We've dealt with it.</p>
""",
)
# No auth key leads to a 403 Forbidden
resp = self._put(self.url, data)
eq_(403, resp.status_code)
# But, this should work, given a proper auth key
resp = self._put(self.url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
# Verify the edit happened.
curr_d = Document.objects.get(pk=self.d.pk)
eq_(normalize_html(data['content'].strip()),
normalize_html(Document.objects.get(pk=self.d.pk).html))
# Also, verify that this resulted in a new revision.
curr_r = curr_d.current_revision
ok_(self.r.pk != curr_r.pk)
eq_(data['summary'], curr_r.summary)
r_tags = ','.join(sorted(t.name for t in curr_r.review_tags.all()))
eq_('editorial,technical', r_tags)
def test_put_section_edit(self):
"""PUT API should allow overwrite of a specific section of an existing
document"""
data = dict(
content="""
<h3 id="S2">Section 2</h3>
<p>This is an edit to the page. We've dealt with it.</p>
""",
# Along with the section, let's piggyback in some other metadata
# edits just for good measure. They're not tied to section edit
# though.
title="Hahah this is a new title!",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put('%s?section=S2' % self.url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
expected = """
<h3 id="S1">Section 1</h3>
<p>This is a page. Deal with it.</p>
<h3 id="S2">Section 2</h3>
<p>This is an edit to the page. We've dealt with it.</p>
<h3 id="S3">Section 3</h3>
<p>This is a page. Deal with it.</p>
"""
# Verify the section edit happened.
curr_d = Document.objects.get(pk=self.d.pk)
eq_(normalize_html(expected.strip()),
normalize_html(curr_d.html))
eq_(data['title'], curr_d.title)
d_tags = ','.join(sorted(t.name for t in curr_d.tags.all()))
eq_(data['tags'], d_tags)
# Also, verify that this resulted in a new revision.
curr_r = curr_d.current_revision
ok_(self.r.pk != curr_r.pk)
r_tags = ','.join(sorted(t.name for t in curr_r.review_tags.all()))
eq_(data['review_tags'], r_tags)
def test_put_new_root(self):
"""PUT API should allow creation of a document whose path would place
it at the root of the topic hierarchy."""
slug = 'new-root-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put(url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
def test_put_new_child(self):
"""PUT API should allow creation of a document whose path would make it
a child of an existing parent."""
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
# This first attempt should fail; the proposed parent does not exist.
url = '%s/nonexistent/newchild' % self.url
resp = self._put(url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(404, resp.status_code)
# TODO: I suppose we could rework this part to create the chain of
# missing parents with stub content, but currently this demands
# that API users do that themselves.
# Now, fill in the parent gap...
p_doc = document(slug='%s/nonexistent' % self.d.slug,
locale=settings.WIKI_DEFAULT_LANGUAGE,
parent_topic=self.d)
p_doc.save()
p_rev = revision(document=p_doc,
slug='%s/nonexistent' % self.d.slug,
title='I EXIST NOW', save=True)
p_rev.save()
# The creation should work, now.
resp = self._put(url, data,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
new_slug = '%s/nonexistent/newchild' % self.d.slug
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=new_slug)
eq_(p_doc.pk, new_doc.parent_topic.pk)
def test_put_unsupported_content_type(self):
"""PUT API should complain with a 400 Bad Request on an unsupported
content type submission"""
slug = 'new-root-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = "I don't even know what this content is."
resp = self._put(url, json.dumps(data),
content_type='x-super-happy-fun-text',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(400, resp.status_code)
def test_put_json(self):
"""PUT API should handle application/json requests"""
slug = 'new-root-json-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put(url, json.dumps(data),
content_type='application/json',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
eq_(data['title'], new_doc.title)
eq_(normalize_html(data['content']), normalize_html(new_doc.html))
def test_put_simple_html(self):
"""PUT API should handle text/html requests"""
slug = 'new-root-html-doc-1'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
html = """
<p>This is a new page, hooray!</p>
"""
resp = self._put(url, html, content_type='text/html',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
eq_(normalize_html(html), normalize_html(new_doc.html))
def test_put_complex_html(self):
"""PUT API should handle text/html requests with complex HTML documents
and extract document fields from the markup"""
slug = 'new-root-html-doc-2'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title='This is a complex document',
content="""
<p>This is a new page, hooray!</p>
""",
)
html = """
<html>
<head>
<title>%(title)s</title>
</head>
<body>%(content)s</body>
</html>
""" % data
resp = self._put(url, html, content_type='text/html',
HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
new_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=slug)
eq_(data['title'], new_doc.title)
eq_(normalize_html(data['content']), normalize_html(new_doc.html))
# TODO: Anything else useful to extract from HTML?
# Extract tags from head metadata?
def test_put_track_authkey(self):
"""Revisions modified by PUT API should track the auth key used"""
slug = 'new-root-doc'
url = reverse('wiki.document', args=(slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
data = dict(
title="This is the title of a new page",
content="""
<p>This is a new page, hooray!</p>
""",
tags="hello,quux,xyzzy",
review_tags="technical",
)
resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth)
eq_(201, resp.status_code)
last_log = self.key.history.order_by('-pk').all()[0]
eq_('created', last_log.action)
data['title'] = 'New title for old page'
resp = self._put(url, data, HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
last_log = self.key.history.order_by('-pk').all()[0]
eq_('updated', last_log.action)
def test_put_etag_conflict(self):
"""A PUT request with an if-match header throws a 412 Precondition
Failed if the underlying document has been changed."""
resp = self.client.get(self.url)
orig_etag = resp['ETag']
content1 = """
<h2 id="s1">Section 1</h2>
<p>New section 1</p>
<h2 id="s2">Section 2</h2>
<p>New section 2</p>
"""
# First update should work.
resp = self._put(self.url, dict(content=content1),
HTTP_IF_MATCH=orig_etag,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
# Get the new etag, ensure it doesn't match the original.
resp = self.client.get(self.url)
new_etag = resp['ETag']
ok_(orig_etag != new_etag)
# But, the ETag should have changed, so this update shouldn't work.
# Using the old ETag suggests a mid-air edit collision happened.
resp = self._put(self.url, dict(content=content1),
HTTP_IF_MATCH=orig_etag,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(412, resp.status_code)
# Just for good measure, switching to the new ETag should work
resp = self._put(self.url, dict(content=content1),
HTTP_IF_MATCH=new_etag,
HTTP_AUTHORIZATION=self.basic_auth)
eq_(205, resp.status_code)
def _put(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""django.test.client.put() does the wrong thing, here. This does
better, based on post()."""
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
post_data = smart_str(data, encoding=charset)
parsed = urlparse(path)
params = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self.client._get_path(parsed),
'QUERY_STRING': parsed[4],
'REQUEST_METHOD': 'PUT',
'wsgi.input': FakePayload(post_data),
}
params.update(extra)
response = self.client.request(**params)
if follow:
response = self.client._handle_redirects(response, **extra)
return response
class PageMoveTests(UserTestCase, WikiTestCase):
localizing_client = True
def setUp(self):
super(PageMoveTests, self).setUp()
page_move_flag = Flag.objects.create(name='page_move')
page_move_flag.users = self.user_model.objects.filter(is_superuser=True)
page_move_flag.save()
def test_move_conflict(self):
parent = revision(title='Test page move views',
slug='test-page-move-views',
is_approved=True,
save=True)
parent_doc = parent.document
child = revision(title='Child of page-move view test',
slug='page-move/test-views',
is_approved=True,
save=True)
child_doc = child.document
child_doc.parent_topic = parent.document
child_doc.save()
revision(title='Conflict for page-move view',
slug='moved/test-page-move-views/test-views',
is_approved=True,
save=True)
data = {'slug': 'moved/test-page-move-views'}
self.client.login(username='admin', password='testpass')
resp = self.client.post(reverse('wiki.move',
args=(parent_doc.slug,),
locale=parent_doc.locale),
data=data)
eq_(200, resp.status_code)
class DocumentZoneTests(UserTestCase, WikiTestCase):
localizing_client = True
def setUp(self):
super(DocumentZoneTests, self).setUp()
root_rev = revision(title='ZoneRoot', slug='ZoneRoot',
content='This is the Zone Root',
is_approved=True, save=True)
self.root_doc = root_rev.document
middle_rev = revision(title='middlePage', slug='middlePage',
content='This is a middlepage',
is_approved=True, save=True)
self.middle_doc = middle_rev.document
self.middle_doc.parent_topic = self.root_doc
self.middle_doc.save()
sub_rev = revision(title='SubPage', slug='SubPage',
content='This is a subpage',
is_approved=True, save=True)
self.sub_doc = sub_rev.document
self.sub_doc.parent_topic = self.middle_doc
self.sub_doc.save()
self.root_zone = DocumentZone(document=self.root_doc)
self.root_zone.styles = """
article { color: blue; }
"""
self.root_zone.save()
self.middle_zone = DocumentZone(document=self.middle_doc)
self.middle_zone.styles = """
article { font-weight: bold; }
"""
self.middle_zone.save()
def test_zone_styles(self):
"""Ensure CSS styles for a zone can be fetched"""
url = reverse('wiki.styles', args=(self.root_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
eq_(self.root_zone.styles, response.content)
url = reverse('wiki.styles', args=(self.middle_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
eq_(self.middle_zone.styles, response.content)
url = reverse('wiki.styles', args=(self.sub_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
eq_(404, response.status_code)
def test_zone_styles_links(self):
"""Ensure link to zone style appears in child document views"""
url = reverse('wiki.document', args=(self.sub_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(url, follow=True)
styles_url = reverse('wiki.styles', args=(self.root_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
root_expected = ('<link rel="stylesheet" type="text/css" href="%s"' %
styles_url)
ok_(root_expected in response.content)
styles_url = reverse('wiki.styles', args=(self.middle_doc.slug,),
locale=settings.WIKI_DEFAULT_LANGUAGE)
middle_expected = ('<link rel="stylesheet" type="text/css" href="%s"' %
styles_url)
ok_(middle_expected in response.content)
class ListDocumentTests(UserTestCase, WikiTestCase):
"""Tests for list_documents view"""
localizing_client = True
fixtures = UserTestCase.fixtures + ['wiki/documents.json']
def test_case_insensitive_tags(self):
"""
Bug 976071 - Tags should be case insensitive
https://bugzil.la/976071
"""
lower_tag = DocumentTag.objects.create(name='foo', slug='foo')
lower_tag.save()
doc = Document.objects.get(pk=1)
doc.tags.set(lower_tag)
response = self.client.get(reverse('wiki.tag', args=['foo']))
ok_(doc.slug in response.content.decode('utf-8'))
response = self.client.get(reverse('wiki.tag', args=['Foo']))
ok_(doc.slug in response.content.decode('utf-8'))
| mpl-2.0 |
dougbeal/gyp | test/win/gyptest-link-defrelink.py | 210 | 1683 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure a relink is performed when a .def file is touched.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
target = 'test_deffile_dll_ok'
def_contents = test.read('linker-flags/deffile.def')
# This first build makes sure everything is up to date.
test.run_gyp('deffile.gyp', chdir=CHDIR)
test.build('deffile.gyp', target, chdir=CHDIR)
test.up_to_date('deffile.gyp', target, chdir=CHDIR)
def HasExport(binary, export):
full_path = test.built_file_path(binary, chdir=CHDIR)
output = test.run_dumpbin('/exports', full_path)
return export in output
# Verify that only one function is exported.
if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
test.fail_test()
if HasExport('test_deffile_dll_ok.dll', 'AnotherExportedFunction'):
test.fail_test()
# Add AnotherExportedFunction to the def file, then rebuild. If it doesn't
# relink the DLL, then the subsequent check for AnotherExportedFunction will
# fail.
new_def_contents = def_contents + "\n AnotherExportedFunction"
test.write('linker-flags/deffile.def', new_def_contents)
test.build('deffile.gyp', target, chdir=CHDIR)
test.up_to_date('deffile.gyp', target, chdir=CHDIR)
if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
test.fail_test()
if not HasExport('test_deffile_dll_ok.dll', 'AnotherExportedFunction'):
test.fail_test()
test.pass_test()
| bsd-3-clause |
odoomrp/odoomrp-wip | crm_claim_links/models/res_partner.py | 31 | 1096 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos ([email protected]) Date: 26/08/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import fields, models
class ResPartner(models.Model):
_inherit = 'res.partner'
claims = fields.One2many('crm.claim', 'partner_id', string='Claims')
| agpl-3.0 |
Preetwinder/scrapy | tests/test_spidermiddleware_depth.py | 136 | 1348 | from unittest import TestCase
from scrapy.spidermiddlewares.depth import DepthMiddleware
from scrapy.http import Response, Request
from scrapy.spiders import Spider
from scrapy.statscollectors import StatsCollector
from scrapy.utils.test import get_crawler
class TestDepthMiddleware(TestCase):
def setUp(self):
crawler = get_crawler(Spider)
self.spider = crawler._create_spider('scrapytest.org')
self.stats = StatsCollector(crawler)
self.stats.open_spider(self.spider)
self.mw = DepthMiddleware(1, self.stats, True)
def test_process_spider_output(self):
req = Request('http://scrapytest.org')
resp = Response('http://scrapytest.org')
resp.request = req
result = [Request('http://scrapytest.org')]
out = list(self.mw.process_spider_output(resp, result, self.spider))
self.assertEquals(out, result)
rdc = self.stats.get_value('request_depth_count/1', spider=self.spider)
self.assertEquals(rdc, 1)
req.meta['depth'] = 1
out2 = list(self.mw.process_spider_output(resp, result, self.spider))
self.assertEquals(out2, [])
rdm = self.stats.get_value('request_depth_max', spider=self.spider)
self.assertEquals(rdm, 1)
def tearDown(self):
self.stats.close_spider(self.spider, '')
| bsd-3-clause |
Bladefidz/wfuzz | plugins/iterations.py | 1 | 2703 | from externals.moduleman.plugin import moduleman_plugin
import itertools
class piterator_void:
text="void"
def count(self):
return self.__count
def __init__(self, *i):
self._dic = i
self.__count = max(map(lambda x:x.count(), i))
self.it = self._dic[0]
def next(self):
return (self.it.next(),)
def restart(self):
for dic in self._dic:
dic.restart()
self.it = self._dic[0]
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class zip:
name = "zip"
description = "Returns an iterator that aggregates elements from each of the iterables."
category = ["default"]
priority = 99
def __init__(self, *i):
self._dic = i
self.it = itertools.izip(*self._dic)
self.__count = min(map(lambda x:x.count(), i)) # Only possible match counted.
def count(self):
return self.__count
def restart(self):
for dic in self._dic:
dic.restart()
        self.it = itertools.izip(*self._dic)
def next(self):
return self.it.next()
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class product:
name = "product"
description = "Returns an iterator cartesian product of input iterables."
category = ["default"]
priority = 99
def __init__(self, *i):
self._dic = i
self.it = itertools.product(*self._dic)
self.__count = reduce(lambda x,y:x*y.count(), i[1:], i[0].count())
def restart(self):
for dic in self._dic:
dic.restart()
self.it = itertools.product(*self._dic)
def count(self):
return self.__count
def next(self):
return self.it.next()
def __iter__(self):
self.restart()
return self
@moduleman_plugin("restart", "count", "next", "__iter__")
class chain:
name = "chain"
description = "Returns an iterator returns elements from the first iterable until it is exhausted, then proceeds to the next iterable, until all of the iterables are exhausted."
category = ["default"]
priority = 99
def count(self):
return self.__count
def __init__(self, *i):
self.__count = sum(map(lambda x:x.count(), i))
self._dic = i
self.it = itertools.chain(*i)
def restart(self):
for dic in self._dic:
dic.restart()
self.it = itertools.chain(*self._dic)
def next(self):
return (self.it.next(),)
def __iter__(self):
self.restart()
return self
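# Illustrative sketch (not part of the original wfuzz source): how the three
# iterators above combine payloads. `payload_a` and `payload_b` are hypothetical
# objects exposing the count()/next()/restart() interface, holding 2 and 3 items:
#
#   zip(payload_a, payload_b).count()      -> 2  (pairs up to the shorter payload)
#   product(payload_a, payload_b).count()  -> 6  (2 * 3 cartesian combinations)
#   chain(payload_a, payload_b).count()    -> 5  (2 + 3, one payload after the other)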
| gpl-2.0 |
bcroq/kansha | kansha/card_addons/label/tests.py | 2 | 1553 | # -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
from kansha.cardextension.tests import CardExtensionTestCase
from .comp import CardLabels
class CardLabelsTest(CardExtensionTestCase):
extension_name = 'labels'
extension_class = CardLabels
def test_activate(self):
        self.assertTrue(len(self.extension.get_available_labels()) > 0)
self.assertEqual(len(self.extension.labels), 0)
label = self.extension.get_available_labels()[0]
self.extension.activate(label)
self.assertIn(label, self.extension.labels)
self.extension.activate(label)
self.assertNotIn(label, self.extension.labels)
def test_copy(self):
labels = self.extension.get_available_labels()
for label in labels:
self.extension.activate(label)
cpy = self.extension_copy
labels2 = zip(self.extension.labels, cpy.labels)
for labela, labelb in labels2:
assert(labela.get_title() == labelb.get_title())
def test_update_document(self):
doc = self.card.schema(docid=None)
label = self.extension.get_available_labels()[0]
self.extension.activate(label)
label = self.extension.get_available_labels()[1]
self.extension.activate(label)
self.extension.update_document(doc)
self.assertEqual(doc.labels, u'Green Red')
| bsd-3-clause |
ojake/django | django/contrib/gis/db/backends/oracle/introspection.py | 539 | 1977 | import sys
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six
class OracleIntrospection(DatabaseIntrospection):
# Associating any OBJECTVAR instances with GeometryField. Of course,
# this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
# but it is the only object type supported within Django anyways.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
try:
cursor.execute(
'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), geo_col.upper())
)
row = cursor.fetchone()
except Exception as msg:
new_msg = (
'Could not find entry in USER_SDO_GEOM_METADATA '
'corresponding to "%s"."%s"\n'
'Error message: %s.') % (table_name, geo_col, msg)
six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])
# TODO: Research way to find a more specific geometry field type for
# the column's contents.
field_type = 'GeometryField'
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params['srid'] = srid
# Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
dim = len(dim)
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
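# Illustrative note (not part of the original module): for a hypothetical
# geometry column registered with SRID 8307 and 3 dimensions, get_geometry_type
# would return something like ('GeometryField', {'srid': 8307, 'dim': 3});
# a 2D column using the default SRID 4326 yields empty field_params.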
| bsd-3-clause |
bowang/tensorflow | tensorflow/python/keras/_impl/keras/applications/inception_v3_test.py | 34 | 2148 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Inception V3 application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.platform import test
class InceptionV3Test(test.TestCase):
def test_with_top(self):
model = keras.applications.InceptionV3(weights=None)
self.assertEqual(model.output_shape, (None, 1000))
def test_no_top(self):
model = keras.applications.InceptionV3(weights=None, include_top=False)
self.assertEqual(model.output_shape, (None, None, None, 2048))
def test_with_pooling(self):
model = keras.applications.InceptionV3(weights=None,
include_top=False,
pooling='avg')
self.assertEqual(model.output_shape, (None, 2048))
def test_weight_loading(self):
with self.assertRaises(ValueError):
keras.applications.InceptionV3(weights='unknown',
include_top=False)
with self.assertRaises(ValueError):
keras.applications.InceptionV3(weights='imagenet',
classes=2000)
def test_preprocess_input(self):
x = np.random.uniform(0, 255, (2, 300, 200, 3))
out1 = keras.applications.inception_v3.preprocess_input(x)
self.assertAllClose(np.mean(out1), 0., atol=0.1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
kuiwei/edx-platform | common/djangoapps/student/helpers.py | 6 | 3995 | """Helpers for the student app. """
import time
from django.utils.http import cookie_date
from django.conf import settings
from django.core.urlresolvers import reverse
from opaque_keys.edx.keys import CourseKey
from course_modes.models import CourseMode
from third_party_auth import ( # pylint: disable=W0611
pipeline, provider,
is_enabled as third_party_auth_enabled
)
def auth_pipeline_urls(auth_entry, redirect_url=None, course_id=None):
"""Retrieve URLs for each enabled third-party auth provider.
These URLs are used on the "sign up" and "sign in" buttons
on the login/registration forms to allow users to begin
authentication with a third-party provider.
Optionally, we can redirect the user to an arbitrary
url after auth completes successfully. We use this
to redirect the user to a page that required login,
or to send users to the payment flow when enrolling
in a course.
Args:
auth_entry (string): Either `pipeline.AUTH_ENTRY_LOGIN` or `pipeline.AUTH_ENTRY_REGISTER`
Keyword Args:
redirect_url (unicode): If provided, send users to this URL
after they successfully authenticate.
course_id (unicode): The ID of the course the user is enrolling in.
We use this to send users to the track selection page
if the course has a payment option.
Note that `redirect_url` takes precedence over the redirect
to the track selection page.
Returns:
dict mapping provider names to URLs
"""
if not third_party_auth_enabled():
return {}
if redirect_url is not None:
pipeline_redirect = redirect_url
elif course_id is not None:
# If the course is white-label (paid), then we send users
# to the shopping cart. (There is a third party auth pipeline
# step that will add the course to the cart.)
if CourseMode.is_white_label(CourseKey.from_string(course_id)):
pipeline_redirect = reverse("shoppingcart.views.show_cart")
# Otherwise, send the user to the track selection page.
# The track selection page may redirect the user to the dashboard
# (if the only available mode is honor), or directly to verification
# (for professional ed).
else:
pipeline_redirect = reverse(
"course_modes_choose",
kwargs={'course_id': unicode(course_id)}
)
else:
pipeline_redirect = None
return {
provider.NAME: pipeline.get_login_url(
provider.NAME, auth_entry,
enroll_course_id=course_id,
redirect_url=pipeline_redirect
)
for provider in provider.Registry.enabled()
}
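# Illustrative usage sketch (not part of the original module): a login or
# registration view could build its provider buttons roughly like this; the
# course id below is a hypothetical example value.
#
#   provider_urls = auth_pipeline_urls(
#       pipeline.AUTH_ENTRY_LOGIN,
#       course_id='edX/DemoX/Demo_Course',
#   )
#   # -> a dict mapping each enabled provider's name to the URL that starts
#   #    its auth pipeline and later redirects to the track selection page.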
def set_logged_in_cookie(request, response):
"""Set a cookie indicating that the user is logged in.
Some installations have an external marketing site configured
that displays a different UI when the user is logged in
(e.g. a link to the student dashboard instead of to the login page)
Arguments:
request (HttpRequest): The request to the view, used to calculate
the cookie's expiration date based on the session expiration date.
response (HttpResponse): The response on which the cookie will be set.
Returns:
HttpResponse
"""
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
response.set_cookie(
settings.EDXMKTG_COOKIE_NAME, 'true', max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path='/', secure=None, httponly=None,
)
return response
def is_logged_in_cookie_set(request):
"""Check whether the request has the logged in cookie set. """
return settings.EDXMKTG_COOKIE_NAME in request.COOKIES
| agpl-3.0 |
Curso-OpenShift/Formulario | OverFlow/ProjectFormulario/env/lib/python2.7/site-packages/setuptools/lib2to3_ex.py | 907 | 1998 | """
Customized Mixin2to3 support:
- adds support for converting doctests
This module raises an ImportError on Python 2.
"""
from distutils.util import Mixin2to3 as _Mixin2to3
from distutils import log
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
import setuptools
class DistutilsRefactoringTool(RefactoringTool):
def log_error(self, msg, *args, **kw):
log.error(msg, *args)
def log_message(self, msg, *args):
log.info(msg, *args)
def log_debug(self, msg, *args):
log.debug(msg, *args)
class Mixin2to3(_Mixin2to3):
def run_2to3(self, files, doctests = False):
        # See if the distribution option has been set, otherwise check the
# setuptools default.
if self.distribution.use_2to3 is not True:
return
if not files:
return
log.info("Fixing "+" ".join(files))
self.__build_fixer_names()
self.__exclude_fixers()
if doctests:
if setuptools.run_2to3_on_doctests:
r = DistutilsRefactoringTool(self.fixer_names)
r.refactor(files, write=True, doctests_only=True)
else:
_Mixin2to3.run_2to3(self, files)
def __build_fixer_names(self):
if self.fixer_names: return
self.fixer_names = []
for p in setuptools.lib2to3_fixer_packages:
self.fixer_names.extend(get_fixers_from_package(p))
if self.distribution.use_2to3_fixers is not None:
for p in self.distribution.use_2to3_fixers:
self.fixer_names.extend(get_fixers_from_package(p))
def __exclude_fixers(self):
excluded_fixers = getattr(self, 'exclude_fixers', [])
if self.distribution.use_2to3_exclude_fixers is not None:
excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
for fixer_name in excluded_fixers:
if fixer_name in self.fixer_names:
self.fixer_names.remove(fixer_name)
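# Illustrative sketch (not part of the original module): a project opts in to
# this mixin through setup() keywords, e.g.
#
#   setup(..., use_2to3=True,
#         use_2to3_fixers=['my_project_fixers'],          # hypothetical package
#         use_2to3_exclude_fixers=['lib2to3.fixes.fix_next'])
#
# run_2to3() then applies the collected fixers (and, optionally, the doctest
# conversion) to the files produced by the build step.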
| gpl-3.0 |
spreg-git/pysal | pysal/esda/tests/test_getisord.py | 14 | 1952 | import unittest
from pysal.weights.Distance import DistanceBand
from pysal.esda import getisord
import numpy as np
POINTS = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
W = DistanceBand(POINTS, threshold=15)
Y = np.array([2, 3, 3.2, 5, 8, 7])
class G_Tester(unittest.TestCase):
def setUp(self):
self.w = W
self.y = Y
np.random.seed(10)
def test_G(self):
g = getisord.G(self.y, self.w)
self.assertAlmostEquals(g.G, 0.55709779, places=8)
self.assertAlmostEquals(g.p_norm, 0.1729, places=4)
class G_Local_Tester(unittest.TestCase):
def setUp(self):
self.w = W
self.y = Y
np.random.seed(10)
def test_G_Local_Binary(self):
lg = getisord.G_Local(self.y, self.w, transform='B')
self.assertAlmostEquals(lg.Zs[0], -1.0136729, places=7)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
def test_G_Local_Row_Standardized(self):
lg = getisord.G_Local(self.y, self.w, transform='R')
self.assertAlmostEquals(lg.Zs[0], -0.62074534, places=7)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
def test_G_star_Local_Binary(self):
lg = getisord.G_Local(self.y, self.w, transform='B', star=True)
self.assertAlmostEquals(lg.Zs[0], -1.39727626, places=8)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
def test_G_star_Row_Standardized(self):
lg = getisord.G_Local(self.y, self.w, transform='R', star=True)
self.assertAlmostEquals(lg.Zs[0], -0.62488094, places=8)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
suite = unittest.TestSuite()
test_classes = [G_Tester, G_Local_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| bsd-3-clause |
imsparsh/python-for-android | python-modules/twisted/twisted/test/test_strcred.py | 56 | 21750 | # Copyright (c) 2007-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.cred.strcred}.
"""
import os
import StringIO
from twisted import plugin
from twisted.trial import unittest
from twisted.cred import credentials, checkers, error, strcred
from twisted.plugins import cred_file, cred_anonymous
from twisted.python import usage
from twisted.python.filepath import FilePath
from twisted.python.fakepwd import UserDatabase
try:
import crypt
except ImportError:
crypt = None
try:
import pwd
except ImportError:
pwd = None
try:
import spwd
except ImportError:
spwd = None
def getInvalidAuthType():
"""
Helper method to produce an auth type that doesn't exist.
"""
invalidAuthType = 'ThisPluginDoesNotExist'
while (invalidAuthType in
[factory.authType for factory in strcred.findCheckerFactories()]):
invalidAuthType += '_'
return invalidAuthType
class TestPublicAPI(unittest.TestCase):
def test_emptyDescription(self):
"""
Test that the description string cannot be empty.
"""
iat = getInvalidAuthType()
self.assertRaises(strcred.InvalidAuthType, strcred.makeChecker, iat)
self.assertRaises(strcred.InvalidAuthType, strcred.findCheckerFactory, iat)
def test_invalidAuthType(self):
"""
Test that an unrecognized auth type raises an exception.
"""
iat = getInvalidAuthType()
self.assertRaises(strcred.InvalidAuthType, strcred.makeChecker, iat)
self.assertRaises(strcred.InvalidAuthType, strcred.findCheckerFactory, iat)
class TestStrcredFunctions(unittest.TestCase):
def test_findCheckerFactories(self):
"""
Test that findCheckerFactories returns all available plugins.
"""
availablePlugins = list(strcred.findCheckerFactories())
for plg in plugin.getPlugins(strcred.ICheckerFactory):
self.assertIn(plg, availablePlugins)
def test_findCheckerFactory(self):
"""
Test that findCheckerFactory returns the first plugin
available for a given authentication type.
"""
self.assertIdentical(strcred.findCheckerFactory('file'),
cred_file.theFileCheckerFactory)
class TestMemoryChecker(unittest.TestCase):
def setUp(self):
self.admin = credentials.UsernamePassword('admin', 'asdf')
self.alice = credentials.UsernamePassword('alice', 'foo')
self.badPass = credentials.UsernamePassword('alice', 'foobar')
self.badUser = credentials.UsernamePassword('x', 'yz')
self.checker = strcred.makeChecker('memory:admin:asdf:alice:foo')
def test_isChecker(self):
"""
Verifies that strcred.makeChecker('memory') returns an object
that implements the L{ICredentialsChecker} interface.
"""
self.assertTrue(checkers.ICredentialsChecker.providedBy(self.checker))
self.assertIn(credentials.IUsernamePassword,
self.checker.credentialInterfaces)
def test_badFormatArgString(self):
"""
Test that an argument string which does not contain user:pass
pairs (i.e., an odd number of ':' characters) raises an exception.
"""
self.assertRaises(strcred.InvalidAuthArgumentString,
strcred.makeChecker, 'memory:a:b:c')
def test_memoryCheckerSucceeds(self):
"""
Test that the checker works with valid credentials.
"""
def _gotAvatar(username):
self.assertEquals(username, self.admin.username)
return (self.checker
.requestAvatarId(self.admin)
.addCallback(_gotAvatar))
def test_memoryCheckerFailsUsername(self):
"""
Test that the checker fails with an invalid username.
"""
return self.assertFailure(self.checker.requestAvatarId(self.badUser),
error.UnauthorizedLogin)
def test_memoryCheckerFailsPassword(self):
"""
Test that the checker fails with an invalid password.
"""
return self.assertFailure(self.checker.requestAvatarId(self.badPass),
error.UnauthorizedLogin)
class TestAnonymousChecker(unittest.TestCase):
def test_isChecker(self):
"""
Verifies that strcred.makeChecker('anonymous') returns an object
that implements the L{ICredentialsChecker} interface.
"""
checker = strcred.makeChecker('anonymous')
self.assertTrue(checkers.ICredentialsChecker.providedBy(checker))
self.assertIn(credentials.IAnonymous, checker.credentialInterfaces)
def testAnonymousAccessSucceeds(self):
"""
Test that we can log in anonymously using this checker.
"""
checker = strcred.makeChecker('anonymous')
request = checker.requestAvatarId(credentials.Anonymous())
def _gotAvatar(avatar):
self.assertIdentical(checkers.ANONYMOUS, avatar)
return request.addCallback(_gotAvatar)
class TestUnixChecker(unittest.TestCase):
users = {
'admin': 'asdf',
'alice': 'foo',
}
def _spwd(self, username):
return (username, crypt.crypt(self.users[username], 'F/'),
0, 0, 99999, 7, -1, -1, -1)
def setUp(self):
self.admin = credentials.UsernamePassword('admin', 'asdf')
self.alice = credentials.UsernamePassword('alice', 'foo')
self.badPass = credentials.UsernamePassword('alice', 'foobar')
self.badUser = credentials.UsernamePassword('x', 'yz')
self.checker = strcred.makeChecker('unix')
# Hack around the pwd and spwd modules, since we can't really
# go about reading your /etc/passwd or /etc/shadow files
if pwd:
database = UserDatabase()
for username, password in self.users.items():
database.addUser(
username, crypt.crypt(password, 'F/'),
1000, 1000, username, '/home/' + username, '/bin/sh')
self.patch(pwd, 'getpwnam', database.getpwnam)
if spwd:
self._spwd_getspnam = spwd.getspnam
spwd.getspnam = self._spwd
def tearDown(self):
if spwd:
spwd.getspnam = self._spwd_getspnam
def test_isChecker(self):
"""
Verifies that strcred.makeChecker('unix') returns an object
that implements the L{ICredentialsChecker} interface.
"""
self.assertTrue(checkers.ICredentialsChecker.providedBy(self.checker))
self.assertIn(credentials.IUsernamePassword,
self.checker.credentialInterfaces)
def test_unixCheckerSucceeds(self):
"""
Test that the checker works with valid credentials.
"""
def _gotAvatar(username):
self.assertEquals(username, self.admin.username)
return (self.checker
.requestAvatarId(self.admin)
.addCallback(_gotAvatar))
def test_unixCheckerFailsUsername(self):
"""
Test that the checker fails with an invalid username.
"""
return self.assertFailure(self.checker.requestAvatarId(self.badUser),
error.UnauthorizedLogin)
def test_unixCheckerFailsPassword(self):
"""
Test that the checker fails with an invalid password.
"""
return self.assertFailure(self.checker.requestAvatarId(self.badPass),
error.UnauthorizedLogin)
if None in (pwd, spwd, crypt):
availability = []
        for module, name in ((pwd, "pwd"), (spwd, "spwd"), (crypt, "crypt")):
if module is None:
availability += [name]
for method in (test_unixCheckerSucceeds,
test_unixCheckerFailsUsername,
test_unixCheckerFailsPassword):
method.skip = ("Required module(s) are unavailable: " +
", ".join(availability))
class TestFileDBChecker(unittest.TestCase):
"""
Test for the --auth=file:... file checker.
"""
def setUp(self):
self.admin = credentials.UsernamePassword('admin', 'asdf')
self.alice = credentials.UsernamePassword('alice', 'foo')
self.badPass = credentials.UsernamePassword('alice', 'foobar')
self.badUser = credentials.UsernamePassword('x', 'yz')
self.filename = self.mktemp()
FilePath(self.filename).setContent('admin:asdf\nalice:foo\n')
self.checker = strcred.makeChecker('file:' + self.filename)
def _fakeFilename(self):
filename = '/DoesNotExist'
while os.path.exists(filename):
filename += '_'
return filename
def test_isChecker(self):
"""
Verifies that strcred.makeChecker('memory') returns an object
that implements the L{ICredentialsChecker} interface.
"""
self.assertTrue(checkers.ICredentialsChecker.providedBy(self.checker))
self.assertIn(credentials.IUsernamePassword,
self.checker.credentialInterfaces)
def test_fileCheckerSucceeds(self):
"""
Test that the checker works with valid credentials.
"""
def _gotAvatar(username):
self.assertEquals(username, self.admin.username)
return (self.checker
.requestAvatarId(self.admin)
.addCallback(_gotAvatar))
def test_fileCheckerFailsUsername(self):
"""
Test that the checker fails with an invalid username.
"""
return self.assertFailure(self.checker.requestAvatarId(self.badUser),
error.UnauthorizedLogin)
def test_fileCheckerFailsPassword(self):
"""
Test that the checker fails with an invalid password.
"""
return self.assertFailure(self.checker.requestAvatarId(self.badPass),
error.UnauthorizedLogin)
def test_failsWithEmptyFilename(self):
"""
Test that an empty filename raises an error.
"""
self.assertRaises(ValueError, strcred.makeChecker, 'file')
self.assertRaises(ValueError, strcred.makeChecker, 'file:')
def test_warnWithBadFilename(self):
"""
When the file auth plugin is given a file that doesn't exist, it
should produce a warning.
"""
oldOutput = cred_file.theFileCheckerFactory.errorOutput
newOutput = StringIO.StringIO()
cred_file.theFileCheckerFactory.errorOutput = newOutput
checker = strcred.makeChecker('file:' + self._fakeFilename())
cred_file.theFileCheckerFactory.errorOutput = oldOutput
self.assertIn(cred_file.invalidFileWarning, newOutput.getvalue())
class DummyOptions(usage.Options, strcred.AuthOptionMixin):
"""
Simple options for testing L{strcred.AuthOptionMixin}.
"""
class TestCheckerOptions(unittest.TestCase):
def test_createsList(self):
"""
Test that the --auth command line creates a list in the
Options instance and appends values to it.
"""
options = DummyOptions()
options.parseOptions(['--auth', 'memory'])
self.assertEqual(len(options['credCheckers']), 1)
options = DummyOptions()
options.parseOptions(['--auth', 'memory', '--auth', 'memory'])
self.assertEqual(len(options['credCheckers']), 2)
def test_invalidAuthError(self):
"""
Test that the --auth command line raises an exception when it
gets a parameter it doesn't understand.
"""
options = DummyOptions()
# If someone adds a 'ThisPluginDoesNotExist' then this unit
# test should still run.
invalidParameter = getInvalidAuthType()
self.assertRaises(
usage.UsageError,
options.parseOptions, ['--auth', invalidParameter])
self.assertRaises(
usage.UsageError,
options.parseOptions, ['--help-auth-type', invalidParameter])
def test_createsDictionary(self):
"""
Test that the --auth command line creates a dictionary
mapping supported interfaces to the list of credentials
checkers that support it.
"""
options = DummyOptions()
options.parseOptions(['--auth', 'memory', '--auth', 'anonymous'])
chd = options['credInterfaces']
self.assertEquals(len(chd[credentials.IAnonymous]), 1)
self.assertEquals(len(chd[credentials.IUsernamePassword]), 1)
chdAnonymous = chd[credentials.IAnonymous][0]
chdUserPass = chd[credentials.IUsernamePassword][0]
self.assertTrue(checkers.ICredentialsChecker.providedBy(chdAnonymous))
self.assertTrue(checkers.ICredentialsChecker.providedBy(chdUserPass))
self.assertIn(credentials.IAnonymous,
chdAnonymous.credentialInterfaces)
self.assertIn(credentials.IUsernamePassword,
chdUserPass.credentialInterfaces)
def test_credInterfacesProvidesLists(self):
"""
Test that when two --auth arguments are passed along which
support the same interface, a list with both is created.
"""
options = DummyOptions()
options.parseOptions(['--auth', 'memory', '--auth', 'unix'])
self.assertEquals(
options['credCheckers'],
options['credInterfaces'][credentials.IUsernamePassword])
def test_listDoesNotDisplayDuplicates(self):
"""
Test that the list for --help-auth does not duplicate items.
"""
authTypes = []
options = DummyOptions()
for cf in options._checkerFactoriesForOptHelpAuth():
self.assertNotIn(cf.authType, authTypes)
authTypes.append(cf.authType)
def test_displaysListCorrectly(self):
"""
Test that the --help-auth argument correctly displays all
available authentication plugins, then exits.
"""
newStdout = StringIO.StringIO()
options = DummyOptions()
options.authOutput = newStdout
self.assertRaises(SystemExit, options.parseOptions, ['--help-auth'])
for checkerFactory in strcred.findCheckerFactories():
self.assertIn(checkerFactory.authType, newStdout.getvalue())
def test_displaysHelpCorrectly(self):
"""
Test that the --help-auth-for argument will correctly display
the help file for a particular authentication plugin.
"""
newStdout = StringIO.StringIO()
options = DummyOptions()
options.authOutput = newStdout
self.assertRaises(
SystemExit, options.parseOptions, ['--help-auth-type', 'file'])
for line in cred_file.theFileCheckerFactory.authHelp:
if line.strip():
self.assertIn(line.strip(), newStdout.getvalue())
def test_unexpectedException(self):
"""
When the checker specified by --auth raises an unexpected error, it
should be caught and re-raised within a L{usage.UsageError}.
"""
options = DummyOptions()
err = self.assertRaises(usage.UsageError, options.parseOptions,
['--auth', 'file'])
self.assertEquals(str(err),
"Unexpected error: 'file' requires a filename")
class OptionsForUsernamePassword(usage.Options, strcred.AuthOptionMixin):
supportedInterfaces = (credentials.IUsernamePassword,)
class OptionsForUsernameHashedPassword(usage.Options, strcred.AuthOptionMixin):
supportedInterfaces = (credentials.IUsernameHashedPassword,)
class OptionsSupportsAllInterfaces(usage.Options, strcred.AuthOptionMixin):
supportedInterfaces = None
class OptionsSupportsNoInterfaces(usage.Options, strcred.AuthOptionMixin):
supportedInterfaces = []
class TestLimitingInterfaces(unittest.TestCase):
"""
Tests functionality that allows an application to limit the
credential interfaces it can support. For the purposes of this
test, we use IUsernameHashedPassword, although this will never
really be used by the command line.
(I have, to date, not thought of a half-decent way for a user to
specify a hash algorithm via the command-line. Nor do I think it's
very useful.)
I should note that, at first, this test is counter-intuitive,
because we're using the checker with a pre-defined hash function
as the 'bad' checker. See the documentation for
L{twisted.cred.checkers.FilePasswordDB.hash} for more details.
"""
def setUp(self):
self.filename = self.mktemp()
file(self.filename, 'w').write('admin:asdf\nalice:foo\n')
self.goodChecker = checkers.FilePasswordDB(self.filename)
self.badChecker = checkers.FilePasswordDB(self.filename, hash=self._hash)
self.anonChecker = checkers.AllowAnonymousAccess()
def _hash(self, networkUsername, networkPassword, storedPassword):
"""
A dumb hash that doesn't really do anything.
"""
return networkPassword
def test_supportsInterface(self):
"""
Test that the supportsInterface method behaves appropriately.
"""
options = OptionsForUsernamePassword()
self.assertTrue(
options.supportsInterface(credentials.IUsernamePassword))
self.assertFalse(
options.supportsInterface(credentials.IAnonymous))
self.assertRaises(
strcred.UnsupportedInterfaces, options.addChecker, self.anonChecker)
def test_supportsAllInterfaces(self):
"""
Test that the supportsInterface method behaves appropriately
when the supportedInterfaces attribute is None.
"""
options = OptionsSupportsAllInterfaces()
self.assertTrue(
options.supportsInterface(credentials.IUsernamePassword))
self.assertTrue(
options.supportsInterface(credentials.IAnonymous))
def test_supportsCheckerFactory(self):
"""
Test that the supportsCheckerFactory method behaves appropriately.
"""
options = OptionsForUsernamePassword()
fileCF = cred_file.theFileCheckerFactory
anonCF = cred_anonymous.theAnonymousCheckerFactory
self.assertTrue(options.supportsCheckerFactory(fileCF))
self.assertFalse(options.supportsCheckerFactory(anonCF))
def test_canAddSupportedChecker(self):
"""
Test that when addChecker is called with a checker that
implements at least one of the interfaces our application
supports, it is successful.
"""
options = OptionsForUsernamePassword()
options.addChecker(self.goodChecker)
iface = options.supportedInterfaces[0]
# Test that we did get IUsernamePassword
self.assertIdentical(options['credInterfaces'][iface][0], self.goodChecker)
self.assertIdentical(options['credCheckers'][0], self.goodChecker)
# Test that we didn't get IUsernameHashedPassword
self.assertEquals(len(options['credInterfaces'][iface]), 1)
self.assertEquals(len(options['credCheckers']), 1)
def test_failOnAddingUnsupportedChecker(self):
"""
Test that when addChecker is called with a checker that does
not implement any supported interfaces, it fails.
"""
options = OptionsForUsernameHashedPassword()
self.assertRaises(strcred.UnsupportedInterfaces,
options.addChecker, self.badChecker)
def test_unsupportedInterfaceError(self):
"""
Test that the --auth command line raises an exception when it
gets a checker we don't support.
"""
options = OptionsSupportsNoInterfaces()
authType = cred_anonymous.theAnonymousCheckerFactory.authType
self.assertRaises(
usage.UsageError,
options.parseOptions, ['--auth', authType])
def test_helpAuthLimitsOutput(self):
"""
Test that --help-auth will only list checkers that purport to
supply at least one of the credential interfaces our
application can use.
"""
options = OptionsForUsernamePassword()
for factory in options._checkerFactoriesForOptHelpAuth():
invalid = True
for interface in factory.credentialInterfaces:
if options.supportsInterface(interface):
invalid = False
if invalid:
raise strcred.UnsupportedInterfaces()
def test_helpAuthTypeLimitsOutput(self):
"""
Test that --help-auth-type will display a warning if you get
help for an authType that does not supply at least one of the
credential interfaces our application can use.
"""
options = OptionsForUsernamePassword()
# Find an interface that we can use for our test
invalidFactory = None
for factory in strcred.findCheckerFactories():
if not options.supportsCheckerFactory(factory):
invalidFactory = factory
break
self.assertNotIdentical(invalidFactory, None)
# Capture output and make sure the warning is there
newStdout = StringIO.StringIO()
options.authOutput = newStdout
self.assertRaises(SystemExit, options.parseOptions,
['--help-auth-type', 'anonymous'])
self.assertIn(strcred.notSupportedWarning, newStdout.getvalue())
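# --- Illustrative usage sketch (not part of the original test module) ---
# It mirrors the DummyOptions pattern above to show how an application's Options
# class can adopt strcred.AuthOptionMixin and consume an --auth string. The class
# and function names below are assumptions made for this sketch only.
class ExampleServerOptions(usage.Options, strcred.AuthOptionMixin):
    """Example options that accept username/password checkers only."""
    supportedInterfaces = (credentials.IUsernamePassword,)
def _exampleAuthParsing():
    options = ExampleServerOptions()
    # The same string form exercised by TestMemoryChecker above.
    options.parseOptions(['--auth', 'memory:admin:asdf:alice:foo'])
    # The mixin fills both the flat checker list and the per-interface map.
    checker = options['credCheckers'][0]
    assert checkers.ICredentialsChecker.providedBy(checker)
    assert credentials.IUsernamePassword in checker.credentialInterfaces
    return checker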
| apache-2.0 |
Chuban/moose | python/peacock/tests/input_tab/ExecutableInfo/test_ExecutableInfo.py | 4 | 3032 | #!/usr/bin/env python
import unittest
from peacock.Input.ExecutableInfo import ExecutableInfo
from peacock.utils import Testing
from PyQt5 import QtWidgets
class Tests(Testing.PeacockTester):
qapp = QtWidgets.QApplication([])
def checkFile(self, output, gold_file, write_output=False):
if write_output:
with open("tmp_out.txt", "w") as f:
f.write(output)
with open(gold_file, "r") as f:
gold_output = f.read()
self.assertEqual(gold_output, output)
def testInfo(self):
e = ExecutableInfo()
e.clearCache()
e.setPath("")
self.assertFalse(e.valid())
e.setPath("no_exist")
self.assertFalse(e.valid())
exe_path = Testing.find_moose_test_exe()
e.setPath(exe_path)
self.assertTrue(e.valid())
e.setPath(exe_path)
self.assertTrue(e.valid())
e.setPath("")
self.assertTrue(e.valid())
e.setPath("no_exist")
self.assertFalse(e.valid())
# this should hit the cache
e.setPath(exe_path)
self.assertTrue(e.valid())
def testTree(self):
e = ExecutableInfo()
e.clearCache()
exe_path = Testing.find_moose_test_exe()
e.setPath(exe_path)
root = e.path_map["/"]
self.assertIn("Mesh", root.children_list)
m = root.children["Mesh"]
self.assertEqual(m.hard, True)
self.assertEqual(e.path_map["/Mesh"], m)
out = e.dumpDefaultTree(hard_only=False)
self.assertIn("Partitioner", out)
self.assertIn("Partitioner", out)
self.assertIn("ScalarKernels", out)
self.assertNotIn("DirichletBC", out)
def testPickle(self):
exe_path = Testing.find_moose_test_exe()
e = ExecutableInfo()
e.clearCache()
e.setPath(exe_path)
p = e.toPickle()
e2 = ExecutableInfo()
e2.fromPickle(p)
self.assertEqual(e2.path_map, e.path_map)
def checkPath(self, e, path, star, hard):
p = e.path_map.get(path)
self.assertNotEqual(p, None)
self.assertEqual(p.star, star)
self.assertEqual(p.hard, hard)
def testCombined(self):
e = ExecutableInfo()
e.setPath(Testing.find_moose_test_exe(dirname="modules/combined", exe_base="combined"))
self.checkPath(e, "/Preconditioning", True, True)
self.checkPath(e, "/BCs", True, True)
self.checkPath(e, "/BCs/Pressure", True, True)
self.checkPath(e, "/SolidMechanics", True, True)
self.checkPath(e, "/Adaptivity", False, True)
self.checkPath(e, "/Adaptivity/Markers", True, True)
self.checkPath(e, "/GlobalParams", False, True)
self.checkPath(e, "/Mesh", False, True)
self.checkPath(e, "/AuxVariables", True, True)
self.checkPath(e, "/AuxVariables/*/InitialCondition", False, False)
self.checkPath(e, "/Variables/*/InitialCondition", False, False)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
ketjow4/NOV | Lib/encodings/cp437.py | 593 | 34820 | """ Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp437',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xa5' # 0x009d -> YEN SIGN
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
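### Illustrative usage (not part of the generated codec tables)
# A minimal sketch showing one way to exercise the Codec class defined above
# directly, without registering the codec; it assumes Python 2 str/unicode
# semantics, like the rest of this module.
def _example_roundtrip():
    codec = Codec()
    encoded, _ = codec.encode(u'\u2502\u2524')  # two box-drawing characters
    assert encoded == '\xb3\xb4'                # their cp437 byte values
    decoded, _ = codec.decode('\xb3\xb4')
    assert decoded == u'\u2502\u2524'
    return encoded, decoded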
| gpl-3.0 |
berkmancenter/mediacloud | apps/common/src/python/mediawords/db/locks.py | 1 | 3477 | """Constants and routines for handling advisory postgres locks."""
import mediawords.db
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
log = create_logger(__name__)
"""
This package just has constants that can be passed as the first key to the postgres pg_advisory_*lock functions.
If you are using an advisory lock, you should use the two key version and use a constant from this package to
avoid conflicts.
"""
# locks to make sure we are not mining or snapshotting a topic in more than one process at a time
LOCK_TYPES = {
'test-a': 10,
'test-b': 11,
'MediaWords::Job::TM::MineTopic': 12,
'MediaWords::Job::TM::SnapshotTopic': 13,
'MediaWords::TM::Media::media_normalized_urls': 14,
'MediaWords::Crawler::Engine::run_fetcher': 15,
# Testing lock types
'TestPerlWorkerLock': 900,
'TestPythonWorkerLock': 901,
}
class McDBLocksException(Exception):
"""Default exception for package."""
pass
def get_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int, wait: bool = False) -> bool:
"""Get a postgres advisory lock with the lock_type and lock_id as the two keys.
Arguments:
db - db handle
lock_type - must be in LOCK_TYPES dict above
lock_id - id for the particular lock within the type
wait - if true, block while waiting for the lock, else return false if the lock is not available
Returns:
True if the lock is available
"""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if isinstance(lock_id, bytes):
lock_id = decode_object_from_bytes_if_needed(lock_id)
lock_id = int(lock_id)
if isinstance(wait, bytes):
wait = decode_object_from_bytes_if_needed(wait)
wait = bool(wait)
log.debug("trying for lock: %s, %d" % (lock_type, lock_id))
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
if wait:
db.query("select pg_advisory_lock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id})
return True
else:
r = db.query("select pg_try_advisory_lock(%(a)s, %(b)s) as locked", {'a': lock_type_id, 'b': lock_id}).hash()
return r['locked']
def release_session_lock(db: mediawords.db.DatabaseHandler, lock_type: str, lock_id: int) -> None:
"""Release the postgres advisory lock if it is held."""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if isinstance(lock_id, bytes):
lock_id = decode_object_from_bytes_if_needed(lock_id)
lock_id = int(lock_id)
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
db.query("select pg_advisory_unlock(%(a)s, %(b)s)", {'a': lock_type_id, 'b': lock_id})
def list_session_locks(db: mediawords.db.DatabaseHandler, lock_type: str) -> list:
"""Return a list of all locked ids for the given lock_type."""
lock_type = str(decode_object_from_bytes_if_needed(lock_type))
if lock_type not in LOCK_TYPES:
raise McDBLocksException("lock type not in LOCK_TYPES: %s" % lock_type)
lock_type_id = LOCK_TYPES[lock_type]
# noinspection SqlResolve
return db.query(
"select objid from pg_locks where locktype = 'advisory' and classid = %(a)s",
{'a': lock_type_id}).flat()
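# Illustrative usage sketch (not part of this module's API): the acquire/release
# pattern described in the docstrings above, using one of the test lock types.
# The connect_to_db() call is an assumption about how a DatabaseHandler is
# normally obtained; any mediawords.db.DatabaseHandler works.
def _example_usage() -> None:
    db = mediawords.db.connect_to_db()  # assumed connection helper
    if get_session_lock(db, 'test-a', 1, wait=False):
        try:
            pass  # do the work that must not run concurrently for this lock id
        finally:
            release_session_lock(db, 'test-a', 1)
    else:
        log.info("lock 'test-a'/1 is held by another session")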
| agpl-3.0 |
mixturemodel-flow/tensorflow | tensorflow/contrib/graph_editor/select.py | 75 | 28656 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various ways of selecting operations and tensors in a graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from six import iteritems
from six import string_types
from tensorflow.contrib.graph_editor import util
from tensorflow.python.framework import ops as tf_ops
__all__ = [
"can_be_regex",
"make_regex",
"filter_ts",
"filter_ts_from_regex",
"filter_ops",
"filter_ops_from_regex",
"get_name_scope_ops",
"check_cios",
"get_ops_ios",
"compute_boundary_ts",
"get_within_boundary_ops",
"get_forward_walk_ops",
"get_backward_walk_ops",
"get_walks_intersection_ops",
"get_walks_union_ops",
"select_ops",
"select_ts",
"select_ops_and_ts",
]
_RE_TYPE = type(re.compile(""))
def can_be_regex(obj):
"""Return True if obj can be turned into a regular expression."""
return isinstance(obj, string_types + (_RE_TYPE,))
def make_regex(obj):
"""Return a compiled regular expression.
Args:
obj: a string or a regular expression.
Returns:
A compiled regular expression.
Raises:
ValueError: if obj could not be converted to a regular expression.
"""
if not can_be_regex(obj):
raise ValueError("Expected a string or a regex, got: {}".format(type(obj)))
if isinstance(obj, string_types):
return re.compile(obj)
else:
return obj
def _get_input_ts(ops):
"""Compute the list of unique input tensors of all the op in ops.
Args:
ops: an object convertible to a list of `tf.Operation`.
Returns:
The list of unique input tensors of all the op in ops.
Raises:
TypeError: if ops cannot be converted to a list of `tf.Operation`.
"""
ops = util.make_list_of_op(ops)
ts = []
ts_set = set()
for op in ops:
for t in op.inputs:
if t not in ts_set:
ts.append(t)
ts_set.add(t)
return ts
def _get_output_ts(ops):
"""Compute the list of unique output tensors of all the op in ops.
Args:
ops: an object convertible to a list of tf.Operation.
Returns:
The list of unique output tensors of all the op in ops.
Raises:
TypeError: if ops cannot be converted to a list of tf.Operation.
"""
ops = util.make_list_of_op(ops)
ts = []
for op in ops:
ts += op.outputs
return ts
def filter_ts(ops, positive_filter):
"""Get all the tensors which are input or output of an op in ops.
Args:
ops: an object convertible to a list of `tf.Operation`.
positive_filter: a function deciding whether to keep a tensor or not.
If `True`, all the tensors are returned.
Returns:
A list of `tf.Tensor`.
Raises:
TypeError: if ops cannot be converted to a list of `tf.Operation`.
"""
ops = util.make_list_of_op(ops)
ts = _get_input_ts(ops)
util.concatenate_unique(ts, _get_output_ts(ops))
if positive_filter is not True:
ts = [t for t in ts if positive_filter(t)]
return ts
def filter_ts_from_regex(ops, regex):
r"""Get all the tensors linked to ops that match the given regex.
Args:
ops: an object convertible to a list of tf.Operation.
regex: a regular expression matching the tensors' name.
For example, "^foo(/.*)?:\d+$" will match all the tensors in the "foo"
scope.
Returns:
A list of tf.Tensor.
Raises:
TypeError: if ops cannot be converted to a list of tf.Operation.
"""
ops = util.make_list_of_op(ops)
regex_obj = make_regex(regex)
return filter_ts(ops, positive_filter=lambda op: regex_obj.search(op.name))
def filter_ops(ops, positive_filter):
"""Get the ops passing the given filter.
Args:
ops: an object convertible to a list of tf.Operation.
    positive_filter: a function deciding whether to keep an operation or not.
If True, all the operations are returned.
Returns:
A list of selected tf.Operation.
Raises:
TypeError: if ops cannot be converted to a list of tf.Operation.
"""
ops = util.make_list_of_op(ops)
if positive_filter is not True: # pylint: disable=g-explicit-bool-comparison
ops = [op for op in ops if positive_filter(op)]
return ops
def filter_ops_from_regex(ops, regex):
"""Get all the operations that match the given regex.
Args:
ops: an object convertible to a list of `tf.Operation`.
regex: a regular expression matching the operation's name.
For example, `"^foo(/.*)?$"` will match all the operations in the "foo"
scope.
Returns:
A list of `tf.Operation`.
Raises:
TypeError: if ops cannot be converted to a list of `tf.Operation`.
"""
ops = util.make_list_of_op(ops)
regex_obj = make_regex(regex)
return filter_ops(ops, lambda op: regex_obj.search(op.name))
def get_name_scope_ops(ops, scope):
"""Get all the operations under the given scope path.
Args:
ops: an object convertible to a list of tf.Operation.
scope: a scope path.
Returns:
A list of tf.Operation.
Raises:
TypeError: if ops cannot be converted to a list of tf.Operation.
"""
if scope and scope[-1] == "/":
scope = scope[:-1]
return filter_ops_from_regex(ops, "^{}(/.*)?$".format(scope))
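# An illustrative sketch (not part of this module): selecting ops by name scope on
# a tiny graph, as the docstrings above describe. The `tf` import and the constant
# ops below are assumptions made for the example only.
def _example_scope_selection():
  import tensorflow as tf  # local import keeps the sketch self-contained
  graph = tf.Graph()
  with graph.as_default():
    with tf.name_scope("foo"):
      a = tf.constant(1, name="a")
      b = tf.constant(2, name="b")
    tf.add(a, b, name="c")  # outside the "foo" scope
  foo_ops = get_name_scope_ops(graph.get_operations(), "foo")
  assert sorted(op.name for op in foo_ops) == ["foo/a", "foo/b"]
  return foo_ops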
def check_cios(control_inputs=False, control_outputs=None, control_ios=None):
"""Do various check on control_inputs and control_outputs.
Args:
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of util.ControlOutputs or None. If not None,
control outputs are enabled.
control_ios: An instance of util.ControlOutputs or None. If not None, both
      control inputs and control outputs are enabled. This is equivalent to
      setting control_inputs to True and control_outputs to the
      util.ControlOutputs instance.
Returns:
A tuple `(control_inputs, control_outputs)` where:
`control_inputs` is a boolean indicating whether to use control inputs.
`control_outputs` is an instance of util.ControlOutputs or None
Raises:
ValueError: if control_inputs is an instance of util.ControlOutputs but
control_outputs is not None
TypeError: if control_outputs is not None and is not a util.ControlOutputs.
"""
if control_ios is not None:
if not isinstance(control_ios, util.ControlOutputs):
raise TypeError("Expected a util.ControlOutputs, got: {}".format(
type(control_ios)))
if control_outputs is not None:
raise ValueError("control_outputs should be None when using control_ios.")
control_inputs = True
control_outputs = control_ios
elif control_outputs is not None:
if not isinstance(control_outputs, util.ControlOutputs):
raise TypeError("Expected a util.ControlOutputs, got: {}".format(
type(control_outputs)))
if control_outputs is not None:
control_outputs.update()
return control_inputs, control_outputs
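# Illustrative usage sketch (assumes a tf.Graph `g`): passing a single
# util.ControlOutputs instance through `control_ios` enables both directions.
#   cios = util.ControlOutputs(g)
#   control_inputs, control_outputs = check_cios(control_ios=cios)
#   # control_inputs is now True and control_outputs is `cios`.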
def get_ops_ios(ops, control_inputs=False, control_outputs=None,
control_ios=None):
"""Return all the `tf.Operation` which are connected to an op in ops.
Args:
ops: an object convertible to a list of `tf.Operation`.
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of `util.ControlOutputs` or `None`. If not
`None`, control outputs are enabled.
control_ios: An instance of `util.ControlOutputs` or `None`. If not `None`,
      both control inputs and control outputs are enabled. This is equivalent
      to setting `control_inputs` to `True` and `control_outputs` to the
      `util.ControlOutputs` instance.
Returns:
All the `tf.Operation` surrounding the given ops.
Raises:
TypeError: if `ops` cannot be converted to a list of `tf.Operation`.
"""
control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
control_ios)
ops = util.make_list_of_op(ops)
res = []
for op in ops:
util.concatenate_unique(res, [t.op for t in op.inputs])
for t in op.outputs:
util.concatenate_unique(res, t.consumers())
if control_outputs is not None:
util.concatenate_unique(res, control_outputs.get(op))
if control_inputs:
util.concatenate_unique(res, op.control_inputs)
return res
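# Illustrative usage sketch (assumes `some_ops` is a non-empty list of
# tf.Operation): collect the immediate neighbors of those ops, following
# control edges in both directions as well.
#   cios = util.ControlOutputs(some_ops[0].graph)
#   neighbors = get_ops_ios(some_ops, control_ios=cios)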
def compute_boundary_ts(ops):
"""Compute the tensors at the boundary of a set of ops.
  This function looks at all the tensors connected to the given ops (in/out)
  and classifies them into three categories:
  1) input tensors: tensors whose generating operation is not in ops.
  2) output tensors: tensors whose consumer operations are not in ops.
  3) inside tensors: tensors which are neither input nor output tensors.
Note that a tensor can be both an inside tensor and an output tensor if it is
consumed by operations both outside and inside of `ops`.
Args:
ops: an object convertible to a list of tf.Operation.
Returns:
A tuple `(outside_input_ts, outside_output_ts, inside_ts)` where:
`outside_input_ts` is a Python list of input tensors;
`outside_output_ts` is a python list of output tensors;
`inside_ts` is a python list of inside tensors.
Since a tensor can be both an inside tensor and an output tensor,
`outside_output_ts` and `inside_ts` might intersect.
Raises:
TypeError: if ops cannot be converted to a list of tf.Operation.
"""
ops = util.make_list_of_op(ops)
input_ts = _get_input_ts(ops)
output_ts = _get_output_ts(ops)
output_ts_set = frozenset(output_ts)
ops_set = frozenset(ops)
# Compute inside tensors.
inside_ts = []
only_inside_ts = []
for t in input_ts:
# Skip if the input tensor is not also an output tensor.
if t not in output_ts_set:
continue
# Mark as "inside".
inside_ts.append(t)
# Mark as "only inside" if the tensor is not both inside and output.
consumers = frozenset(t.consumers())
if consumers - ops_set:
continue
only_inside_ts.append(t)
inside_ts_set = frozenset(inside_ts)
only_inside_ts_set = frozenset(only_inside_ts)
outside_output_ts = [t for t in output_ts if t not in only_inside_ts_set]
outside_input_ts = [t for t in input_ts if t not in inside_ts_set]
return outside_input_ts, outside_output_ts, inside_ts
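# Illustrative usage sketch (assumes `subgraph_ops` is a list of tf.Operation
# forming a region of a larger graph):
#   input_ts, output_ts, inside_ts = compute_boundary_ts(subgraph_ops)
# `input_ts` are tensors produced outside the region, `output_ts` are tensors
# consumed outside of it, and `inside_ts` are tensors that stay within it.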
def get_within_boundary_ops(ops,
seed_ops,
boundary_ops=(),
inclusive=True,
control_inputs=False,
control_outputs=None,
control_ios=None):
"""Return all the `tf.Operation` within the given boundary.
Args:
    ops: an object convertible to a list of `tf.Operation`. Those ops define
      the set in which to perform the operation (if a `tf.Graph` is given, it
      will be converted to the list of all its operations).
seed_ops: the operations from which to start expanding.
boundary_ops: the ops forming the boundary.
inclusive: if `True`, the result will also include the boundary ops.
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of `util.ControlOutputs` or `None`. If not
`None`, control outputs are enabled.
control_ios: An instance of `util.ControlOutputs` or `None`. If not
`None`, both control inputs and control outputs are enabled. This is
      equivalent to setting control_inputs to True and control_outputs to
      the `util.ControlOutputs` instance.
Returns:
    All the `tf.Operation` within the given boundary.
Raises:
TypeError: if `ops` or `seed_ops` cannot be converted to a list of
`tf.Operation`.
ValueError: if the boundary is intersecting with the seeds.
"""
control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
control_ios)
ops = util.make_list_of_op(ops)
seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)
boundary_ops = set(util.make_list_of_op(boundary_ops))
res = set(seed_ops)
if boundary_ops & res:
raise ValueError("Boundary is intersecting with the seeds.")
wave = set(seed_ops)
while wave:
new_wave = set()
ops_io = get_ops_ios(wave, control_inputs, control_outputs)
for op in ops_io:
if op in res:
continue
if op in boundary_ops:
if inclusive:
res.add(op)
else:
new_wave.add(op)
res.update(new_wave)
wave = new_wave
return [op for op in ops if op in res]
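# Illustrative usage sketch (assumes a tf.Graph `g`, a list `seeds` of ops to
# start from, and a list `frontier` of ops delimiting the region):
#   region = get_within_boundary_ops(g, seeds, boundary_ops=frontier,
#                                    inclusive=False)
# The walk expands from `seeds` but never crosses `frontier`; with
# inclusive=False the boundary ops themselves are left out of the result.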
def get_forward_walk_ops(seed_ops,
inclusive=True,
within_ops=None,
stop_at_ts=(),
control_outputs=None):
"""Do a forward graph walk and return all the visited ops.
Args:
seed_ops: an iterable of operations from which the forward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the consumers of those tensors.
inclusive: if True the given seed_ops are also part of the resulting set.
within_ops: an iterable of `tf.Operation` within which the search is
restricted. If `within_ops` is `None`, the search is performed within
the whole graph.
stop_at_ts: an iterable of tensors at which the graph walk stops.
control_outputs: a `util.ControlOutputs` instance or None.
If not `None`, it will be used while walking the graph forward.
Returns:
    A Python list of all the `tf.Operation` ahead of `seed_ops`.
Raises:
TypeError: if `seed_ops` or `within_ops` cannot be converted to a list of
`tf.Operation`.
"""
_, control_outputs = check_cios(False, control_outputs)
if not util.is_iterable(seed_ops):
seed_ops = [seed_ops]
if not seed_ops:
return []
if isinstance(seed_ops[0], tf_ops.Tensor):
ts = util.make_list_of_t(seed_ops, allow_graph=False)
seed_ops = util.get_consuming_ops(ts)
else:
seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)
seed_ops = frozenset(seed_ops)
stop_at_ts = frozenset(util.make_list_of_t(stop_at_ts))
if within_ops:
within_ops = util.make_list_of_op(within_ops, allow_graph=False)
within_ops = frozenset(within_ops)
seed_ops &= within_ops
def is_within(op):
return within_ops is None or op in within_ops
result = list(seed_ops)
wave = set(seed_ops)
while wave:
new_wave = set()
for op in wave:
for new_t in op.outputs:
if new_t in stop_at_ts:
continue
for new_op in new_t.consumers():
if new_op not in result and is_within(new_op):
new_wave.add(new_op)
if control_outputs is not None:
for new_op in control_outputs.get(op):
if new_op not in result and is_within(new_op):
new_wave.add(new_op)
util.concatenate_unique(result, new_wave)
wave = new_wave
if not inclusive:
result = [op for op in result if op not in seed_ops]
return result
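# Illustrative usage sketch (assumes `t` is a tf.Tensor from an existing
# graph): gather every op that transitively depends on `t` through data edges.
#   downstream = get_forward_walk_ops([t])
# Seeding with tensors starts the walk at their consumers; pass
# inclusive=False to drop the seed ops from the result.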
def get_backward_walk_ops(seed_ops,
inclusive=True,
within_ops=None,
stop_at_ts=(),
control_inputs=False):
"""Do a backward graph walk and return all the visited ops.
Args:
seed_ops: an iterable of operations from which the backward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the generators of those tensors.
inclusive: if True the given seed_ops are also part of the resulting set.
within_ops: an iterable of `tf.Operation` within which the search is
restricted. If `within_ops` is `None`, the search is performed within
the whole graph.
stop_at_ts: an iterable of tensors at which the graph walk stops.
control_inputs: if True, control inputs will be used while moving backward.
Returns:
    A Python list of all the `tf.Operation` behind `seed_ops`.
Raises:
TypeError: if `seed_ops` or `within_ops` cannot be converted to a list of
`tf.Operation`.
"""
if not util.is_iterable(seed_ops):
seed_ops = [seed_ops]
if not seed_ops:
return []
if isinstance(seed_ops[0], tf_ops.Tensor):
ts = util.make_list_of_t(seed_ops, allow_graph=False)
seed_ops = util.get_generating_ops(ts)
else:
seed_ops = util.make_list_of_op(seed_ops, allow_graph=False)
stop_at_ts = frozenset(util.make_list_of_t(stop_at_ts))
seed_ops = frozenset(util.make_list_of_op(seed_ops))
if within_ops:
within_ops = util.make_list_of_op(within_ops, allow_graph=False)
within_ops = frozenset(within_ops)
seed_ops &= within_ops
def is_within(op):
return within_ops is None or op in within_ops
result = list(seed_ops)
wave = set(seed_ops)
while wave:
new_wave = set()
for op in wave:
for new_t in op.inputs:
if new_t in stop_at_ts:
continue
if new_t.op not in result and is_within(new_t.op):
new_wave.add(new_t.op)
if control_inputs:
for new_op in op.control_inputs:
if new_op not in result and is_within(new_op):
new_wave.add(new_op)
util.concatenate_unique(result, new_wave)
wave = new_wave
if not inclusive:
result = [op for op in result if op not in seed_ops]
return result
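# Illustrative usage sketch (assumes `loss` is a tf.Tensor): gather all the
# ops that contribute to computing it.
#   upstream = get_backward_walk_ops([loss])
# Seeding with tensors starts the walk at their generating ops; `stop_at_ts`
# can be used to cut the walk short at chosen tensors.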
def get_walks_intersection_ops(forward_seed_ops,
backward_seed_ops,
forward_inclusive=True,
backward_inclusive=True,
within_ops=None,
control_inputs=False,
control_outputs=None,
control_ios=None):
"""Return the intersection of a forward and a backward walk.
Args:
forward_seed_ops: an iterable of operations from which the forward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the consumers of those tensors.
backward_seed_ops: an iterable of operations from which the backward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the generators of those tensors.
forward_inclusive: if True the given forward_seed_ops are also part of the
resulting set.
backward_inclusive: if True the given backward_seed_ops are also part of the
resulting set.
within_ops: an iterable of tf.Operation within which the search is
restricted. If within_ops is None, the search is performed within
the whole graph.
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of util.ControlOutputs or None. If not None,
control outputs are enabled.
control_ios: An instance of util.ControlOutputs or None. If not None, both
      control inputs and control outputs are enabled. This is equivalent to
      setting control_inputs to True and control_outputs to the
      util.ControlOutputs instance.
Returns:
    A Python list of all the tf.Operation in the intersection of a forward and
    a backward walk.
Raises:
TypeError: if `forward_seed_ops` or `backward_seed_ops` or `within_ops`
cannot be converted to a list of `tf.Operation`.
"""
control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
control_ios)
forward_ops = get_forward_walk_ops(
forward_seed_ops,
inclusive=forward_inclusive,
within_ops=within_ops,
control_outputs=control_outputs)
backward_ops = get_backward_walk_ops(
backward_seed_ops,
inclusive=backward_inclusive,
within_ops=within_ops,
control_inputs=control_inputs)
return [op for op in forward_ops if op in backward_ops]
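# Illustrative usage sketch (assumes tensors `x` and `loss` belong to the same
# graph): the ops lying on a path from `x` to `loss`.
#   between = get_walks_intersection_ops(forward_seed_ops=[x],
#                                        backward_seed_ops=[loss])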
def get_walks_union_ops(forward_seed_ops,
backward_seed_ops,
forward_inclusive=True,
backward_inclusive=True,
within_ops=None,
control_inputs=False,
control_outputs=None,
control_ios=None):
"""Return the union of a forward and a backward walk.
Args:
forward_seed_ops: an iterable of operations from which the forward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the consumers of those tensors.
backward_seed_ops: an iterable of operations from which the backward graph
walk starts. If a list of tensors is given instead, the seed_ops are set
to be the generators of those tensors.
forward_inclusive: if True the given forward_seed_ops are also part of the
resulting set.
backward_inclusive: if True the given backward_seed_ops are also part of the
resulting set.
within_ops: restrict the search within those operations. If within_ops is
None, the search is done within the whole graph.
control_inputs: A boolean indicating whether control inputs are enabled.
control_outputs: An instance of util.ControlOutputs or None. If not None,
control outputs are enabled.
control_ios: An instance of util.ControlOutputs or None. If not None, both
      control inputs and control outputs are enabled. This is equivalent to
      setting control_inputs to True and control_outputs to the
      util.ControlOutputs instance.
Returns:
    A Python list of all the tf.Operation in the union of a forward and a
    backward walk.
Raises:
TypeError: if forward_seed_ops or backward_seed_ops or within_ops cannot be
converted to a list of tf.Operation.
"""
control_inputs, control_outputs = check_cios(control_inputs, control_outputs,
control_ios)
forward_ops = get_forward_walk_ops(
forward_seed_ops,
inclusive=forward_inclusive,
within_ops=within_ops,
control_outputs=control_outputs)
backward_ops = get_backward_walk_ops(
backward_seed_ops,
inclusive=backward_inclusive,
within_ops=within_ops,
control_inputs=control_inputs)
return util.concatenate_unique(forward_ops, backward_ops)
def select_ops(*args, **kwargs):
"""Helper to select operations.
Args:
*args: list of 1) regular expressions (compiled or not) or 2) (array of)
`tf.Operation`. `tf.Tensor` instances are silently ignored.
    **kwargs: 'graph': `tf.Graph` in which to perform the regex query. This is
      required when using regex.
      'positive_filter': an elem is selected only if `positive_filter(elem)` is
      `True`. This is optional.
'restrict_ops_regex': a regular expression is ignored if it doesn't start
with the substring "(?#ops)".
Returns:
A list of `tf.Operation`.
Raises:
TypeError: if the optional keyword argument graph is not a `tf.Graph`
or if an argument in args is not an (array of) `tf.Operation`
or an (array of) `tf.Tensor` (silently ignored) or a string
or a regular expression.
ValueError: if one of the keyword arguments is unexpected or if a regular
expression is used without passing a graph as a keyword argument.
"""
# get keywords arguments
graph = None
positive_filter = None
restrict_ops_regex = False
for k, v in iteritems(kwargs):
if k == "graph":
graph = v
if graph is not None and not isinstance(graph, tf_ops.Graph):
raise TypeError("Expected a tf.Graph, got: {}".format(type(graph)))
elif k == "positive_filter":
positive_filter = v
elif k == "restrict_ops_regex":
restrict_ops_regex = v
elif k == "restrict_ts_regex":
pass
else:
raise ValueError("Wrong keywords argument: {}.".format(k))
ops = []
for arg in args:
if can_be_regex(arg):
if graph is None:
raise ValueError("Use the keyword argument 'graph' to use regex.")
regex = make_regex(arg)
if regex.pattern.startswith("(?#ts)"):
continue
if restrict_ops_regex and not regex.pattern.startswith("(?#ops)"):
continue
ops_ = filter_ops_from_regex(graph, regex)
for op_ in ops_:
if op_ not in ops:
if positive_filter is None or positive_filter(op_):
ops.append(op_)
else:
ops_aux = util.make_list_of_op(arg, ignore_ts=True)
if positive_filter is not None:
ops_aux = [op for op in ops_aux if positive_filter(op)]
ops_aux = [op for op in ops_aux if op not in ops]
ops += ops_aux
return ops
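# Illustrative usage sketch (assumes a tf.Graph `g` with ops under a "foo"
# scope and a Python list `extra_ops` of tf.Operation):
#   ops = select_ops("^foo/.*", extra_ops, graph=g)
# Regexes require the `graph` keyword argument; explicit ops are passed
# through, and any tf.Tensor arguments are silently ignored.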
def select_ts(*args, **kwargs):
"""Helper to select tensors.
Args:
*args: list of 1) regular expressions (compiled or not) or 2) (array of)
`tf.Tensor`. `tf.Operation` instances are silently ignored.
    **kwargs: 'graph': `tf.Graph` in which to perform the regex query. This is
      required when using regex.
      'positive_filter': an elem is selected only if `positive_filter(elem)` is
      `True`. This is optional.
'restrict_ts_regex': a regular expression is ignored if it doesn't start
with the substring "(?#ts)".
Returns:
A list of `tf.Tensor`.
Raises:
TypeError: if the optional keyword argument graph is not a `tf.Graph`
or if an argument in args is not an (array of) `tf.Tensor`
or an (array of) `tf.Operation` (silently ignored) or a string
or a regular expression.
ValueError: if one of the keyword arguments is unexpected or if a regular
expression is used without passing a graph as a keyword argument.
"""
# get keywords arguments
graph = None
positive_filter = None
restrict_ts_regex = False
for k, v in iteritems(kwargs):
if k == "graph":
graph = v
if graph is not None and not isinstance(graph, tf_ops.Graph):
raise TypeError("Expected a tf.Graph, got {}".format(type(graph)))
elif k == "positive_filter":
positive_filter = v
elif k == "restrict_ts_regex":
restrict_ts_regex = v
elif k == "restrict_ops_regex":
pass
else:
raise ValueError("Wrong keywords argument: {}.".format(k))
ts = []
for arg in args:
if can_be_regex(arg):
if graph is None:
raise ValueError("Use the keyword argument 'graph' to use regex.")
regex = make_regex(arg)
if regex.pattern.startswith("(?#ops)"):
continue
if restrict_ts_regex and not regex.pattern.startswith("(?#ts)"):
continue
ts_ = filter_ts_from_regex(graph, regex)
for t_ in ts_:
if t_ not in ts:
if positive_filter is None or positive_filter(t_):
ts.append(t_)
else:
ts_aux = util.make_list_of_t(arg, ignore_ops=True)
if positive_filter is not None:
ts_aux = [t for t in ts_aux if positive_filter(t)]
ts_aux = [t for t in ts_aux if t not in ts]
ts += ts_aux
return ts
def select_ops_and_ts(*args, **kwargs):
"""Helper to select operations and tensors.
Args:
*args: list of 1) regular expressions (compiled or not) or 2) (array of)
      `tf.Operation` or 3) (array of) `tf.Tensor`. Regular expressions matching
tensors must start with the comment `"(?#ts)"`, for instance:
`"(?#ts)^foo/.*"`.
    **kwargs: 'graph': `tf.Graph` in which to perform the regex query. This is
      required when using regex.
      'positive_filter': an elem is selected only if `positive_filter(elem)` is
      `True`. This is optional.
Returns:
A tuple `(ops, ts)` where:
`ops` is a list of `tf.Operation`, and
`ts` is a list of `tf.Tensor`
Raises:
TypeError: if the optional keyword argument graph is not a `tf.Graph`
or if an argument in args is not an (array of) `tf.Tensor`
or an (array of) `tf.Operation` or a string or a regular expression.
ValueError: if one of the keyword arguments is unexpected or if a regular
expression is used without passing a graph as a keyword argument.
"""
ops = select_ops(*args, restrict_ops_regex=False, **kwargs)
ts = select_ts(*args, restrict_ts_regex=True, **kwargs)
return ops, ts
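# Illustrative usage sketch (assumes a tf.Graph `g`): select the ops under
# "foo" and, separately, the tensors under "bar".
#   ops, ts = select_ops_and_ts("^foo/.*", "(?#ts)^bar/.*", graph=g)
# Tensor regexes must carry the "(?#ts)" marker, otherwise they are treated
# as operation regexes only.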
| apache-2.0 |
suiyuan2009/tensorflow | tensorflow/contrib/saved_model/__init__.py | 109 | 1411 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel contrib support.
SavedModel provides a language-neutral format to save machine-learned models
that is recoverable and hermetic. It enables higher-level systems and tools to
produce, consume and transform TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.saved_model.python.saved_model.signature_def_utils import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["get_signature_def_by_key"]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/hooks/component_cornice.py | 2 | 1974 | """Instrumentation for the Cornice REST library for Pyramid.
"""
import functools
from newrelic.agent import (ObjectProxy, function_wrapper, callable_name,
current_transaction, FunctionTrace, wrap_function_wrapper)
module_cornice_service = None
@function_wrapper
def wrapper_Resource_method(wrapped, instance, args, kwargs):
transaction = current_transaction()
if transaction is None:
return wrapped(*args, **kwargs)
name = callable_name(wrapped)
transaction.set_transaction_name(name)
with FunctionTrace(transaction, name):
return wrapped(*args, **kwargs)
def wrapper_Resource(view):
@function_wrapper
def _wrapper_Resource(wrapped, instance, args, kwargs):
ob = wrapped(*args, **kwargs)
method = getattr(ob, view)
setattr(ob, view, wrapper_Resource_method(method))
return ob
return _wrapper_Resource
def wrapper_decorate_view(wrapped, instance, args, kwargs):
def _bind_params(view, args, method):
return view, args, method
_view, _args, _method = _bind_params(*args, **kwargs)
if 'klass' in _args and not callable(_view):
if module_cornice_service.is_string(_view):
_klass = _args['klass']
_args = dict(_args)
_args['klass'] = wrapper_Resource(_view)(_klass)
return wrapped(_view, _args, _method)
# For Cornice 0.17 or older we need to fixup the fact that they do
# not copy the wrapped view attributes to the wrapper it returns.
# This is only needed where the view is not a string.
wrapper = wrapped(*args, **kwargs)
if not module_cornice_service.is_string(_view):
if wrapper.__name__ != _view.__name__:
return functools.wraps(_view)(wrapper)
return wrapper
def instrument_cornice_service(module):
global module_cornice_service
module_cornice_service = module
wrap_function_wrapper(module, 'decorate_view', wrapper_decorate_view)
| agpl-3.0 |
zachcp/qiime | qiime/quality_scores_plot.py | 9 | 6918 | #!/usr/bin/env python
# File created Sept 29, 2010
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "William Walters"
__email__ = "[email protected]"
from matplotlib import use
use('Agg', warn=False)
from skbio.parse.sequences import parse_fasta
from numpy import arange, std, average
from pylab import plot, savefig, xlabel, ylabel, text, \
hist, figure, legend, title, show, xlim, ylim, xticks, yticks,\
scatter, subplot
from matplotlib.font_manager import fontManager, FontProperties
from qiime.util import gzip_open
from qiime.parse import parse_qual_score
def bin_qual_scores(qual_scores):
""" Bins qual score according to nucleotide position
qual_scores: Dict of label: numpy array of base scores
"""
qual_bins = []
qual_lens = []
for l in qual_scores.values():
qual_lens.append(len(l))
max_seq_size = max(qual_lens)
for base_position in range(max_seq_size):
qual_bins.append([])
for scores in qual_scores.values():
# Add score if exists in base position, otherwise skip
try:
qual_bins[base_position].append(scores[base_position])
except IndexError:
continue
return qual_bins
def get_qual_stats(qual_bins, score_min):
""" Generates bins of averages, std devs, total NT from quality bins"""
ave_bins = []
std_dev_bins = []
total_bases_bins = []
found_first_poor_qual_pos = False
suggested_trunc_pos = None
for base_position in qual_bins:
total_bases_bins.append(len(base_position))
std_dev_bins.append(std(base_position))
ave_bins.append(average(base_position))
if not found_first_poor_qual_pos:
if average(base_position) < score_min:
suggested_trunc_pos = qual_bins.index(base_position)
found_first_poor_qual_pos = True
return ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos
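# Illustrative sketch of how these helpers chain together (hypothetical data,
# not part of the original module); `scores` mimics parse_qual_score output.
#   scores = {'seq1': [30, 28, 25, 20], 'seq2': [31, 27, 24]}
#   bins = bin_qual_scores(scores)
#   aves, stds, counts, trunc_pos = get_qual_stats(bins, score_min=25)
#   # trunc_pos would be 2 here: the first position whose average (24.5)
#   # falls below the score minimum of 25.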
def plot_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
score_min,
output_dir):
""" Plots, saves graph showing quality score averages, stddev.
Additionally, the total nucleotide count for each position is shown on
a second subplot
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
score_min: lowest value that a given base call can be and still be
acceptable. Used to generate a dotted line on the graph for easy assay
of the poor scoring positions.
output_dir: output directory
"""
t = arange(0, len(ave_bins), 1)
std_dev_plus = []
std_dev_minus = []
for n in range(len(ave_bins)):
std_dev_plus.append(ave_bins[n] + std_dev_bins[n])
std_dev_minus.append(ave_bins[n] - std_dev_bins[n])
figure_num = 0
f = figure(figure_num, figsize=(8, 10))
figure_title = "Quality Scores Report"
f.text(.5, .93, figure_title, horizontalalignment='center', size="large")
subplot(2, 1, 1)
plot(t, ave_bins, linewidth=2.0, color="black")
plot(t, std_dev_plus, linewidth=0.5, color="red")
dashed_line = [score_min] * len(ave_bins)
l, = plot(dashed_line, '--', color='gray')
plot(t, std_dev_minus, linewidth=0.5, color="red")
legend(
('Quality Score Average',
'Std Dev',
'Score Threshold'),
loc='lower left')
xlabel("Nucleotide Position")
ylabel("Quality Score")
subplot(2, 1, 2)
plot(t, total_bases_bins, linewidth=2.0, color="blue")
xlabel("Nucleotide Position")
ylabel("Nucleotide Counts")
outfile_name = output_dir + "/quality_scores_plot.pdf"
savefig(outfile_name)
def write_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
output_dir,
suggested_trunc_pos):
""" Writes data in bins to output text file
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
output_dir: output directory
suggested_trunc_pos: Position where average quality score dropped below
the score minimum (25 by default)
"""
outfile_name = output_dir + "/quality_bins.txt"
outfile = open(outfile_name, "w")
outfile.write("# Suggested nucleotide truncation position (None if " +
"quality score average did not drop below the score minimum threshold)" +
": %s\n" % suggested_trunc_pos)
outfile.write("# Average quality score bins\n")
outfile.write(",".join(str("%2.3f" % ave) for ave in ave_bins) + "\n")
outfile.write("# Standard deviation bins\n")
outfile.write(",".join(str("%2.3f" % std) for std in std_dev_bins) + "\n")
outfile.write("# Total bases per nucleotide position bins\n")
outfile.write(",".join(str("%d" %
total_bases) for total_bases in total_bases_bins))
def generate_histogram(qual_fp,
output_dir,
score_min=25,
verbose=True,
qual_parser=parse_qual_score):
""" Main program function for generating quality score histogram
qual_fp: quality score filepath
output_dir: output directory
score_min: minimum score to be considered a reliable base call, used
to generate dotted line on histogram for easy visualization of poor
quality scores.
qual_parser : function to apply to extract quality scores
"""
if qual_fp.endswith('.gz'):
qual_lines = gzip_open(qual_fp)
else:
qual_lines = open(qual_fp, "U")
qual_scores = qual_parser(qual_lines)
# Sort bins according to base position
qual_bins = bin_qual_scores(qual_scores)
# Get average, std dev, and total nucleotide counts for each base position
ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos =\
get_qual_stats(qual_bins, score_min)
plot_qual_report(ave_bins, std_dev_bins, total_bases_bins, score_min,
output_dir)
# Save values to output text file
write_qual_report(ave_bins, std_dev_bins, total_bases_bins, output_dir,
suggested_trunc_pos)
if verbose:
print "Suggested nucleotide truncation position (None if quality " +\
"score average did not fall below the minimum score parameter): %s\n" %\
suggested_trunc_pos
| gpl-2.0 |
CCPorg/DMD-Diamond-Ver-102-Copy | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
bssrdf/zulip | zerver/lib/test_helpers.py | 113 | 12407 | from django.test import TestCase
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib import cache
from zerver.lib import event_queue
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, do_add_subscription,
get_display_recipient, get_user_profile_by_email,
)
from zerver.models import (
resolve_email_to_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
)
import base64
import os
import re
import time
import ujson
import urllib
from contextlib import contextmanager
API_KEYS = {}
@contextmanager
def stub(obj, name, f):
old_f = getattr(obj, name)
setattr(obj, name, f)
yield
setattr(obj, name, old_f)
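# Illustrative usage sketch (hypothetical names, not from the test suite):
# temporarily replace an attribute on a module or object within a test.
#   with stub(some_module, 'send_email', lambda *args, **kwargs: None):
#       run_code_that_would_send_email()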
@contextmanager
def simulated_queue_client(client):
real_SimpleQueueClient = queue_processors.SimpleQueueClient
queue_processors.SimpleQueueClient = client
yield
queue_processors.SimpleQueueClient = real_SimpleQueueClient
@contextmanager
def tornado_redirected_to_list(lst):
real_event_queue_process_notification = event_queue.process_notification
event_queue.process_notification = lst.append
yield
event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
cache_queries = []
def my_cache_get(key, cache_name=None):
cache_queries.append(('get', key, cache_name))
return None
def my_cache_get_many(keys, cache_name=None):
cache_queries.append(('getmany', keys, cache_name))
return None
old_get = cache.cache_get
old_get_many = cache.cache_get_many
cache.cache_get = my_cache_get
cache.cache_get_many = my_cache_get_many
yield cache_queries
cache.cache_get = old_get
cache.cache_get_many = old_get_many
@contextmanager
def queries_captured():
'''
Allow a user to capture just the queries executed during
the with statement.
'''
queries = []
def wrapper_execute(self, action, sql, params=()):
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
queries.append({
'sql': self.mogrify(sql, params),
'time': "%.3f" % duration,
})
old_execute = TimeTrackingCursor.execute
old_executemany = TimeTrackingCursor.executemany
def cursor_execute(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)
TimeTrackingCursor.execute = cursor_execute
def cursor_executemany(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params)
TimeTrackingCursor.executemany = cursor_executemany
yield queries
TimeTrackingCursor.execute = old_execute
TimeTrackingCursor.executemany = old_executemany
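# Illustrative usage sketch (inside a test method; `do_something` is a
# hypothetical helper): capture the SQL executed in the block and bound the
# number of queries.
#   with queries_captured() as queries:
#       do_something()
#   self.assert_length(queries, 10)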
def find_key_by_email(address):
from django.core.mail import outbox
key_regex = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
for message in reversed(outbox):
if address in message.to:
return key_regex.search(message.body).groups()[0]
def message_ids(result):
return set(message['id'] for message in result['messages'])
def message_stream_count(user_profile):
return UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
count()
def most_recent_usermessage(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('-message')
return query[0] # Django does LIMIT here
def most_recent_message(user_profile):
usermessage = most_recent_usermessage(user_profile)
return usermessage.message
def get_user_messages(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('message')
return [um.message for um in query]
class DummyObject:
pass
class DummyTornadoRequest:
def __init__(self):
self.connection = DummyObject()
self.connection.stream = DummyStream()
class DummyHandler(object):
def __init__(self, assert_callback):
self.assert_callback = assert_callback
self.request = DummyTornadoRequest()
# Mocks RequestHandler.async_callback, which wraps a callback to
# handle exceptions. We return the callback as-is.
def async_callback(self, cb):
return cb
def write(self, response):
        raise NotImplementedError()
def zulip_finish(self, response, *ignore):
if self.assert_callback:
self.assert_callback(response)
class DummySession(object):
session_key = "0"
class DummyStream:
def closed(self):
return False
class POSTRequestMock(object):
method = "POST"
def __init__(self, post_data, user_profile, assert_callback=None):
self.REQUEST = self.POST = post_data
self.user = user_profile
self._tornado_handler = DummyHandler(assert_callback)
self.session = DummySession()
        self._log_data = {}
        self.META = {'PATH_INFO': 'test'}
class AuthedTestCase(TestCase):
# Helper because self.client.patch annoying requires you to urlencode
def client_patch(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.patch(url, info, **kwargs)
def client_put(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.put(url, info, **kwargs)
def client_delete(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.delete(url, info, **kwargs)
def login(self, email, password=None):
if password is None:
password = initial_password(email)
return self.client.post('/accounts/login/',
{'username':email, 'password':password})
def register(self, username, password, domain="zulip.com"):
self.client.post('/accounts/home/',
{'email': username + "@" + domain})
return self.submit_reg_form_for_user(username, password, domain=domain)
def submit_reg_form_for_user(self, username, password, domain="zulip.com"):
"""
Stage two of the two-step registration process.
If things are working correctly the account should be fully
registered after this call.
"""
return self.client.post('/accounts/register/',
{'full_name': username, 'password': password,
'key': find_key_by_email(username + '@' + domain),
'terms': True})
def get_api_key(self, email):
if email not in API_KEYS:
API_KEYS[email] = get_user_profile_by_email(email).api_key
return API_KEYS[email]
def api_auth(self, email):
credentials = "%s:%s" % (email, self.get_api_key(email))
return {
'HTTP_AUTHORIZATION': 'Basic ' + base64.b64encode(credentials)
}
def get_streams(self, email):
"""
Helper function to get the stream names for a user
"""
user_profile = get_user_profile_by_email(email)
subs = Subscription.objects.filter(
user_profile = user_profile,
active = True,
recipient__type = Recipient.STREAM)
return [get_display_recipient(sub.recipient) for sub in subs]
def send_message(self, sender_name, recipient_list, message_type,
content="test content", subject="test", **kwargs):
sender = get_user_profile_by_email(sender_name)
if message_type == Recipient.PERSONAL:
message_type_name = "private"
else:
message_type_name = "stream"
if isinstance(recipient_list, basestring):
recipient_list = [recipient_list]
(sending_client, _) = Client.objects.get_or_create(name="test suite")
return check_send_message(
sender, sending_client, message_type_name, recipient_list, subject,
content, forged=False, forged_timestamp=None,
forwarder_user_profile=sender, realm=sender.realm, **kwargs)
def get_old_messages(self, anchor=1, num_before=100, num_after=100):
post_params = {"anchor": anchor, "num_before": num_before,
"num_after": num_after}
result = self.client.post("/json/get_old_messages", dict(post_params))
data = ujson.loads(result.content)
return data['messages']
def users_subscribed_to_stream(self, stream_name, realm_domain):
realm = Realm.objects.get(domain=realm_domain)
stream = Stream.objects.get(name=stream_name, realm=realm)
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscriptions = Subscription.objects.filter(recipient=recipient, active=True)
return [subscription.user_profile for subscription in subscriptions]
def assert_json_success(self, result):
"""
Successful POSTs return a 200 and JSON of the form {"result": "success",
"msg": ""}.
"""
self.assertEqual(result.status_code, 200, result)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "success")
# We have a msg key for consistency with errors, but it typically has an
# empty value.
self.assertIn("msg", json)
return json
def get_json_error(self, result, status_code=400):
self.assertEqual(result.status_code, status_code)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "error")
return json['msg']
def assert_json_error(self, result, msg, status_code=400):
"""
Invalid POSTs return an error status code and JSON of the form
{"result": "error", "msg": "reason"}.
"""
self.assertEqual(self.get_json_error(result, status_code=status_code), msg)
def assert_length(self, queries, count, exact=False):
actual_count = len(queries)
if exact:
return self.assertTrue(actual_count == count,
"len(%s) == %s, != %s" % (queries, actual_count, count))
return self.assertTrue(actual_count <= count,
"len(%s) == %s, > %s" % (queries, actual_count, count))
def assert_json_error_contains(self, result, msg_substring):
self.assertIn(msg_substring, self.get_json_error(result))
def fixture_data(self, type, action, file_type='json'):
return open(os.path.join(os.path.dirname(__file__),
"../fixtures/%s/%s_%s.%s" % (type, type, action,file_type))).read()
# Subscribe to a stream directly
def subscribe_to_stream(self, email, stream_name, realm=None):
realm = Realm.objects.get(domain=resolve_email_to_domain(email))
stream, _ = create_stream_if_needed(realm, stream_name)
user_profile = get_user_profile_by_email(email)
do_add_subscription(user_profile, stream, no_log=True)
# Subscribe to a stream by making an API request
def common_subscribe_to_streams(self, email, streams, extra_post_data = {}, invite_only=False):
post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]),
'invite_only': ujson.dumps(invite_only)}
post_data.update(extra_post_data)
result = self.client.post("/api/v1/users/me/subscriptions", post_data, **self.api_auth(email))
return result
def send_json_payload(self, email, url, payload, stream_name=None, **post_params):
        if stream_name is not None:
self.subscribe_to_stream(email, stream_name)
result = self.client.post(url, payload, **post_params)
self.assert_json_success(result)
# Check the correct message was sent
msg = Message.objects.filter().order_by('-id')[0]
self.assertEqual(msg.sender.email, email)
self.assertEqual(get_display_recipient(msg.recipient), stream_name)
return msg
| apache-2.0 |
nrb/ansible-modules-extras | cloud/amazon/route53_zone.py | 37 | 5487 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: route53_zone
short_description: add or delete Route53 zones
description:
- Creates and deletes Route53 private and public zones
version_added: "2.0"
options:
zone:
description:
- "The DNS zone record (eg: foo.com.)"
required: true
state:
description:
      - whether or not the zone should exist
    required: false
    default: present
choices: [ "present", "absent" ]
vpc_id:
description:
- The VPC ID the zone should be a part of (if this is going to be a private zone)
required: false
default: null
vpc_region:
description:
- The VPC Region the zone should be a part of (if this is going to be a private zone)
required: false
default: null
comment:
description:
- Comment associated with the zone
required: false
default: ''
extends_documentation_fragment: aws
author: "Christopher Troup (@minichate)"
'''
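# Illustrative playbook usage (a sketch, not taken from the module's
# documentation; zone names and IDs below are made up):
#   - route53_zone: zone=example.com comment="public zone" state=present
#   - route53_zone: zone=internal.example.com vpc_id=vpc-123456 vpc_region=us-east-1 state=present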
import time
try:
import boto
import boto.ec2
from boto import route53
from boto.route53 import Route53Connection
from boto.route53.zone import Zone
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
module = AnsibleModule(
argument_spec=dict(
zone=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
vpc_id=dict(default=None),
vpc_region=dict(default=None),
comment=dict(default=''),
)
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
zone_in = module.params.get('zone').lower()
state = module.params.get('state').lower()
vpc_id = module.params.get('vpc_id')
vpc_region = module.params.get('vpc_region')
comment = module.params.get('comment')
private_zone = vpc_id is not None and vpc_region is not None
_, _, aws_connect_kwargs = get_aws_connection_info(module)
# connect to the route53 endpoint
try:
conn = Route53Connection(**aws_connect_kwargs)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
results = conn.get_all_hosted_zones()
zones = {}
for r53zone in results['ListHostedZonesResponse']['HostedZones']:
zone_id = r53zone['Id'].replace('/hostedzone/', '')
zone_details = conn.get_hosted_zone(zone_id)['GetHostedZoneResponse']
if vpc_id and 'VPCs' in zone_details:
# this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
if isinstance(zone_details['VPCs'], dict):
if zone_details['VPCs']['VPC']['VPCId'] == vpc_id:
zones[r53zone['Name']] = zone_id
else: # Forward compatibility for when boto fixes that bug
if vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
zones[r53zone['Name']] = zone_id
else:
zones[r53zone['Name']] = zone_id
record = {
'private_zone': private_zone,
'vpc_id': vpc_id,
'vpc_region': vpc_region,
'comment': comment,
}
if state == 'present' and zone_in in zones:
if private_zone:
details = conn.get_hosted_zone(zones[zone_in])
if 'VPCs' not in details['GetHostedZoneResponse']:
module.fail_json(
msg="Can't change VPC from public to private"
)
vpc_details = details['GetHostedZoneResponse']['VPCs']['VPC']
current_vpc_id = vpc_details['VPCId']
current_vpc_region = vpc_details['VPCRegion']
if current_vpc_id != vpc_id:
module.fail_json(
msg="Can't change VPC ID once a zone has been created"
)
if current_vpc_region != vpc_region:
module.fail_json(
msg="Can't change VPC Region once a zone has been created"
)
record['zone_id'] = zones[zone_in]
record['name'] = zone_in
module.exit_json(changed=False, set=record)
elif state == 'present':
result = conn.create_hosted_zone(zone_in, **record)
hosted_zone = result['CreateHostedZoneResponse']['HostedZone']
zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
record['zone_id'] = zone_id
record['name'] = zone_in
module.exit_json(changed=True, set=record)
elif state == 'absent' and zone_in in zones:
conn.delete_hosted_zone(zones[zone_in])
module.exit_json(changed=True)
elif state == 'absent':
module.exit_json(changed=False)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/platform/ext-runtime/ruby/test/runtime_test.py | 2 | 18352 | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from gae_ext_runtime import testutil
RUNTIME_DEF_ROOT = os.path.dirname(os.path.dirname(__file__))
DOCKERFILE_TEXT = '''\
# This Dockerfile for a Ruby application was generated by gcloud.
# The base Dockerfile installs:
# * A number of packages needed by the Ruby runtime and by gems
# commonly used in Ruby web apps (such as libsqlite3)
# * A recent version of NodeJS
# * A recent version of the standard Ruby runtime to use by default
# * The bundler gem
FROM gcr.io/google_appengine/ruby:{base_image_tag}
# If your application requires a specific ruby version (compatible with rbenv),
# set it here. Leave blank to use the currently recommended default.
ARG REQUESTED_RUBY_VERSION="{ruby_version}"
# Install any requested ruby if not already preinstalled by the base image.
# Tries installing a prebuilt package first, then falls back to a source build.
RUN if test -n "$REQUESTED_RUBY_VERSION" -a \\
! -x /rbenv/versions/$REQUESTED_RUBY_VERSION/bin/ruby; then \\
(apt-get update -y \\
&& apt-get install -y -q gcp-ruby-$REQUESTED_RUBY_VERSION) \\
|| (cd /rbenv/plugins/ruby-build \\
&& git pull \\
&& rbenv install -s $REQUESTED_RUBY_VERSION) \\
&& rbenv global $REQUESTED_RUBY_VERSION \\
&& gem install -q --no-rdoc --no-ri bundler --version $BUNDLER_VERSION \\
&& apt-get clean \\
&& rm -f /var/lib/apt/lists/*_*; \\
fi
ENV RBENV_VERSION=${{REQUESTED_RUBY_VERSION:-$RBENV_VERSION}}
# Copy the application files.
COPY . /app/
# Install required gems if Gemfile.lock is present.
RUN if test -f Gemfile.lock; then \\
bundle install --deployment --without="development test" \\
&& rbenv rehash; \\
fi
# Temporary. Will be moved to base image later.
ENV RACK_ENV=production \\
RAILS_ENV=production \\
RAILS_SERVE_STATIC_FILES=true
# Run asset pipeline if we're in a Rails app.
RUN if test -d app/assets -a -f config/application.rb; then \\
bundle exec rake assets:precompile || true; \\
fi
# BUG: Reset entrypoint to override base image.
ENTRYPOINT []
# Start application on port $PORT.
CMD {entrypoint}
'''
class RuntimeTestCase(testutil.TestBase):
"""Tests for the Ruby external runtime fingerprinter."""
def file_contents(self, filename):
"""Reads the contents of the file from the tempdir.
Args:
filename: (str) filename to be joined with tempdir prefix.
Returns:
File contents.
"""
with open(self.full_path(filename)) as f:
return f.read()
def stub_response(self, response):
"""Stubs the console response from the user.
Args:
response: (str) stubbed response.
Returns:
A function to reset the stubbed functions to their original
implementations.
"""
can_prompt = self.exec_env.CanPrompt
prompt_response = self.exec_env.PromptResponse
def unstub():
self.exec_env.CanPrompt = can_prompt
self.exec_env.PromptResponse = prompt_response
self.exec_env.CanPrompt = lambda: True
self.exec_env.PromptResponse = lambda prompt: response
return unstub
def setUp(self):
self.runtime_def_root = RUNTIME_DEF_ROOT
super(RuntimeTestCase, self).setUp()
def test_generate_without_ruby_files(self):
self.write_file('index.html', 'index')
self.generate_configs()
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
self.assertFalse(os.path.exists(self.full_path('Dockerfile')))
self.assertFalse(os.path.exists(self.full_path('.dockerignore')))
def test_generate_without_ruby_files_no_write(self):
"""Tests generate_config_data does nothing if no ruby files."""
self.write_file('index.html', 'index')
self.assertIsNone(self.generate_config_data())
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
def test_generate_with_ruby_files(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
self.generate_configs()
unstub()
app_yaml = self.file_contents('app.yaml')
self.assertIn('runtime: ruby\n', app_yaml)
self.assertIn('env: flex\n', app_yaml)
self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
app_yaml)
self.assertFalse(os.path.exists(self.full_path('Dockerfile')))
self.assertFalse(os.path.exists(self.full_path('.dockerignore')))
def test_generate_with_ruby_files_no_write(self):
"""Tests generate_config_data with basic Ruby files.
Tests that app.yaml is written with correct contents given entrypoint
response, and that Dockerfile and .dockerignore not written to disk.
"""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
cfg_files = self.generate_config_data()
unstub()
app_yaml = self.file_contents('app.yaml')
self.assertIn('runtime: ruby\n', app_yaml)
self.assertIn('env: flex\n', app_yaml)
self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
app_yaml)
self.assertNotIn('Dockerfile', [f.filename for f in cfg_files])
self.assertNotIn('.dockerignore', [f.filename for f in cfg_files])
def test_generate_with_deploy(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
self.write_file('.ruby-version', 'rbx-3.9')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
self.generate_configs(deploy=True)
unstub()
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(
dockerfile,
DOCKERFILE_TEXT.format(
ruby_version='rbx-3.9',
entrypoint='bundle exec rackup -p $PORT -E deployment'))
dockerignore = self.file_contents('.dockerignore')
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_deploy_no_write(self):
"""Tests generate_config_data with deploy=True.
Tests that .dockerignore and Dockerfile contents are correct
based on contents of app.
"""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
self.write_file('.ruby-version', 'rbx-3.9')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
cfg_files = self.generate_config_data(deploy=True)
unstub()
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
DOCKERFILE_TEXT.format(
ruby_version='rbx-3.9',
entrypoint='bundle exec rackup -p $PORT -E deployment'))
self.assertIn('.dockerignore', [f.filename for f in cfg_files])
dockerignore = [f.contents for f in cfg_files if
f.filename == '.dockerignore'][0]
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_custom(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
self.generate_configs(custom=True)
unstub()
app_yaml = self.file_contents('app.yaml')
self.assertIn('runtime: custom\n', app_yaml)
self.assertIn('env: flex\n', app_yaml)
self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
app_yaml)
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(
dockerfile,
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec rackup -p $PORT -E deployment'))
dockerignore = self.file_contents('.dockerignore')
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_custom_no_write(self):
"""Tests generate_config_data with custom=True.
Tests that app.yaml is written with correct parameters and
Dockerfile, .dockerignore contents are correctly returned by method.
"""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
cfg_files = self.generate_config_data(custom=True)
unstub()
app_yaml = self.file_contents('app.yaml')
self.assertIn('runtime: custom\n', app_yaml)
self.assertIn('env: flex\n', app_yaml)
self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
app_yaml)
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec rackup -p $PORT -E deployment'))
self.assertIn('.dockerignore', [f.filename for f in cfg_files])
dockerignore = [f.contents for f in cfg_files if
f.filename == '.dockerignore'][0]
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_existing_appinfo(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
appinfo = testutil.AppInfoFake(
entrypoint='bundle exec ruby index.rb $PORT',
runtime='ruby',
vm=True)
self.generate_configs(appinfo=appinfo, deploy=True)
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(
dockerfile,
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec ruby index.rb $PORT'))
dockerignore = self.file_contents('.dockerignore')
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_existing_appinfo_no_write(self):
"""Tests generate_config_data with passed appinfo."""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
appinfo = testutil.AppInfoFake(
entrypoint='bundle exec ruby index.rb $PORT',
runtime='ruby',
vm=True)
cfg_files = self.generate_config_data(appinfo=appinfo, deploy=True)
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec ruby index.rb $PORT'))
self.assertIn('.dockerignore', [f.filename for f in cfg_files])
dockerignore = [f.contents for f in cfg_files if
f.filename == '.dockerignore'][0]
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_ruby_version(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
self.write_file('.ruby-version', '2.3.1\n')
appinfo = testutil.AppInfoFake(
entrypoint='bundle exec ruby index.rb $PORT',
runtime='ruby',
vm=True)
self.generate_configs(appinfo=appinfo, deploy=True)
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(
dockerfile,
DOCKERFILE_TEXT.format(
ruby_version='2.3.1',
entrypoint='bundle exec ruby index.rb $PORT'))
dockerignore = self.file_contents('.dockerignore')
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_ruby_version_no_write(self):
"""Tests generate_config_data with .ruby-version file."""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
self.write_file('config.ru', 'run Index.app')
self.write_file('.ruby-version', '2.3.1\n')
appinfo = testutil.AppInfoFake(
entrypoint='bundle exec ruby index.rb $PORT',
runtime='ruby',
vm=True)
cfg_files = self.generate_config_data(appinfo=appinfo, deploy=True)
self.assertFalse(os.path.exists(self.full_path('app.yaml')))
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
DOCKERFILE_TEXT.format(
ruby_version='2.3.1',
entrypoint='bundle exec ruby index.rb $PORT'))
self.assertIn('.dockerignore', [f.filename for f in cfg_files])
dockerignore = [f.contents for f in cfg_files if
f.filename == '.dockerignore'][0]
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_prompt(self):
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
unstub = self.stub_response('bundle exec ruby index.rb $PORT')
self.generate_configs(deploy=True)
unstub()
dockerfile = self.file_contents('Dockerfile')
self.assertEqual(
dockerfile,
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec ruby index.rb $PORT'))
dockerignore = self.file_contents('.dockerignore')
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
def test_generate_with_prompt_no_write(self):
"""Tests generate_config_data with entrypoint given by prompt."""
self.write_file('index.rb', 'class Index; end')
self.write_file('Gemfile', 'source "https://rubygems.org"')
unstub = self.stub_response('bundle exec ruby index.rb $PORT')
cfg_files = self.generate_config_data(deploy=True)
unstub()
self.assert_genfile_exists_with_contents(
cfg_files,
'Dockerfile',
DOCKERFILE_TEXT.format(
ruby_version='',
entrypoint='bundle exec ruby index.rb $PORT'))
self.assertIn('.dockerignore', [f.filename for f in cfg_files])
dockerignore = [f.contents for f in cfg_files if
f.filename == '.dockerignore'][0]
self.assertIn('.dockerignore\n', dockerignore)
self.assertIn('Dockerfile\n', dockerignore)
self.assertIn('.git\n', dockerignore)
self.assertIn('.hg\n', dockerignore)
self.assertIn('.svn\n', dockerignore)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
premanandchandrasekar/boto | boto/ec2/buyreservation.py | 56 | 3813 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto.ec2
from boto.sdb.db.property import StringProperty, IntegerProperty
from boto.manage import propget
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge',
'c1.medium', 'c1.xlarge', 'm2.xlarge',
'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge',
't1.micro']
class BuyReservation(object):
def get_region(self, params):
if not params.get('region', None):
prop = StringProperty(name='region', verbose_name='EC2 Region',
choices=boto.ec2.regions)
params['region'] = propget.get(prop, choices=boto.ec2.regions)
def get_instance_type(self, params):
if not params.get('instance_type', None):
prop = StringProperty(name='instance_type', verbose_name='Instance Type',
choices=InstanceTypes)
params['instance_type'] = propget.get(prop)
def get_quantity(self, params):
if not params.get('quantity', None):
prop = IntegerProperty(name='quantity', verbose_name='Number of Instances')
params['quantity'] = propget.get(prop)
def get_zone(self, params):
if not params.get('zone', None):
prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
choices=self.ec2.get_all_zones)
params['zone'] = propget.get(prop)
def get(self, params):
self.get_region(params)
self.ec2 = params['region'].connect()
self.get_instance_type(params)
self.get_zone(params)
self.get_quantity(params)
if __name__ == "__main__":
obj = BuyReservation()
params = {}
obj.get(params)
offerings = obj.ec2.get_all_reserved_instances_offerings(instance_type=params['instance_type'],
availability_zone=params['zone'].name)
print '\nThe following Reserved Instances Offerings are available:\n'
for offering in offerings:
offering.describe()
prop = StringProperty(name='offering', verbose_name='Offering',
choices=offerings)
offering = propget.get(prop)
print '\nYou have chosen this offering:'
offering.describe()
unit_price = float(offering.fixed_price)
total_price = unit_price * params['quantity']
print '!!! You are about to purchase %d of these offerings for a total of $%.2f !!!' % (params['quantity'], total_price)
answer = raw_input('Are you sure you want to do this? If so, enter YES: ')
if answer.strip().lower() == 'yes':
offering.purchase(params['quantity'])
else:
print 'Purchase cancelled'
| mit |
vileopratama/vitech | src/openerp/report/printscreen/ps_list.py | 48 | 11008 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import openerp
from openerp.report.interface import report_int
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
from lxml import etree
from openerp.report import render, report_sxw
import locale
import time, os
from operator import itemgetter
from datetime import datetime
class report_printscreen_list(report_int):
def __init__(self, name):
report_int.__init__(self, name)
self.context = {}
self.groupby = []
self.cr=''
def _parse_node(self, root_node):
result = []
for node in root_node:
field_name = node.get('name')
if not eval(str(node.attrib.get('invisible',False)),{'context':self.context}):
if node.tag == 'field':
if field_name in self.groupby:
continue
result.append(field_name)
else:
result.extend(self._parse_node(node))
return result
def _parse_string(self, view):
try:
dom = etree.XML(view.encode('utf-8'))
except Exception:
dom = etree.XML(view)
return self._parse_node(dom)
def create(self, cr, uid, ids, datas, context=None):
if not context:
context={}
self.cr=cr
self.context = context
self.groupby = context.get('group_by',[])
self.groupby_no_leaf = context.get('group_by_no_leaf',False)
registry = openerp.registry(cr.dbname)
model = registry[datas['model']]
model_id = registry['ir.model'].search(cr, uid, [('model','=',model._name)])
model_desc = model._description
if model_id:
model_desc = registry['ir.model'].browse(cr, uid, model_id[0], context).name
self.title = model_desc
datas['ids'] = ids
result = model.fields_view_get(cr, uid, view_type='tree', context=context)
fields_order = self.groupby + self._parse_string(result['arch'])
if self.groupby:
rows = []
def get_groupby_data(groupby = [], domain = []):
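                # read_group() returns one row per group; each group row carries a
                # __context/__domain pair that is used either to recurse into
                # sub-groups or, when no deeper grouping exists, to read the
                # individual leaf records for that group.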
records = model.read_group(cr, uid, domain, fields_order, groupby , 0, None, context)
for rec in records:
rec['__group'] = True
rec['__no_leaf'] = self.groupby_no_leaf
rec['__grouped_by'] = groupby[0] if (isinstance(groupby, list) and groupby) else groupby
for f in fields_order:
if f not in rec:
rec.update({f:False})
elif isinstance(rec[f], tuple):
rec[f] = rec[f][1]
rows.append(rec)
inner_groupby = (rec.get('__context', {})).get('group_by',[])
inner_domain = rec.get('__domain', [])
if inner_groupby:
get_groupby_data(inner_groupby, inner_domain)
else:
if self.groupby_no_leaf:
continue
child_ids = model.search(cr, uid, inner_domain)
res = model.read(cr, uid, child_ids, result['fields'].keys(), context)
res.sort(lambda x,y: cmp(ids.index(x['id']), ids.index(y['id'])))
rows.extend(res)
dom = [('id','in',ids)]
if self.groupby_no_leaf and len(ids) and not ids[0]:
dom = datas.get('_domain',[])
get_groupby_data(self.groupby, dom)
else:
rows = model.read(cr, uid, datas['ids'], result['fields'].keys(), context)
ids2 = map(itemgetter('id'), rows) # getting the ids from read result
if datas['ids'] != ids2: # sorted ids were not taken into consideration for print screen
rows_new = []
for id in datas['ids']:
rows_new += [elem for elem in rows if elem['id'] == id]
rows = rows_new
res = self._create_table(uid, datas['ids'], result['fields'], fields_order, rows, context, model_desc)
return self.obj.get(), 'pdf'
def _create_table(self, uid, ids, fields, fields_order, results, context, title=''):
pageSize=[297.0, 210.0]
new_doc = etree.Element("report")
config = etree.SubElement(new_doc, 'config')
def _append_node(name, text):
n = etree.SubElement(config, name)
n.text = text
#_append_node('date', time.strftime('%d/%m/%Y'))
_append_node('date', time.strftime(str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))))
_append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
_append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
_append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
_append_node('report-header', title)
registry = openerp.registry(self.cr.dbname)
_append_node('company', registry['res.users'].browse(self.cr,uid,uid).company_id.name)
rpt_obj = registry['res.users']
rml_obj=report_sxw.rml_parse(self.cr, uid, rpt_obj._name,context)
_append_node('header-date', str(rml_obj.formatLang(time.strftime("%Y-%m-%d"),date=True))+' ' + str(time.strftime("%H:%M")))
l = []
t = 0
strmax = (pageSize[0]-40) * 2.8346
temp = []
tsum = []
for i in range(0, len(fields_order)):
temp.append(0)
tsum.append(0)
ince = -1
for f in fields_order:
s = 0
ince += 1
if fields[f]['type'] in ('date','time','datetime','float','integer'):
s = 60
strmax -= s
if fields[f]['type'] in ('float','integer'):
temp[ince] = 1
else:
t += fields[f].get('size', 80) / 28 + 1
l.append(s)
for pos in range(len(l)):
if not l[pos]:
s = fields[fields_order[pos]].get('size', 80) / 28 + 1
l[pos] = strmax * s / t
_append_node('tableSize', ','.join(map(str,l)) )
header = etree.SubElement(new_doc, 'header')
for f in fields_order:
field = etree.SubElement(header, 'field')
field.text = tools.ustr(fields[f]['string'] or '')
lines = etree.SubElement(new_doc, 'lines')
for line in results:
node_line = etree.SubElement(lines, 'row')
count = -1
for f in fields_order:
float_flag = 0
count += 1
if fields[f]['type']=='many2one' and line[f]:
if not line.get('__group'):
line[f] = line[f][1]
if fields[f]['type']=='selection' and line[f]:
for key, value in fields[f]['selection']:
if key == line[f]:
line[f] = value
break
if fields[f]['type'] in ('one2many','many2many') and line[f]:
line[f] = '( '+tools.ustr(len(line[f])) + ' )'
if fields[f]['type'] == 'float' and line[f]:
precision=(('digits' in fields[f]) and fields[f]['digits'][1]) or 2
prec ='%.' + str(precision) +'f'
line[f]=prec%(line[f])
float_flag = 1
if fields[f]['type'] == 'date' and line[f]:
new_d1 = line[f]
if not line.get('__group'):
format = str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))
d1 = datetime.strptime(line[f],'%Y-%m-%d')
new_d1 = d1.strftime(format)
line[f] = new_d1
if fields[f]['type'] == 'time' and line[f]:
new_d1 = line[f]
if not line.get('__group'):
format = str(locale.nl_langinfo(locale.T_FMT))
d1 = datetime.strptime(line[f], '%H:%M:%S')
new_d1 = d1.strftime(format)
line[f] = new_d1
if fields[f]['type'] == 'datetime' and line[f]:
new_d1 = line[f]
if not line.get('__group'):
format = str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))+' '+str(locale.nl_langinfo(locale.T_FMT))
d1 = datetime.strptime(line[f], '%Y-%m-%d %H:%M:%S')
new_d1 = d1.strftime(format)
line[f] = new_d1
if line.get('__group'):
col = etree.SubElement(node_line, 'col', para='group', tree='no')
else:
col = etree.SubElement(node_line, 'col', para='yes', tree='no')
# Prevent empty labels in groups
if f == line.get('__grouped_by') and line.get('__group') and not line[f] and not float_flag and not temp[count]:
col.text = line[f] = 'Undefined'
col.set('tree', 'undefined')
if line[f] is not None:
col.text = tools.ustr(line[f] or '')
if float_flag:
col.set('tree','float')
if line.get('__no_leaf') and temp[count] == 1 and f != 'id' and not line['__context']['group_by']:
tsum[count] = float(tsum[count]) + float(line[f])
if not line.get('__group') and f != 'id' and temp[count] == 1:
tsum[count] = float(tsum[count]) + float(line[f])
else:
col.text = '/'
node_line = etree.SubElement(lines, 'row')
for f in range(0, len(fields_order)):
col = etree.SubElement(node_line, 'col', para='group', tree='no')
col.set('tree', 'float')
if tsum[f] is not None:
if tsum[f] != 0.0:
digits = fields[fields_order[f]].get('digits', (16, 2))
prec = '%%.%sf' % (digits[1], )
total = prec % (tsum[f], )
txt = str(total or '')
else:
txt = str(tsum[f] or '')
else:
txt = '/'
if f == 0:
txt ='Total'
col.set('tree','no')
col.text = tools.ustr(txt or '')
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
self.obj = render.rml(rml, title=self.title)
self.obj.render()
return True
report_printscreen_list('report.printscreen.list')
| mit |
bigmlcom/python | bigml/tests/test_34_time_series.py | 2 | 3565 | # -*- coding: utf-8 -*-
#
# Copyright 2017-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating time series forecasts
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_time_series_steps as time_series_create
from . import create_forecast_steps as forecast_create
class TestTimeSeries(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating forecasts from a dataset:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create time-series from a dataset
And I wait until the time series is ready less than <time_3> secs
And I update the time series name to "<time_series_name>"
When I wait until the time series is ready less than <time_4> secs
Then the time series name is "<time_series_name>"
And I create a forecast for "<input_data>"
Then the forecasts are "<forecast_points>"
Examples:
| data | time_1 | time_2 | time_3 | time_4 | time_series_name |input_data | forecast_points
| ../data/grades.csv | 10 | 10 | 20 | 50 | my new time_series name |
{"000005": {"horizon": 5}], {}}
"""
print(self.test_scenario1.__doc__)
examples = [
['data/grades.csv', '30', '30', '50', '50', 'my new time series name',
'{"000005": {"horizon": 5}}', '{"000005": [{"point_forecast": [73.96192, 74.04106, 74.12029, 74.1996, 74.27899], "model": "M,M,N"}]}']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
time_series_create.i_create_a_time_series(self)
time_series_create.the_time_series_is_finished_in_less_than(self, example[3])
time_series_create.i_update_time_series_name(self, example[5])
time_series_create.the_time_series_is_finished_in_less_than(self, example[4])
time_series_create.i_check_time_series_name(self, example[5])
forecast_create.i_create_a_forecast(self, example[6])
forecast_create.the_forecast_is(self, example[7])
| apache-2.0 |
shsingh/ansible | lib/ansible/modules/network/nxos/nxos_vpc_interface.py | 18 | 10331 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vpc_interface
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages interface VPC configuration
description:
- Manages interface VPC configuration
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Either vpc or peer_link param is required, but not both.
- C(state=absent) removes whatever VPC config is on a port-channel
if one exists.
- Re-assigning a vpc or peerlink from one portchannel to another is not
supported. The module will force the user to unconfigure an existing
vpc/pl before configuring the same value on a new portchannel
options:
portchannel:
description:
- Group number of the portchannel that will be configured.
required: true
vpc:
description:
- VPC group/id that will be configured on associated portchannel.
peer_link:
description:
- Set to true/false for peer link config on associated portchannel.
type: bool
state:
description:
- Manages desired state of the resource.
required: true
choices: ['present','absent']
default: present
'''
EXAMPLES = '''
- nxos_vpc_interface:
portchannel: 10
vpc: 100
'''
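# Additional illustrative snippets (hedged; not part of the module's shipped
# EXAMPLES, and the port-channel numbers below are made up). They follow the
# options described in DOCUMENTATION above:
#
#   # Configure the port-channel as the vPC peer link instead of a vPC member:
#   - nxos_vpc_interface:
#       portchannel: 20
#       peer_link: true
#
#   # Remove whatever vPC or peer-link configuration exists on the port-channel:
#   - nxos_vpc_interface:
#       portchannel: 10
#       state: absent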
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface port-channel100", "vpc 10"]
'''
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_portchannel_list(module):
portchannels = []
pc_list = []
try:
body = run_commands(module, ['show port-channel summary | json'])[0]
pc_list = body['TABLE_channel']['ROW_channel']
except (KeyError, AttributeError, TypeError):
return portchannels
if pc_list:
if isinstance(pc_list, dict):
pc_list = [pc_list]
for pc in pc_list:
portchannels.append(pc['group'])
return portchannels
def get_existing_portchannel_to_vpc_mappings(module):
pc_vpc_mapping = {}
try:
body = run_commands(module, ['show vpc brief | json'])[0]
vpc_table = body['TABLE_vpc']['ROW_vpc']
except (KeyError, AttributeError, TypeError):
vpc_table = None
if vpc_table:
if isinstance(vpc_table, dict):
vpc_table = [vpc_table]
for vpc in vpc_table:
pc_vpc_mapping[str(vpc['vpc-id'])] = str(vpc['vpc-ifindex'])
return pc_vpc_mapping
def peer_link_exists(module):
found = False
run = get_config(module, flags=['vpc'])
vpc_list = run.split('\n')
for each in vpc_list:
if 'peer-link' in each:
found = True
return found
def get_active_vpc_peer_link(module):
peer_link = None
try:
body = run_commands(module, ['show vpc brief | json'])[0]
peer_link = body['TABLE_peerlink']['ROW_peerlink']['peerlink-ifindex']
except (KeyError, AttributeError, TypeError):
return peer_link
return peer_link
def get_portchannel_vpc_config(module, portchannel):
peer_link_pc = None
peer_link = False
vpc = ""
pc = ""
config = {}
try:
body = run_commands(module, ['show vpc brief | json'])[0]
table = body['TABLE_peerlink']['ROW_peerlink']
except (KeyError, AttributeError, TypeError):
table = {}
if table:
peer_link_pc = table.get('peerlink-ifindex', None)
if peer_link_pc:
plpc = str(peer_link_pc[2:])
if portchannel == plpc:
config['portchannel'] = portchannel
config['peer-link'] = True
config['vpc'] = vpc
mapping = get_existing_portchannel_to_vpc_mappings(module)
for existing_vpc, port_channel in mapping.items():
port_ch = str(port_channel[2:])
if port_ch == portchannel:
pc = port_ch
vpc = str(existing_vpc)
config['portchannel'] = pc
config['peer-link'] = peer_link
config['vpc'] = vpc
return config
def get_commands_to_config_vpc_interface(portchannel, delta, config_value, existing):
commands = []
if not delta.get('peer-link') and existing.get('peer-link'):
commands.append('no vpc peer-link')
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
elif delta.get('peer-link') and not existing.get('peer-link'):
commands.append('vpc peer-link')
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
elif delta.get('vpc') and not existing.get('vpc'):
command = 'vpc {0}'.format(config_value)
commands.append(command)
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
return commands
def state_present(portchannel, delta, config_value, existing):
commands = []
command = get_commands_to_config_vpc_interface(
portchannel,
delta,
config_value,
existing
)
commands.append(command)
return commands
def state_absent(portchannel, existing):
commands = []
if existing.get('vpc'):
command = 'no vpc'
commands.append(command)
elif existing.get('peer-link'):
command = 'no vpc peer-link'
commands.append(command)
if commands:
commands.insert(0, 'interface port-channel{0}'.format(portchannel))
return commands
def main():
argument_spec = dict(
portchannel=dict(required=True, type='str'),
vpc=dict(required=False, type='str'),
peer_link=dict(required=False, type='bool'),
state=dict(choices=['absent', 'present'], default='present')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['vpc', 'peer_link']],
supports_check_mode=True)
warnings = list()
commands = []
results = {'changed': False, 'warnings': warnings}
portchannel = module.params['portchannel']
vpc = module.params['vpc']
peer_link = module.params['peer_link']
state = module.params['state']
args = {'portchannel': portchannel, 'vpc': vpc, 'peer-link': peer_link}
active_peer_link = None
if portchannel not in get_portchannel_list(module):
if not portchannel.isdigit() or int(portchannel) not in get_portchannel_list(module):
module.fail_json(msg="The portchannel you are trying to make a"
" VPC or PL is not created yet. "
"Create it first!")
if vpc:
mapping = get_existing_portchannel_to_vpc_mappings(module)
if vpc in mapping and portchannel != mapping[vpc].strip('Po'):
module.fail_json(msg="This vpc is already configured on "
"another portchannel. Remove it first "
"before trying to assign it here. ",
existing_portchannel=mapping[vpc])
for vpcid, existing_pc in mapping.items():
if portchannel == existing_pc.strip('Po') and vpcid != vpc:
module.fail_json(msg="This portchannel already has another"
" VPC configured. Remove it first "
"before assigning this one",
existing_vpc=vpcid)
if peer_link_exists(module):
active_peer_link = get_active_vpc_peer_link(module)
if active_peer_link[-2:] == portchannel:
module.fail_json(msg="That port channel is the current "
"PEER LINK. Remove it if you want it"
" to be a VPC")
config_value = vpc
elif peer_link is not None:
if peer_link_exists(module):
active_peer_link = get_active_vpc_peer_link(module)[2::]
if active_peer_link != portchannel:
if peer_link:
module.fail_json(msg="A peer link already exists on"
" the device. Remove it first",
current_peer_link='Po{0}'.format(active_peer_link))
config_value = 'peer-link'
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_portchannel_vpc_config(module, portchannel)
if state == 'present':
delta = dict(set(proposed.items()).difference(existing.items()))
if delta:
commands = state_present(portchannel, delta, config_value, existing)
elif state == 'absent' and existing:
commands = state_absent(portchannel, existing)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
results['changed'] = True
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
QTek/QRadio | tramatego/src/tramatego/transforms/ipv4_to_score.py | 1 | 1161 | #!/usr/bin/env python
from canari.maltego.utils import debug, progress
from canari.framework import configure #, superuser
from canari.maltego.entities import IPv4Address, Phrase
from common.launchers import get_qradio_data
__author__ = 'Zappus'
__copyright__ = 'Copyright 2016, TramaTego Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'Zappus'
__email__ = '[email protected]'
__status__ = 'Development'
__all__ = [
'dotransform',
#'onterminate' # comment out this line if you don't need this function.
]
#@superuser
@configure(
label='IPv4 to Score',
description='Converts IPv4 into Score using QRadio.',
uuids=[ 'TramaTego.v1.IPv4ToScore' ],
inputs=[ ( 'TramaTego', IPv4Address ) ],
debug=True
)
def dotransform(request, response, config):
command = "--ipv4_to_score " + request.value
qradio_output = get_qradio_data(command, 3)
for entry in qradio_output:
response += Phrase(entry)
return response
def onterminate():
"""
TODO: Write your cleanup logic below or delete the onterminate function and remove it from the __all__ variable
"""
pass | apache-2.0 |
qifeigit/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
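    # Minkowski p-norm between two vectors; used below as the callable passed
    # to DistanceMetric.get_metric("pyfunc", func=dist_func, p=2).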
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
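        # Great-circle (haversine) distance on the unit sphere, assuming x1 and
        # x2 are (latitude, longitude) pairs in radians.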
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
coinkite/connectrum | connectrum/findall.py | 1 | 4527 | #!/usr/bin/env python3
#
#
import bottom, random, time, asyncio
from .svr_info import ServerInfo
import logging
logger = logging.getLogger('connectrum')
class IrcListener(bottom.Client):
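    # Scrapes the #electrum channel on Freenode: Electrum servers announce
    # themselves with nicks prefixed "E_", and a WHO query on each nick returns
    # "host port-list" in the real-name field, which is parsed into ServerInfo.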
def __init__(self, irc_nickname=None, irc_password=None, ssl=True):
self.my_nick = irc_nickname or 'XC%d' % random.randint(1E11, 1E12)
self.password = irc_password or None
self.results = {} # by hostname
self.servers = set()
self.all_done = asyncio.Event()
super(IrcListener, self).__init__(host='irc.freenode.net', port=6697 if ssl else 6667, ssl=ssl)
# setup event handling
self.on('CLIENT_CONNECT', self.connected)
self.on('PING', self.keepalive)
self.on('JOIN', self.joined)
self.on('RPL_NAMREPLY', self.got_users)
self.on('RPL_WHOREPLY', self.got_who_reply)
self.on("client_disconnect", self.reconnect)
self.on('RPL_ENDOFNAMES', self.got_end_of_names)
async def collect_data(self):
# start it process
self.loop.create_task(self.connect())
# wait until done
await self.all_done.wait()
# return the results
return self.results
def connected(self, **kwargs):
logger.debug("Connected")
self.send('NICK', nick=self.my_nick)
self.send('USER', user=self.my_nick, realname='Connectrum Client')
# long delay here as it does an failing Ident probe (10 seconds min)
self.send('JOIN', channel='#electrum')
#self.send('WHO', mask='E_*')
def keepalive(self, message, **kwargs):
self.send('PONG', message=message)
async def joined(self, nick=None, **kwargs):
# happens when we or someone else joins the channel
# seem to take 10 seconds or longer for me to join
logger.debug('Joined: %r' % kwargs)
if nick != self.my_nick:
await self.add_server(nick)
async def got_who_reply(self, nick=None, real_name=None, **kws):
'''
Server replied to one of our WHO requests, with details.
'''
#logger.debug('who reply: %r' % kws)
nick = nick[2:] if nick[0:2] == 'E_' else nick
host, ports = real_name.split(' ', 1)
self.servers.remove(nick)
logger.debug("Found: '%s' at %s with port list: %s",nick, host, ports)
self.results[host.lower()] = ServerInfo(nick, host, ports)
if not self.servers:
self.all_done.set()
async def got_users(self, users=[], **kws):
# After successful join to channel, we are given a list of
# users on the channel. Happens a few times for busy channels.
logger.debug('Got %d (more) users in channel', len(users))
for nick in users:
await self.add_server(nick)
async def add_server(self, nick):
# ignore everyone but electrum servers
if nick.startswith('E_'):
self.servers.add(nick[2:])
async def who_worker(self):
# Fetch details on each Electrum server nick we see
logger.debug('who task starts')
copy = self.servers.copy()
for nn in copy:
logger.debug('do WHO for: ' + nn)
self.send('WHO', mask='E_'+nn)
logger.debug('who task done')
def got_end_of_names(self, *a, **k):
logger.debug('Got all the user names')
assert self.servers, "No one on channel!"
# ask for details on all of those users
self.loop.create_task(self.who_worker())
async def reconnect(self, **kwargs):
# Trigger an event that may cascade to a client_connect.
# Don't continue until a client_connect occurs, which may be never.
logger.warn("Disconnected (will reconnect)")
# Note that we're not in a coroutine, so we don't have access
# to await and asyncio.sleep
time.sleep(3)
# After this line we won't necessarily be connected.
# We've simply scheduled the connect to happen in the future
self.loop.create_task(self.connect())
logger.debug("Reconnect scheduled.")
if __name__ == '__main__':
import logging
logging.getLogger('bottom').setLevel(logging.DEBUG)
logging.getLogger('connectrum').setLevel(logging.DEBUG)
logging.getLogger('asyncio').setLevel(logging.DEBUG)
bot = IrcListener(ssl=False)
bot.loop.set_debug(True)
fut = bot.collect_data()
#bot.loop.create_task(bot.connect())
rv = bot.loop.run_until_complete(fut)
print(rv)
| mit |
popazerty/e2_sh4 | tools/host_tools/FormatConverter/datasource.py | 112 | 2916 | from input import inputChoices
class datasource:
def __init__(self):
self.clear()
def setDatasources(self, datasources):
self.datasources = datasources
def getCapabilities(self):
return []
def getName(self):
return "N/A"
def getStatus(self):
text = str(len(self.transponderlist.keys())) + " Satellites" + "\n"
return text
def printAll(self):
for sat in self.transponderlist.keys():
print "***********"
print "sat:", sat, self.satnames[sat]
for transponder in self.transponderlist[sat]:
print transponder
def clear(self):
self.transponderlist = {}
self.satnames = {}
def read(self):
pass
def write(self):
pass
def addSat(self, satname, satpos):
if not self.transponderlist.has_key(satpos):
self.transponderlist[satpos] = []
self.satnames[satpos] = satname
def addTransponder(self, satpos, transponder):
if len(transponder.keys()) >= 6:
self.transponderlist[satpos].append(transponder)
class genericdatasource(datasource):
def __init__(self):
datasource.__init__(self)
self.source = self.destination = None
def getName(self):
return "Generic Datasource"
def getCapabilities(self):
return [("copy data from one source to another", self.copy), ("merge data from one source into another", self.merge)]
def copy(self):
self.copymerge(action = "copy")
def merge(self):
self.copymerge(action = "merge")
def copymerge(self, action = "copy"):
choice = -1
while choice is not None:
choice = inputChoices(["select source", "select destination", "copy now!"])
if choice == 0:
print "\nselect source:"
self.source = self.selectDatasource()
elif choice == 1:
print "\nselect destination"
self.destination = self.selectDatasource()
elif choice == 2:
self.docopymerge(action)
def docopymerge(self, action = "copy"):
if self.source is None:
print "select a source first!"
elif self.destination is None:
print "select a destination first!"
else:
if action == "copy":
print "copying ",
elif action == "merge":
print "merging ",
print "from %s to %s" % (self.source.getName(), self.destination.getName())
countsat = 0
counttransponder = 0
if action == "copy":
self.destination.clear()
for satpos in self.source.transponderlist.keys():
countsat += 1
self.destination.addSat(self.source.satnames[satpos], satpos)
for transponder in self.source.transponderlist[satpos]:
counttransponder += 1
self.destination.addTransponder(satpos, transponder)
print "copied %d sats with %d transponders" % (countsat, counttransponder)
def selectDatasource(self):
list = []
sources = []
for source in self.datasources:
if source != self:
list.append(source.getName() + (" (%d sats)" % len(source.transponderlist.keys())))
sources.append(source)
choice = inputChoices(list)
if choice is None:
return None
return sources[choice] | gpl-2.0 |
wilima/cryptography | tests/test.py | 1 | 3828 | import unittest
from cryptography import (eratosthenes, euler, extended_gcd, factorization,
gcd, modular_multiplicative_inverse)
from cryptography.ciphers import affine, shift, substitution, vigener
from .context import cryptography
class GcdTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_gcd(self):
self.assertEqual(
gcd.gcd(1071, 462),
21)
def test_gcd2(self):
self.assertEqual(
gcd.gcd(270, 192),
6)
class ExtendedGcdTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_extended_gcd(self):
self.assertEqual(
extended_gcd.extended_gcd(1914, 899),
(29, 8, -17))
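        # Bezout identity behind the expected value:
        # 8 * 1914 + (-17) * 899 = 15312 - 15283 = 29 = gcd(1914, 899).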
class ModularInverseTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_modular_inverse(self):
self.assertEqual(
modular_multiplicative_inverse.inverse(5, 26),
21)
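        # 5 * 21 = 105 = 4 * 26 + 1, so 21 is indeed the inverse of 5 modulo 26.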
class FactorizationTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_factorization(self):
self.assertEqual(
factorization.integer_factorization(315),
[3, 3, 5, 7])
class EratosthenesTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_eratosthenes_sieve(self):
self.assertEqual(
eratosthenes.eratosthenes_sieve(20),
[2, 3, 5, 7, 11, 13, 17, 19])
class EulerFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_euler_function(self):
self.assertEqual(
euler.euler_function(1),
1)
def test_euler_function2(self):
self.assertEqual(
euler.euler_function(5),
4)
class ShiftCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_shift_encrypt_function(self):
self.assertEqual(
shift.encrypt('BARBARIUTOCI', 3),
'eduedulxwrfl'.upper())
def test_shift_decrypt_function(self):
self.assertEqual(
shift.decrypt('eduedulxwrfl', 3),
'BARBARIUTOCI')
def test_shift_crack_function(self):
self.assertEqual(
'BARBARIUTOCI' in shift.crack('eduedulxwrfl', 26),
True)
class AffineCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_affine_encrypt_function(self):
self.assertEqual(
affine.encrypt('THEINITIAL', (5, 9)),
'ASDXWXAXJM')
def test_affine_decrypt_function(self):
self.assertEqual(
affine.decrypt('ASDXWXAXJM', (5, 9)),
'THEINITIAL')
def test_affine_crack_function(self):
self.assertEqual(
'THEINITIAL' in affine.crack('ASDXWXAXJM', 26),
True)
class SubstitutionCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_substitution_encrypt_function(self):
self.assertEqual(
substitution.encrypt('FLEEATONCEWEAREDISCOVERED', ('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'ZEBRASCDFGHIJKLMNOPQTUVWXY')),
'SIAAZQLKBAVAZOARFPBLUAOAR')
def test_substitution_decrypt_function(self):
self.assertEqual(
substitution.decrypt('SIAAZQLKBAVAZOARFPBLUAOAR', ('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'ZEBRASCDFGHIJKLMNOPQTUVWXY')),
'FLEEATONCEWEAREDISCOVERED')
class VigenerCipherFunctionTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_vigener_encrypt_function(self):
self.assertEqual(
vigener.encrypt('KULTURNIATASEJESPION', 'PES'),
'ZYDIYJCMSIEKTNWHTADR')
def test_vigener_decrypt_function(self):
self.assertEqual(
vigener.decrypt('ZYDIYJCMSIEKTNWHTADR', 'PES'),
'KULTURNIATASEJESPION')
if __name__ == '__main__':
unittest.main()
| mit |
scottdangelo/RemoveVolumeMangerLocks | cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py | 5 | 5546 | # Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the NetApp 7mode NFS storage driver
"""
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_7mode
from cinder.volume.drivers.netapp import utils as na_utils
@ddt.ddt
class NetApp7modeNfsDriverTestCase(test.TestCase):
def setUp(self):
super(NetApp7modeNfsDriverTestCase, self).setUp()
kwargs = {'configuration': self.get_config_7mode()}
with mock.patch.object(utils, 'get_root_helper',
return_value=mock.Mock()):
with mock.patch.object(remotefs_brick, 'RemoteFsClient',
return_value=mock.Mock()):
self.driver = nfs_7mode.NetApp7modeNfsDriver(**kwargs)
self.driver._mounted_shares = [fake.NFS_SHARE]
self.driver.ssc_vols = True
self.driver.zapi_client = mock.Mock()
def get_config_7mode(self):
config = na_fakes.create_configuration_cmode()
config.netapp_storage_protocol = 'nfs'
config.netapp_login = 'root'
config.netapp_password = 'pass'
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'http'
config.netapp_server_port = '80'
return config
@ddt.data({'nfs_sparsed_volumes': True},
{'nfs_sparsed_volumes': False})
@ddt.unpack
def test_get_pool_stats(self, nfs_sparsed_volumes):
self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes
thick = not nfs_sparsed_volumes
total_capacity_gb = na_utils.round_down(
fake.TOTAL_BYTES / units.Gi, '0.01')
free_capacity_gb = na_utils.round_down(
fake.AVAILABLE_BYTES / units.Gi, '0.01')
provisioned_capacity_gb = total_capacity_gb - free_capacity_gb
capacity = {
'reserved_percentage': fake.RESERVED_PERCENTAGE,
'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
}
self.mock_object(self.driver,
'_get_share_capacity_info',
mock.Mock(return_value=capacity))
result = self.driver._get_pool_stats()
expected = [{'pool_name': '192.168.99.24:/fake/export/path',
'QoS_support': False,
'thick_provisioning_support': thick,
'thin_provisioning_support': not thick,
'free_capacity_gb': 12.0,
'total_capacity_gb': 4468.0,
'reserved_percentage': 7,
'max_over_subscription_ratio': 19.0,
'provisioned_capacity_gb': 4456.0}]
self.assertEqual(expected, result)
def test_shortlist_del_eligible_files(self):
mock_get_path_for_export = self.mock_object(
self.driver.zapi_client, 'get_actual_path_for_export')
mock_get_path_for_export.return_value = fake.FLEXVOL
mock_get_file_usage = self.mock_object(
self.driver.zapi_client, 'get_file_usage')
mock_get_file_usage.return_value = fake.CAPACITY_VALUES[0]
expected = [(old_file, fake.CAPACITY_VALUES[0]) for old_file
in fake.FILE_LIST]
result = self.driver._shortlist_del_eligible_files(
fake.NFS_SHARE, fake.FILE_LIST)
self.assertEqual(expected, result)
def test_shortlist_del_eligible_files_empty_list(self):
mock_get_export_ip_path = self.mock_object(
self.driver, '_get_export_ip_path')
mock_get_export_ip_path.return_value = ('', '/export_path')
mock_get_path_for_export = self.mock_object(
self.driver.zapi_client, 'get_actual_path_for_export')
mock_get_path_for_export.return_value = fake.FLEXVOL
result = self.driver._shortlist_del_eligible_files(
fake.NFS_SHARE, [])
self.assertEqual([], result)
@ddt.data({'has_space': True, 'expected': True},
{'has_space': False, 'expected': False})
@ddt.unpack
def test_is_share_clone_compatible(self, has_space, expected):
mock_share_has_space_for_clone = self.mock_object(
self.driver, '_share_has_space_for_clone')
mock_share_has_space_for_clone.return_value = has_space
result = self.driver._is_share_clone_compatible(fake.VOLUME,
fake.NFS_SHARE)
self.assertEqual(expected, result)
| apache-2.0 |
mvesper/invenio | modules/websubmit/lib/functions/Test_Status.py | 3 | 3087 | # This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
## Description: function Test_Status
## This function checks whether the document is still waiting
## for approval or not.
## Author: T.Baron
##
## PARAMETERS: -
from invenio.dbquery import run_sql
from invenio.websubmit_config import InvenioWebSubmitFunctionStop
def Test_Status(parameters, curdir, form, user_info=None):
"""
This function checks whether the considered document has been
requested for approval and is still waiting for approval. It also
checks whether the password stored in file 'password' of the
submission directory corresponds to the password associated with
the document.
"""
global rn
res = run_sql("SELECT status, access FROM sbmAPPROVAL WHERE rn=%s", (rn,))
if len(res) == 0:
raise InvenioWebSubmitFunctionStop(printNotRequested(rn))
else:
if res[0][0] == "approved":
raise InvenioWebSubmitFunctionStop(printApproved(rn))
elif res[0][0] == "rejected":
raise InvenioWebSubmitFunctionStop(printRejected(rn))
return ""
def printNotRequested(rn):
t="""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
alert('The document %s has never been asked for approval.\\nAnyway, you can still choose another document if you wish.');
document.forms[0].submit();
</SCRIPT>""" % rn
return t
def printApproved(rn):
t="""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
alert('The document %s has already been approved.\\nAnyway, you can still choose another document if you wish.');
document.forms[0].submit();
</SCRIPT>""" % rn
return t
def printRejected(rn):
t="""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
alert('The document %s has already been rejected.\\nAnyway, you can still choose another document if you wish.');
document.forms[0].submit();
</SCRIPT>""" % rn
return t
| gpl-2.0 |
librasungirl/openthread | tools/harness-automation/cases_R140/leader_9_2_4.py | 18 | 1877 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Leader_9_2_4(HarnessCase):
role = HarnessCase.ROLE_LEADER
case = '9 2 4'
golden_devices_required = 1
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
barbagroup/PetIBM | examples/ibpm/cylinder2dRe40/scripts/plotVorticity.py | 4 | 1401 | """
Computes, plots, and saves the 2D vorticity field from a PetIBM simulation
after 2000 time steps (20 non-dimensional time-units).
"""
import pathlib
import h5py
import numpy
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
# Read vorticity field and its grid from files.
name = 'wz'
filepath = data_dir / 'grid.h5'
f = h5py.File(filepath, 'r')
x, y = f[name]['x'][:], f[name]['y'][:]
X, Y = numpy.meshgrid(x, y)
timestep = 2000
filepath = data_dir / '{:0>7}.h5'.format(timestep)
f = h5py.File(filepath, 'r')
wz = f[name][:]
# Read body coordinates from file.
filepath = simu_dir / 'circle.body'
with open(filepath, 'r') as infile:
xb, yb = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, skiprows=1)
pyplot.rc('font', family='serif', size=16)
# Plot contour lines of the vorticity.
fig, ax = pyplot.subplots(figsize=(6.0, 6.0))
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
levels = numpy.linspace(-3.0, 3.0, 16)
ax.contour(X, Y, wz, levels=levels, colors='black')
ax.plot(xb, yb, color='red')
ax.set_xlim(-1.0, 4.0)
ax.set_ylim(-2.0, 2.0)
ax.set_aspect('equal')
fig.tight_layout()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'wz{:0>7}.png'.format(timestep)
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
bitcity/django | tests/defer_regress/models.py | 282 | 2692 | """
Regression tests for defer() / only() behavior.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=15)
text = models.TextField(default="xyzzy")
value = models.IntegerField()
other_value = models.IntegerField(default=0)
def __str__(self):
return self.name
class RelatedItem(models.Model):
item = models.ForeignKey(Item, models.CASCADE)
class ProxyRelated(RelatedItem):
class Meta:
proxy = True
class Child(models.Model):
name = models.CharField(max_length=10)
value = models.IntegerField()
@python_2_unicode_compatible
class Leaf(models.Model):
name = models.CharField(max_length=10)
child = models.ForeignKey(Child, models.CASCADE)
second_child = models.ForeignKey(Child, models.SET_NULL, related_name="other", null=True)
value = models.IntegerField(default=42)
def __str__(self):
return self.name
class ResolveThis(models.Model):
num = models.FloatField()
name = models.CharField(max_length=16)
class Proxy(Item):
class Meta:
proxy = True
@python_2_unicode_compatible
class SimpleItem(models.Model):
name = models.CharField(max_length=15)
value = models.IntegerField()
def __str__(self):
return self.name
class Feature(models.Model):
item = models.ForeignKey(SimpleItem, models.CASCADE)
class SpecialFeature(models.Model):
feature = models.ForeignKey(Feature, models.CASCADE)
class OneToOneItem(models.Model):
item = models.OneToOneField(Item, models.CASCADE, related_name="one_to_one_item")
name = models.CharField(max_length=15)
class ItemAndSimpleItem(models.Model):
item = models.ForeignKey(Item, models.CASCADE)
simple = models.ForeignKey(SimpleItem, models.CASCADE)
class Profile(models.Model):
profile1 = models.CharField(max_length=1000, default='profile1')
class Location(models.Model):
location1 = models.CharField(max_length=1000, default='location1')
class Request(models.Model):
profile = models.ForeignKey(Profile, models.SET_NULL, null=True, blank=True)
location = models.ForeignKey(Location, models.CASCADE)
items = models.ManyToManyField(Item)
request1 = models.CharField(default='request1', max_length=1000)
request2 = models.CharField(default='request2', max_length=1000)
request3 = models.CharField(default='request3', max_length=1000)
request4 = models.CharField(default='request4', max_length=1000)
class Base(models.Model):
text = models.TextField()
class Derived(Base):
other_text = models.TextField()
| bsd-3-clause |
zstyblik/infernal-twin | sql_insert.py | 1 | 3025 | import MySQLdb
import db_connect_creds
from datetime import datetime
username, password = db_connect_creds.read_creds()
cxn = MySQLdb.connect('localhost', user=username, passwd=password)
date = datetime.now()
cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless')
cxn.commit()
cxn.close()
cxn = MySQLdb.connect(db='InfernalWireless')
cur = cxn.cursor()
current_project_id = 0
#~ cxn = MySQLdb.connect('localhost','root',"")
#~
#~ date = datetime.now()
#~
#~
#~ cxn.query('CREATE DATABASE IF NOT EXISTS InfernalWireless')
#~
#~ cxn.commit()
#~ cxn.close()
#~
#~ cxn = MySQLdb.connect(db='InfernalWireless')
#~
#~ cur = cxn.cursor()
#~
#~ current_project_id = 0
def create_project_table():
##############3333 THIS IS GOING TO CRAETE A TABLE FOR PROJECT
#~ cur.execute("CREATE TABLE mytable (id AUTO_INCREMENT")
PROJECT_TITLE = '''CREATE TABLE IF NOT EXISTS Projects (
ProjectId MEDIUMINT NOT NULL AUTO_INCREMENT, ProjectName TEXT, PRIMARY KEY (ProjectId), AuditorName TEXT, TargetName TEXT, date TEXT)'''
cur.execute(PROJECT_TITLE)
create_project_table()
def project_details(projectname, Authors_name, TargetName, date):
PROJECT_DETAILS = 'INSERT INTO Projects (ProjectName, AuditorName, TargetName, date) VALUES ("%s","%s","%s","%s")'%(projectname, Authors_name, TargetName, date)
cur.execute(PROJECT_DETAILS)
current_project_id_tmp = cur.lastrowid
    global current_project_id  # without this, the assignment below only binds a local name
    current_project_id = current_project_id_tmp
print "report is generated"
return current_project_id_tmp
def create_report_table():
##############3333 THIS IS GOING TO CRAETE A TABLE FOR PROJECT
report_table = '''CREATE TABLE IF NOT EXISTS Reports (findingID MEDIUMINT NOT NULL AUTO_INCREMENT, finding_name TEXT, phase TEXT, PRIMARY KEY (findingID), risk_level TEXT, risk_category TEXT, Findings_detail TEXT, Notes TEXT, Project_fk_Id MEDIUMINT, FOREIGN KEY (Project_fk_Id) REFERENCES Projects (ProjectId))'''
cur.execute(report_table)
create_report_table()
def create_report(self, finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id):
########## THIS IS GOING TO INSERT DATA INTO FINDINGS TABLE
pID = current_project_id
REPORT_DETAILS = 'INSERT INTO Reports (finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id) VALUES ("%s","%s","%s","%s","%s","%s","%s")'%( finding_name, phase, risk_level, risk_category, Findings_detail, Notes, Project_fk_Id)
cur.execute(REPORT_DETAILS)
print pID
def print_hello(test_data):
print test_data
################ DB POPULATE DATABASE ###########
#~ prID = project_details('test','est','23s','12/12/12')
#~
#~ create_report('Title of the finding','Choose a phase','Choose a category','Choose risk level','Enter the findings details','Notes on the findings',int(prID))
################################################################### DUMMY DATABASE QUERIES ##############
#~ print type(prID)
cur.close()
cxn.commit()
cxn.close()
print "DB has been updated"
| gpl-3.0 |
javierag/samba | python/samba/tests/__init__.py | 3 | 8238 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2007-2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba Python tests."""
import os
import ldb
import samba
import samba.auth
from samba import param
from samba.samdb import SamDB
import subprocess
import tempfile
samba.ensure_external_module("mimeparse", "mimeparse")
samba.ensure_external_module("extras", "extras")
samba.ensure_external_module("testtools", "testtools")
# Other modules import these two classes from here, for convenience:
from testtools.testcase import (
TestCase as TesttoolsTestCase,
TestSkipped,
)
class TestCase(TesttoolsTestCase):
"""A Samba test case."""
def setUp(self):
super(TestCase, self).setUp()
test_debug_level = os.getenv("TEST_DEBUG_LEVEL")
if test_debug_level is not None:
test_debug_level = int(test_debug_level)
self._old_debug_level = samba.get_debug_level()
samba.set_debug_level(test_debug_level)
self.addCleanup(samba.set_debug_level, test_debug_level)
def get_loadparm(self):
return env_loadparm()
def get_credentials(self):
return cmdline_credentials
class LdbTestCase(TesttoolsTestCase):
"""Trivial test case for running tests against a LDB."""
def setUp(self):
super(LdbTestCase, self).setUp()
self.filename = os.tempnam()
self.ldb = samba.Ldb(self.filename)
def set_modules(self, modules=[]):
"""Change the modules for this Ldb."""
m = ldb.Message()
m.dn = ldb.Dn(self.ldb, "@MODULES")
m["@LIST"] = ",".join(modules)
self.ldb.add(m)
self.ldb = samba.Ldb(self.filename)
class TestCaseInTempDir(TestCase):
def setUp(self):
super(TestCaseInTempDir, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.addCleanup(self._remove_tempdir)
def _remove_tempdir(self):
self.assertEquals([], os.listdir(self.tempdir))
os.rmdir(self.tempdir)
self.tempdir = None
def env_loadparm():
lp = param.LoadParm()
try:
lp.load(os.environ["SMB_CONF_PATH"])
except KeyError:
raise KeyError("SMB_CONF_PATH not set")
return lp
def env_get_var_value(var_name):
"""Returns value for variable in os.environ
    Function throws AssertionError if variable is not defined.
Unit-test based python tests require certain input params
to be set in environment, otherwise they can't be run
"""
assert var_name in os.environ.keys(), "Please supply %s in environment" % var_name
return os.environ[var_name]
cmdline_credentials = None
class RpcInterfaceTestCase(TestCase):
"""DCE/RPC Test case."""
class ValidNetbiosNameTests(TestCase):
def test_valid(self):
self.assertTrue(samba.valid_netbios_name("FOO"))
def test_too_long(self):
self.assertFalse(samba.valid_netbios_name("FOO"*10))
def test_invalid_characters(self):
self.assertFalse(samba.valid_netbios_name("*BLA"))
class BlackboxProcessError(Exception):
"""This is raised when check_output() process returns a non-zero exit status
Exception instance should contain the exact exit code (S.returncode),
command line (S.cmd), process output (S.stdout) and process error stream
(S.stderr)
"""
def __init__(self, returncode, cmd, stdout, stderr):
self.returncode = returncode
self.cmd = cmd
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return "Command '%s'; exit status %d; stdout: '%s'; stderr: '%s'" % (self.cmd, self.returncode,
self.stdout, self.stderr)
class BlackboxTestCase(TestCase):
"""Base test case for blackbox tests."""
def _make_cmdline(self, line):
bindir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../bin"))
parts = line.split(" ")
if os.path.exists(os.path.join(bindir, parts[0])):
parts[0] = os.path.join(bindir, parts[0])
line = " ".join(parts)
return line
def check_run(self, line):
line = self._make_cmdline(line)
p = subprocess.Popen(line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
retcode = p.wait()
if retcode:
raise BlackboxProcessError(retcode, line, p.stdout.read(), p.stderr.read())
def check_output(self, line):
line = self._make_cmdline(line)
p = subprocess.Popen(line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, close_fds=True)
retcode = p.wait()
if retcode:
raise BlackboxProcessError(retcode, line, p.stdout.read(), p.stderr.read())
return p.stdout.read()
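# Illustrative sketch only (not part of the original helpers): a blackbox test
# built on BlackboxTestCase might look like the following; the class name and
# command line are hypothetical examples.
#
#   class SambaToolHelpTests(BlackboxTestCase):
#       def test_help_runs(self):
#           out = self.check_output("samba-tool --help")
#           self.assertTrue(len(out) > 0)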
def connect_samdb(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False, global_schema=True):
"""Create SamDB instance and connects to samdb_url database.
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
:param global_schema: Whether to use global schema.
Added value for tests is that we have a shorthand function
to make proper URL for ldb.connect() while using default
parameters for connection based on test environment
"""
samdb_url = samdb_url.lower()
if not "://" in samdb_url:
if not ldap_only and os.path.isfile(samdb_url):
samdb_url = "tdb://%s" % samdb_url
else:
samdb_url = "ldap://%s" % samdb_url
# use 'paged_search' module when connecting remotely
if samdb_url.startswith("ldap://"):
ldb_options = ["modules:paged_searches"]
elif ldap_only:
raise AssertionError("Trying to connect to %s while remote "
"connection is required" % samdb_url)
# set defaults for test environment
if lp is None:
lp = env_loadparm()
if session_info is None:
session_info = samba.auth.system_session(lp)
if credentials is None:
credentials = cmdline_credentials
return SamDB(url=samdb_url,
lp=lp,
session_info=session_info,
credentials=credentials,
flags=flags,
options=ldb_options,
global_schema=global_schema)
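# Example usage (illustrative, not executed here): a test would typically take
# the server or database file to connect to from the test environment and reuse
# the shared command-line credentials, e.g.
#
#   lp = env_loadparm()
#   samdb = connect_samdb(env_get_var_value("DC_SERVER_LDAP_URL"), lp=lp,
#                         credentials=cmdline_credentials)
#
# "DC_SERVER_LDAP_URL" is only an assumed variable name for this sketch.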
def connect_samdb_ex(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False):
"""Connects to samdb_url database
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
:return: (sam_db_connection, rootDse_record) tuple
"""
sam_db = connect_samdb(samdb_url, lp, session_info, credentials,
flags, ldb_options, ldap_only)
# fetch RootDse
res = sam_db.search(base="", expression="", scope=ldb.SCOPE_BASE,
attrs=["*"])
return (sam_db, res[0])
def delete_force(samdb, dn):
try:
samdb.delete(dn)
except ldb.LdbError, (num, _):
assert(num == ldb.ERR_NO_SUCH_OBJECT)
| gpl-3.0 |
martinbuc/missionplanner | packages/IronPython.StdLib.2.7.4/content/Lib/rlcompleter.py | 61 | 6036 | """Word completion for GNU readline 2.0.
This requires the latest extension to the readline module. The completer
completes keywords, built-ins and globals in a selectable namespace (which
defaults to __main__); when completing NAME.NAME..., it evaluates (!) the
expression up to the last dot and completes its attributes.
It's very cool to do "import sys" type "sys.", hit the
completion key (twice), and see the list of names defined by the
sys module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and
generally cause the completion to fail). This is a feature -- since
readline sets the tty device in raw (or cbreak) mode, printing a
traceback wouldn't work well without some complicated hoopla to save,
reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary
application defined code to be executed if an object with a
__getattr__ hook is found. Since it is the responsibility of the
application (or the user) to enable this feature, I consider this an
acceptable risk. More complicated expressions (e.g. function calls or
indexing operations) are *not* evaluated.
- GNU readline is also used by the built-in functions input() and
raw_input(), and thus these also benefit/suffer from the completer
features. Clearly an interactive application can benefit by
specifying its own completer function and using raw_input() for all
its input.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
import __builtin__
import __main__
__all__ = ["Completer"]
class Completer:
def __init__(self, namespace = None):
"""Create a new completer for the command line.
Completer([namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
if namespace and not isinstance(namespace, dict):
raise TypeError,'namespace must be a dictionary'
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def _callable_postfix(self, val, word):
if hasattr(val, '__call__'):
word = word + "("
return word
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
import keyword
matches = []
n = len(text)
for word in keyword.kwlist:
if word[:n] == text:
matches.append(word)
for nspace in [__builtin__.__dict__, self.namespace]:
for word, val in nspace.items():
if word[:n] == text and word != "__builtins__":
matches.append(self._callable_postfix(val, word))
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
# get the content of the object, except __builtins__
words = dir(thisobject)
if "__builtins__" in words:
words.remove("__builtins__")
if hasattr(thisobject, '__class__'):
words.append('__class__')
words.extend(get_class_members(thisobject.__class__))
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and hasattr(thisobject, word):
val = getattr(thisobject, word)
word = self._callable_postfix(val, "%s.%s" % (expr, word))
matches.append(word)
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
try:
import readline
except ImportError:
pass
else:
readline.set_completer(Completer().complete)
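# Example usage (illustrative, not executed by this module): completing names
# from an explicit namespace instead of __main__:
#
#   import readline, rlcompleter
#   my_ns = {'answer': 42}
#   readline.set_completer(rlcompleter.Completer(my_ns).complete)
#   readline.parse_and_bind("tab: complete")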
| gpl-3.0 |
jonyroda97/redbot-amigosprovaveis | lib/matplotlib/units.py | 2 | 6084 | """
The classes here provide support for using custom classes with
matplotlib, e.g., those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, e.g., a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation;
rather a units implementation must provide the register with the Registry
converter dictionary and a ConversionInterface. For example,
here is a complete implementation which supports plotting with native
datetime objects::
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
'convert value to a scalar or array'
return dates.date2num(value)
@staticmethod
def axisinfo(unit, axis):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
return 'date'
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.cbook import iterable, is_numlike, safe_first_element
import numpy as np
class AxisInfo(object):
"""information to support default axis labeling and tick labeling, and
default limits"""
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None,
default_limits=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
default_limits: the default min, max of the axis if no data is present
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
self.default_limits = default_limits
class ConversionInterface(object):
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
@staticmethod
def axisinfo(unit, axis):
'return an units.AxisInfo instance for axis with the specified units'
return None
@staticmethod
def default_units(x, axis):
'return the default unit for x or None for the given axis'
return None
@staticmethod
def convert(obj, unit, axis):
"""
convert obj using unit for the specified axis. If obj is a sequence,
return the converted sequence. The output must be a sequence of
scalars that can be used by the numpy array layer
"""
return obj
@staticmethod
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain-ol unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self):
return None # nothing registered
# DISABLED idx = id(x)
# DISABLED cached = self._cached.get(idx)
# DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if isinstance(x, np.ndarray) and x.size:
xravel = x.ravel()
try:
# pass the first value of x that is not masked back to
# get_converter
if not np.all(xravel.mask):
# some elements are not masked
converter = self.get_converter(
xravel[np.argmin(xravel.mask)])
return converter
except AttributeError:
# not a masked_array
# Make sure we don't recurse forever -- it's possible for
# ndarray subclasses to continue to return subclasses and
# not ever return a non-subclass for a single element.
next_item = xravel[0]
if (not isinstance(next_item, np.ndarray) or
next_item.shape != x.shape):
converter = self.get_converter(next_item)
return converter
if converter is None:
try:
thisx = safe_first_element(x)
except (TypeError, StopIteration):
pass
else:
if classx and classx != getattr(thisx, '__class__', None):
converter = self.get_converter(thisx)
return converter
# DISABLED self._cached[idx] = converter
return converter
registry = Registry()
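# Minimal sketch (assumption: a user-defined Temperature class with a .celsius
# attribute exists) of hooking a custom type into this registry, mirroring the
# datetime example in the module docstring:
#
#   import matplotlib.units as units
#
#   class TemperatureConverter(units.ConversionInterface):
#       @staticmethod
#       def convert(value, unit, axis):
#           # assume Temperature exposes a .celsius attribute
#           return value.celsius
#       @staticmethod
#       def default_units(x, axis):
#           return 'celsius'
#
#   units.registry[Temperature] = TemperatureConverter()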
| gpl-3.0 |
dkerwin/ansible-modules-core | network/cumulus/cl_bond.py | 5 | 15552 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <[email protected]>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cl_bond
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bond port on Cumulus Linux
description:
- Configures a bond interface on Cumulus Linux To configure a bridge port
use the cl_bridge module. To configure any other type of interface use the
cl_interface module. Follow the guidelines for bonding found in the
Cumulus User Guide at http://docs.cumulusnetworks.com
options:
name:
description:
- name of the interface
required: true
alias_name:
description:
- add a port description
ipv4:
description:
- list of IPv4 addresses to configure on the interface.
use X.X.X.X/YY syntax.
ipv6:
description:
- list of IPv6 addresses to configure on the interface.
use X:X:X::X/YYY syntax
addr_method:
description:
- configures the port to use DHCP.
To enable this feature use the option 'dhcp'
choices: ['dhcp']
mtu:
description:
- set MTU. Configure Jumbo Frame by setting MTU to 9000.
virtual_ip:
description:
- define IPv4 virtual IP used by the Cumulus Linux VRR feature
virtual_mac:
description:
- define Ethernet mac associated with Cumulus Linux VRR feature
vids:
description:
- in vlan aware mode, lists vlans defined under the interface
mstpctl_bpduguard:
description:
- Enables BPDU Guard on a port in vlan-aware mode
mstpctl_portnetwork:
description:
- Enables bridge assurance in vlan-aware mode
mstpctl_portadminedge:
description:
- Enables admin edge port
clag_id:
description:
- specify a unique clag_id for every dual connected bond on each
peer switch. The value must be between 1 and 65535 and must be the
same on both peer switches in order for the bond to be considered
dual-connected
pvid:
description:
- in vlan aware mode, defines vlan that is the untagged vlan
miimon:
description:
- mii link monitoring interval
default: 100
mode:
description:
- bond mode. as of Cumulus Linux 2.5 only LACP bond mode is
supported
default: '802.3ad'
min_links:
description:
- minimum number of links
default: 1
lacp_bypass_allow:
description:
- Enable LACP bypass.
lacp_bypass_period:
description:
- Period for enabling LACP bypass. Max value is 900.
lacp_bypass_priority:
description:
- List of ports and priorities. Example "swp1=10, swp2=20"
lacp_bypass_all_active:
description:
- Activate all interfaces for bypass.
It is recommended to configure all_active instead
of using bypass_priority.
lacp_rate:
description:
- lacp rate
default: 1
slaves:
description:
- bond members
required: True
xmit_hash_policy:
description:
- transmit load balancing algorithm. As of Cumulus Linux 2.5 only
layer3+4 policy is supported
default: layer3+4
location:
description:
- interface directory location
default:
- /etc/network/interfaces.d
requirements: [ Alternate Debian network interface manager - \
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
- because the module writes the interface directory location. Ensure that
``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/*' or
whatever path is mentioned in the ``location`` attribute.
- For the config to be activated, i.e installed in the kernel,
"service networking reload" needs be be executed. See EXAMPLES section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bond interface with IP address
cl_bond: name=bond0 slaves="swp4-5" ipv4=10.1.1.1/24
notify: reload networking
# configure bond as a dual-connected clag bond
cl_bond: name=bond1 slaves="swp1s0 swp2s0" clag_id=1
notify: reload networking
# define cl_bond once in tasks file
# then write interface config in variables file
# with just the options you want.
cl_bond:
name: "{{ item.key }}"
slaves: "{{ item.value.slaves }}"
clag_id: "{{ item.value.clag_id|default(omit) }}"
ipv4: "{{ item.value.ipv4|default(omit) }}"
ipv6: "{{ item.value.ipv6|default(omit) }}"
alias_name: "{{ item.value.alias_name|default(omit) }}"
addr_method: "{{ item.value.addr_method|default(omit) }}"
mtu: "{{ item.value.mtu|default(omit) }}"
vids: "{{ item.value.vids|default(omit) }}"
virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}"
mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}"
mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}"
with_dict: cl_bonds
notify: reload networking
# In vars file
# ============
cl_bonds:
bond0:
alias_name: 'uplink to isp'
slaves: ['swp1', 'swp3']
ipv4: '10.1.1.1/24'
bond2:
vids: [1, 50]
clag_id: 1
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
# handy helper for calling system calls.
# calls AnsibleModule.run_command and prints a more appropriate message
# exec_path - path to file to execute, with all its arguments.
# E.g "/sbin/ip -o link show"
# failure_msg - what message to print on failure
def run_cmd(module, exec_path):
(_rc, out, _err) = module.run_command(exec_path)
if _rc > 0:
if re.search('cannot find interface', _err):
return '[{}]'
failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
module.fail_json(msg=failure_msg)
else:
return out
def current_iface_config(module):
# due to a bug in ifquery, have to check for presence of interface file
# and not rely solely on ifquery. when bug is fixed, this check can be
# removed
_ifacename = module.params.get('name')
_int_dir = module.params.get('location')
module.custom_current_config = {}
if os.path.exists(_int_dir + '/' + _ifacename):
_cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
module.custom_current_config = module.from_json(
run_cmd(module, _cmd))[0]
def build_address(module):
# if addr_method == 'dhcp', dont add IP address
if module.params.get('addr_method') == 'dhcp':
return
_ipv4 = module.params.get('ipv4')
_ipv6 = module.params.get('ipv6')
_addresslist = []
if _ipv4 and len(_ipv4) > 0:
_addresslist += _ipv4
if _ipv6 and len(_ipv6) > 0:
_addresslist += _ipv6
if len(_addresslist) > 0:
module.custom_desired_config['config']['address'] = ' '.join(
_addresslist)
def build_vids(module):
_vids = module.params.get('vids')
if _vids and len(_vids) > 0:
module.custom_desired_config['config']['bridge-vids'] = ' '.join(_vids)
def build_pvid(module):
_pvid = module.params.get('pvid')
if _pvid:
module.custom_desired_config['config']['bridge-pvid'] = str(_pvid)
def conv_bool_to_str(_value):
if isinstance(_value, bool):
if _value is True:
return 'yes'
else:
return 'no'
return _value
def conv_array_to_str(_value):
if isinstance(_value, list):
return ' '.join(_value)
return _value
def build_generic_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
_value = conv_array_to_str(_value)
if _value:
module.custom_desired_config['config'][
re.sub('_', '-', _attr)] = str(_value)
def build_alias_name(module):
alias_name = module.params.get('alias_name')
if alias_name:
module.custom_desired_config['config']['alias'] = alias_name
def build_addr_method(module):
_addr_method = module.params.get('addr_method')
if _addr_method:
module.custom_desired_config['addr_family'] = 'inet'
module.custom_desired_config['addr_method'] = _addr_method
def build_vrr(module):
_virtual_ip = module.params.get('virtual_ip')
_virtual_mac = module.params.get('virtual_mac')
vrr_config = []
if _virtual_ip:
vrr_config.append(_virtual_mac)
vrr_config.append(_virtual_ip)
module.custom_desired_config.get('config')['address-virtual'] = \
' '.join(vrr_config)
def add_glob_to_array(_bondmems):
"""
    Go through each bond member; if an entry contains a dash (a port range),
    prefix it with 'glob' so that ifupdown2 expands the range.
"""
result = []
if isinstance(_bondmems, list):
for _entry in _bondmems:
if re.search('-', _entry):
_entry = 'glob ' + _entry
result.append(_entry)
return ' '.join(result)
return _bondmems
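# For illustration only (not in the original module): a port range picks up the
# ifupdown2 glob keyword while plain names pass through unchanged, e.g.
#   add_glob_to_array(['swp1-4', 'swp10'])  ->  'glob swp1-4 swp10'
#   add_glob_to_array('swp1')               ->  'swp1'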
def build_bond_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
_value = add_glob_to_array(_value)
if _value:
module.custom_desired_config['config'][
'bond-' + re.sub('_', '-', _attr)] = str(_value)
def build_desired_iface_config(module):
"""
take parameters defined and build ifupdown2 compatible hash
"""
module.custom_desired_config = {
'addr_family': None,
'auto': True,
'config': {},
'name': module.params.get('name')
}
for _attr in ['slaves', 'mode', 'xmit_hash_policy',
'miimon', 'lacp_rate', 'lacp_bypass_allow',
'lacp_bypass_period', 'lacp_bypass_all_active',
'min_links']:
build_bond_attr(module, _attr)
build_addr_method(module)
build_address(module)
build_vids(module)
build_pvid(module)
build_alias_name(module)
build_vrr(module)
    for _attr in ['mtu', 'mstpctl_portnetwork', 'mstpctl_portadminedge',
                  'mstpctl_bpduguard', 'clag_id',
                  'lacp_bypass_priority']:
build_generic_attr(module, _attr)
def config_dict_changed(module):
"""
return true if 'config' dict in hash is different
between desired and current config
"""
current_config = module.custom_current_config.get('config')
desired_config = module.custom_desired_config.get('config')
return current_config != desired_config
def config_changed(module):
"""
returns true if config has changed
"""
if config_dict_changed(module):
return True
# check if addr_method is changed
return module.custom_desired_config.get('addr_method') != \
module.custom_current_config.get('addr_method')
def replace_config(module):
temp = tempfile.NamedTemporaryFile()
desired_config = module.custom_desired_config
# by default it will be something like /etc/network/interfaces.d/swp1
final_location = module.params.get('location') + '/' + \
module.params.get('name')
final_text = ''
_fh = open(final_location, 'w')
# make sure to put hash in array or else ifquery will fail
# write to temp file
try:
temp.write(module.jsonify([desired_config]))
# need to seek to 0 so that data is written to tempfile.
temp.seek(0)
_cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
final_text = run_cmd(module, _cmd)
finally:
temp.close()
try:
_fh.write(final_text)
finally:
_fh.close()
def main():
module = AnsibleModule(
argument_spec=dict(
slaves=dict(required=True, type='list'),
name=dict(required=True, type='str'),
ipv4=dict(type='list'),
ipv6=dict(type='list'),
alias_name=dict(type='str'),
addr_method=dict(type='str',
choices=['', 'dhcp']),
mtu=dict(type='str'),
virtual_ip=dict(type='str'),
virtual_mac=dict(type='str'),
vids=dict(type='list'),
pvid=dict(type='str'),
mstpctl_portnetwork=dict(type='bool', choices=BOOLEANS),
mstpctl_portadminedge=dict(type='bool', choices=BOOLEANS),
mstpctl_bpduguard=dict(type='bool', choices=BOOLEANS),
clag_id=dict(type='str'),
min_links=dict(type='int', default=1),
mode=dict(type='str', default='802.3ad'),
miimon=dict(type='int', default=100),
xmit_hash_policy=dict(type='str', default='layer3+4'),
lacp_rate=dict(type='int', default=1),
lacp_bypass_allow=dict(type='int', choices=[0, 1]),
lacp_bypass_all_active=dict(type='int', choices=[0, 1]),
lacp_bypass_priority=dict(type='list'),
lacp_bypass_period=dict(type='int'),
location=dict(type='str',
default='/etc/network/interfaces.d')
),
mutually_exclusive=[['lacp_bypass_priority', 'lacp_bypass_all_active']],
required_together=[['virtual_ip', 'virtual_mac']]
)
# if using the jinja default filter, this resolves to
# create an list with an empty string ['']. The following
# checks all lists and removes it, so that functions expecting
# an empty list, get this result. May upstream this fix into
# the AnsibleModule code to have it check for this.
for k, _param in module.params.iteritems():
if isinstance(_param, list):
module.params[k] = [x for x in _param if x]
_location = module.params.get('location')
if not os.path.exists(_location):
_msg = "%s does not exist." % (_location)
module.fail_json(msg=_msg)
return # for testing purposes only
ifacename = module.params.get('name')
_changed = False
_msg = "interface %s config not changed" % (ifacename)
current_iface_config(module)
build_desired_iface_config(module)
if config_changed(module):
replace_config(module)
_msg = "interface %s config updated" % (ifacename)
_changed = True
module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
import tempfile
import os
import re
if __name__ == '__main__':
main()
| gpl-3.0 |
JianyuWang/nova | nova/tests/unit/network/security_group/test_neutron_driver.py | 9 | 18614 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from mox3 import mox
from neutronclient.common import exceptions as n_exc
from neutronclient.v2_0 import client
from six.moves import range
from nova import context
from nova import exception
from nova.network.neutronv2 import api as neutronapi
from nova.network.security_group import neutron_driver
from nova import test
class TestNeutronDriver(test.NoDBTestCase):
def setUp(self):
super(TestNeutronDriver, self).setUp()
self.mox.StubOutWithMock(neutronapi, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
self.context = context.RequestContext('userid', 'my_tenantid')
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
def test_list_with_project(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
security_groups_list = {'security_groups': []}
self.moxed_client.list_security_groups(tenant_id=project_id).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(self.context, project=project_id)
def test_list_with_all_tenants_and_admin_context(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
search_opts = {'all_tenants': 1}
security_groups_list = {'security_groups': []}
admin_context = context.RequestContext('user1', project_id, True)
self.mox.ReplayAll()
with mock.patch.object(
self.moxed_client,
'list_security_groups',
return_value=security_groups_list) as mock_list_secgroup:
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(admin_context,
project=project_id,
search_opts=search_opts)
mock_list_secgroup.assert_called_once_with()
def test_list_without_all_tenants_and_admin_context(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
security_groups_list = {'security_groups': []}
admin_context = context.RequestContext('user1', project_id, True)
self.mox.ReplayAll()
with mock.patch.object(
self.moxed_client,
'list_security_groups',
return_value=security_groups_list) as mock_list_secgroup:
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(admin_context, project=project_id)
mock_list_secgroup.assert_called_once_with(tenant_id=project_id)
def test_list_with_all_tenants_sec_name_and_admin_context(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
search_opts = {'all_tenants': 1}
security_group_names = ['secgroup_ssh']
security_groups_list = {'security_groups': []}
admin_context = context.RequestContext('user1', project_id, True)
self.mox.ReplayAll()
with mock.patch.object(
self.moxed_client,
'list_security_groups',
return_value=security_groups_list) as mock_list_secgroup:
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(admin_context, project=project_id,
names=security_group_names,
search_opts=search_opts)
mock_list_secgroup.assert_called_once_with(
name=security_group_names,
tenant_id=project_id)
def test_list_with_all_tenants_sec_name_ids_and_admin_context(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
search_opts = {'all_tenants': 1}
security_group_names = ['secgroup_ssh']
security_group_ids = ['id1']
security_groups_list = {'security_groups': []}
admin_context = context.RequestContext('user1', project_id, True)
self.mox.ReplayAll()
with mock.patch.object(
self.moxed_client,
'list_security_groups',
return_value=security_groups_list) as mock_list_secgroup:
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(admin_context, project=project_id,
names=security_group_names,
ids=security_group_ids,
search_opts=search_opts)
mock_list_secgroup.assert_called_once_with(
name=security_group_names,
id=security_group_ids,
tenant_id=project_id)
def test_list_with_all_tenants_not_admin(self):
search_opts = {'all_tenants': 1}
security_groups_list = {'security_groups': []}
self.mox.ReplayAll()
with mock.patch.object(
self.moxed_client,
'list_security_groups',
return_value=security_groups_list) as mock_list_secgroup:
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(self.context, project=self.context.tenant,
search_opts=search_opts)
mock_list_secgroup.assert_called_once_with(
tenant_id=self.context.tenant)
def test_get_with_name_duplicated(self):
sg_name = 'web_server'
expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5'
list_security_groups = {'security_groups':
[{'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server',
'rules': []}
]}
self.moxed_client.list_security_groups(name=sg_name, fields='id',
tenant_id=self.context.tenant).AndReturn(list_security_groups)
expected_sg = {'security_group': {'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server', 'rules': []}}
self.moxed_client.show_security_group(expected_sg_id).AndReturn(
expected_sg)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
observed_sg = sg_api.get(self.context, name=sg_name)
expected_sg['security_group']['project_id'] = self.context.tenant
del expected_sg['security_group']['tenant_id']
self.assertEqual(expected_sg['security_group'], observed_sg)
def test_get_with_invalid_name(self):
sg_name = 'invalid_name'
expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5'
list_security_groups = {'security_groups':
[{'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server',
'rules': []}
]}
self.moxed_client.list_security_groups(name=sg_name, fields='id',
tenant_id=self.context.tenant).AndReturn(list_security_groups)
self.moxed_client.show_security_group(expected_sg_id).AndRaise(
TypeError)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.SecurityGroupNotFound,
sg_api.get, self.context, name=sg_name)
def test_create_security_group_with_bad_request(self):
name = 'test-security-group'
description = None
body = {'security_group': {'name': name,
'description': description}}
message = "Invalid input. Reason: 'None' is not a valid string."
self.moxed_client.create_security_group(
body).AndRaise(n_exc.BadRequest(message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.Invalid,
sg_api.create_security_group, self.context, name,
description)
def test_create_security_group_exceed_quota(self):
name = 'test-security-group'
description = 'test-security-group'
body = {'security_group': {'name': name,
'description': description}}
message = "Quota exceeded for resources: ['security_group']"
self.moxed_client.create_security_group(
body).AndRaise(n_exc.NeutronClientException(status_code=409,
message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.create_security_group, self.context, name,
description)
def test_create_security_group_rules_exceed_quota(self):
vals = {'protocol': 'tcp', 'cidr': '0.0.0.0/0',
'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'group_id': None, 'from_port': 1025, 'to_port': 1025}
body = {'security_group_rules': [{'remote_group_id': None,
'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4',
'port_range_max': 1025, 'port_range_min': 1025,
'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'remote_ip_prefix': '0.0.0.0/0'}]}
name = 'test-security-group'
message = "Quota exceeded for resources: ['security_group_rule']"
self.moxed_client.create_security_group_rule(
body).AndRaise(n_exc.NeutronClientException(status_code=409,
message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.add_rules, self.context, None, name, [vals])
def test_create_security_group_rules_bad_request(self):
vals = {'protocol': 'icmp', 'cidr': '0.0.0.0/0',
'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'group_id': None, 'to_port': 255}
body = {'security_group_rules': [{'remote_group_id': None,
'direction': 'ingress', 'protocol': 'icmp',
'ethertype': 'IPv4', 'port_range_max': 255,
'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'remote_ip_prefix': '0.0.0.0/0'}]}
name = 'test-security-group'
message = "ICMP code (port-range-max) 255 is provided but ICMP type" \
" (port-range-min) is missing"
self.moxed_client.create_security_group_rule(
body).AndRaise(n_exc.NeutronClientException(status_code=400,
message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.Invalid, sg_api.add_rules,
self.context, None, name, [vals])
def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(self):
sg1 = {'description': 'default',
'id': '07f1362f-34f6-4136-819a-2dcde112269e',
'name': 'default',
'tenant_id': 'c166d9316f814891bcb66b96c4c891d6',
'security_group_rules':
[{'direction': 'ingress',
'ethertype': 'IPv4',
'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb',
'port_range_max': None,
'port_range_min': None,
'protocol': '51',
'remote_group_id': None,
'remote_ip_prefix': None,
'security_group_id':
'07f1362f-34f6-4136-819a-2dcde112269e',
'tenant_id': 'c166d9316f814891bcb66b96c4c891d6'}]}
self.moxed_client.list_security_groups().AndReturn(
{'security_groups': [sg1]})
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.list(self.context)
expected = [{'rules':
[{'from_port': -1, 'protocol': '51', 'to_port': -1,
'parent_group_id': '07f1362f-34f6-4136-819a-2dcde112269e',
'cidr': '0.0.0.0/0', 'group_id': None,
'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb'}],
'project_id': 'c166d9316f814891bcb66b96c4c891d6',
'id': '07f1362f-34f6-4136-819a-2dcde112269e',
'name': 'default', 'description': 'default'}]
self.assertEqual(expected, result)
def test_instances_security_group_bindings(self):
server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1'
port1_id = '4c505aec-09aa-47bc-bcc0-940477e84dc0'
port2_id = 'b3b31a53-6e29-479f-ae5c-00b7b71a6d44'
sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
servers = [{'id': server_id}]
ports = [{'id': port1_id, 'device_id': server_id,
'security_groups': [sg1_id]},
{'id': port2_id, 'device_id': server_id,
'security_groups': [sg2_id]}]
port_list = {'ports': ports}
sg1 = {'id': sg1_id, 'name': 'wol'}
sg2 = {'id': sg2_id, 'name': 'eor'}
security_groups_list = {'security_groups': [sg1, sg2]}
sg_bindings = {server_id: [{'name': 'wol'}, {'name': 'eor'}]}
self.moxed_client.list_ports(device_id=[server_id]).AndReturn(
port_list)
self.moxed_client.list_security_groups(
id=mox.SameElementsAs([sg2_id, sg1_id])).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def _test_instances_security_group_bindings_scale(self, num_servers):
max_query = 150
sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
sg1 = {'id': sg1_id, 'name': 'wol'}
sg2 = {'id': sg2_id, 'name': 'eor'}
security_groups_list = {'security_groups': [sg1, sg2]}
servers = []
device_ids = []
ports = []
sg_bindings = {}
for i in range(0, num_servers):
server_id = "server-%d" % i
port_id = "port-%d" % i
servers.append({'id': server_id})
device_ids.append(server_id)
ports.append({'id': port_id,
'device_id': server_id,
'security_groups': [sg1_id, sg2_id]})
sg_bindings[server_id] = [{'name': 'wol'}, {'name': 'eor'}]
for x in range(0, num_servers, max_query):
self.moxed_client.list_ports(
device_id=device_ids[x:x + max_query]).\
AndReturn({'ports': ports[x:x + max_query]})
self.moxed_client.list_security_groups(
id=mox.SameElementsAs([sg2_id, sg1_id])).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def test_instances_security_group_bindings_less_than_max(self):
self._test_instances_security_group_bindings_scale(100)
def test_instances_security_group_bindings_max(self):
self._test_instances_security_group_bindings_scale(150)
def test_instances_security_group_bindings_more_then_max(self):
self._test_instances_security_group_bindings_scale(300)
def test_instances_security_group_bindings_with_hidden_sg(self):
servers = [{'id': 'server_1'}]
ports = [{'id': '1', 'device_id': 'dev_1', 'security_groups': ['1']},
{'id': '2', 'device_id': 'dev_1', 'security_groups': ['2']}]
port_list = {'ports': ports}
sg1 = {'id': '1', 'name': 'wol'}
# User doesn't have access to sg2
security_groups_list = {'security_groups': [sg1]}
sg_bindings = {'dev_1': [{'name': 'wol'}]}
self.moxed_client.list_ports(device_id=['server_1']).AndReturn(
port_list)
self.moxed_client.\
list_security_groups(id=mox.SameElementsAs(['1', '2'])).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def test_instance_empty_security_groups(self):
port_list = {'ports': [{'id': 1, 'device_id': '1',
'security_groups': []}]}
self.moxed_client.list_ports(device_id=['1']).AndReturn(port_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instance_security_groups(self.context, '1')
self.assertEqual([], result)
class TestNeutronDriverWithoutMock(test.NoDBTestCase):
def test_validate_property(self):
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.validate_property('foo', 'name', None)
sg_api.validate_property('', 'name', None)
self.assertRaises(exception.Invalid, sg_api.validate_property,
'a' * 256, 'name', None)
self.assertRaises(exception.Invalid, sg_api.validate_property,
None, 'name', None)
| apache-2.0 |
kevinlee12/oppia | core/domain/draft_upgrade_services_test.py | 1 | 56055 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for draft upgrade services."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import draft_upgrade_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.tests import test_utils
import feconf
import python_utils
import utils
class DraftUpgradeUnitTests(test_utils.GenericTestBase):
"""Test the draft upgrade services module."""
EXP_ID = 'exp_id'
USER_ID = 'user_id'
OTHER_CHANGE_LIST = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'title',
'new_value': 'New title'
})]
EXP_MIGRATION_CHANGE_LIST = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': '0',
'to_version': python_utils.UNICODE(
feconf.CURRENT_STATE_SCHEMA_VERSION)
})]
DRAFT_CHANGELIST = [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'old_value': None,
'new_value': 'Updated title'})]
def setUp(self):
super(DraftUpgradeUnitTests, self).setUp()
self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)
def test_try_upgrade_with_no_version_difference(self):
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, 1, self.EXP_ID))
def test_try_upgrade_raises_exception_if_versions_are_invalid(self):
with self.assertRaisesRegexp(
utils.InvalidInputException,
'Current draft version is greater than the exploration version.'):
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 2, 1, self.EXP_ID)
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, self.OTHER_CHANGE_LIST,
'Changed exploration title.')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))
def test_try_upgrade_failure_due_to_unsupported_commit_type(self):
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, self.OTHER_CHANGE_LIST,
'Changed exploration title.')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))
def test_try_upgrade_failure_due_to_unimplemented_upgrade_methods(self):
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, self.EXP_MIGRATION_CHANGE_LIST,
'Ran Exploration Migration job.')
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertIsNone(
draft_upgrade_services.try_upgrading_draft_to_exp_version(
self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))
def test_extract_html_from_draft_change_list(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
draft_change_list = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
html_content,
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'ContainsAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'IsProperSubsetOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'DoesNotContainAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'Equals',
'inputs': {
'x': 1
}
}, {
'rule_type': 'HasElementXAtPositionY',
'inputs': {
'x': html_content,
'y': 2
}
}, {
'rule_type': 'IsEqualToOrdering',
'inputs': {
'x': [[html_content]]
}
}, {
'rule_type': 'HasElementXBeforeElementY',
'inputs': {
'x': html_content,
'y': html_content
}
}, {
'rule_type': (
'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
'inputs': {
'x': [[html_content]]
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': html_content
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': {
'content_id': 'content',
'html': html_content
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'written_translations',
'new_value': {
'translations_mapping': {
'content1': {
'en': {
'data_format': 'html',
'translation': html_content,
'needs_update': True
},
'hi': {
'data_format': 'html',
'translation': 'Hey!',
'needs_update': False
}
},
'feedback_1': {
'hi': {
'data_format': 'html',
'translation': html_content,
'needs_update': False
},
'en': {
'data_format': 'html',
'translation': 'hello!',
'needs_update': False
}
}
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'content_id': 'solution',
'html': html_content
},
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': True,
'correct_answer': [
[html_content],
['<p>2</p>'],
['<p>3</p>'],
['<p>4</p>']
],
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'default_outcome',
'new_value': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': html_content
},
'dest': 'Introduction',
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'hints',
'new_value': [{
'hint_content': {
'content_id': 'hint1',
'html': html_content
}
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': 'Intro',
'new_state_name': 'Introduction',
})
]
list_of_html = (
draft_upgrade_services.extract_html_from_draft_change_list(
draft_change_list))
self.assertEqual(len(list_of_html), 27)
expected_html_strings = [
html_content, '<p>1</p>', '<p>2</p>', '<p>3</p>', '<p>4</p>',
'<p>This is solution for state1</p>', 'Hey!', 'hello!']
for html in list_of_html:
self.assertTrue(html in expected_html_strings)
class DraftUpgradeUtilUnitTests(test_utils.GenericTestBase):
"""Test the DraftUpgradeUtil module."""
EXP_ID = 'exp_id'
USER_ID = 'user_id'
EXP_MIGRATION_CHANGE_LIST = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': '34',
'to_version': '35'
})]
# EXP_ID and USER_ID used to create default explorations.
EXP_ID = 'exp_id'
USER_ID = 'user_id'
def create_and_migrate_new_exploration(
self, current_schema_version, target_schema_version):
"""Creates an exploration and applies a state schema migration to it.
Creates an exploration and migrates its state schema from version
current_schema_version to target_schema_version. Asserts that the
exploration was successfully migrated.
Args:
current_schema_version: string. The current schema version of the
exploration (eg. '29').
target_schema_version: string. The schema version to upgrade
the exploration to (eg. '30').
"""
# Create an exploration change list with the command that will migrate
# the schema from current_schema_version to target_schema_version.
exp_migration_change_list = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
'from_version': current_schema_version,
'to_version': target_schema_version
})
]
# The migration will automatically migrate the exploration to the latest
# state schema version, so we set the latest schema version to be the
# target_schema_version.
with self.swap(
feconf, 'CURRENT_STATE_SCHEMA_VERSION',
int(target_schema_version)):
# Create and migrate the exploration.
self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
exp_services.update_exploration(
self.USER_ID, self.EXP_ID, exp_migration_change_list,
'Ran Exploration Migration job.')
# Assert that the update was applied and that the exploration state
# schema was successfully updated.
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(exploration.version, 2)
self.assertEqual(
python_utils.UNICODE(
exploration.states_schema_version),
target_schema_version)
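    # Example (illustrative): the draft-upgrade tests below call this helper as
    # self.create_and_migrate_new_exploration('36', '37') to obtain a version-2
    # exploration whose states are already at schema version 37, and then try to
    # upgrade a version-1 draft against it.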
def test_convert_to_latest_schema_version_implemented(self):
state_schema_version = feconf.CURRENT_STATE_SCHEMA_VERSION
conversion_fn_name = '_convert_states_v%s_dict_to_v%s_dict' % (
state_schema_version - 1, state_schema_version)
self.assertTrue(
hasattr(
draft_upgrade_services.DraftUpgradeUtil, conversion_fn_name),
msg='Current schema version is %d but DraftUpgradeUtil.%s is '
'unimplemented.' % (state_schema_version, conversion_fn_name))
def test_convert_states_v36_dict_to_v37_dict(self):
draft_change_list_v36 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'Intro',
'property_name': 'answer_groups',
'new_value': [{
'rule_specs': [{
'rule_type': 'CaseSensitiveEquals',
'inputs': {
'x': 'test'
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Content</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
})
]
draft_change_list_v37 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'Intro',
'property_name': 'answer_groups',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': 'test'
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Content</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
})
]
# Migrate exploration to state schema version 37.
self.create_and_migrate_new_exploration('36', '37')
migrated_draft_change_list_v37 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v36, 1, 2, self.EXP_ID))
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v37_dict_list = [
change.to_dict() for change in draft_change_list_v37
]
migrated_draft_change_list_v37_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v37
]
self.assertEqual(
draft_change_list_v37_dict_list,
migrated_draft_change_list_v37_dict_list)
def test_convert_states_v35_dict_to_v36_dict(self):
draft_change_list_1_v35 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_id',
'new_value': 'MathExpressionInput'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_customization_args',
'new_value': {}
})
]
draft_change_list_2_v35 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_id',
'new_value': 'MathExpressionInput'
})
]
# Migrate exploration to state schema version 36.
self.create_and_migrate_new_exploration('35', '36')
migrated_draft_change_list_1_v36 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_1_v35, 1, 2, self.EXP_ID))
self.assertIsNone(migrated_draft_change_list_1_v36)
migrated_draft_change_list_2_v36 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_2_v35, 1, 2, self.EXP_ID))
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_2_v35_dict_list = [
change.to_dict() for change in draft_change_list_2_v35
]
migrated_draft_change_list_2_v36_dict_list = [
change.to_dict() for change in migrated_draft_change_list_2_v36
]
self.assertEqual(
draft_change_list_2_v35_dict_list,
migrated_draft_change_list_2_v36_dict_list)
def test_convert_states_v34_dict_to_v35_dict(self):
draft_change_list_1_v34 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'widget_id',
'new_value': 'MathExpressionInput'
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'answer_groups',
'new_value': [{
'rule_specs': [{
'rule_type': 'IsMathematicallyEquivalentTo',
'inputs': {
'x': 'x+y/2'
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Content</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
})
]
draft_change_list_2_v34 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 35.
self.create_and_migrate_new_exploration('34', '35')
migrated_draft_change_list_1_v35 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_1_v34, 1, 2, self.EXP_ID))
self.assertIsNone(migrated_draft_change_list_1_v35)
migrated_draft_change_list_2_v35 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_2_v34, 1, 2, self.EXP_ID))
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_2_v34_dict_list = [
change.to_dict() for change in draft_change_list_2_v34
]
migrated_draft_change_list_2_v35_dict_list = [
change.to_dict() for change in migrated_draft_change_list_2_v35
]
self.assertEqual(
draft_change_list_2_v34_dict_list,
migrated_draft_change_list_2_v35_dict_list)
def test_convert_states_v33_dict_to_v34_dict(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
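        # The v33 -> v34 conversion rewrites math tags: the old
        # raw_latex-with-value attribute becomes a math_content-with-value
        # dict holding 'raw_latex' and an empty 'svg_filename', as the
        # expected HTML above shows.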
draft_change_list = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
html_content,
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'ContainsAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'IsProperSubsetOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'DoesNotContainAtLeastOneOf',
'inputs': {
'x': [html_content]
}
}, {
'rule_type': 'Equals',
'inputs': {
'x': 1
}
}, {
'rule_type': 'HasElementXAtPositionY',
'inputs': {
'x': html_content,
'y': 2
}
}, {
'rule_type': 'IsEqualToOrdering',
'inputs': {
'x': [[html_content]]
}
}, {
'rule_type': 'HasElementXBeforeElementY',
'inputs': {
'x': html_content,
'y': html_content
}
}, {
'rule_type': (
'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
'inputs': {
'x': [[html_content]]
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': html_content
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': {
'content_id': 'content',
'html': html_content
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'written_translations',
'new_value': {
'translations_mapping': {
'content1': {
'en': {
'html': html_content,
'needs_update': True
},
'hi': {
'html': 'Hey!',
'needs_update': False
}
},
'feedback_1': {
'hi': {
'html': html_content,
'needs_update': False
},
'en': {
'html': 'hello!',
'needs_update': False
}
}
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'content_id': 'solution',
'html': html_content
},
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': True,
'correct_answer': [
[html_content],
['<p>2</p>'],
['<p>3</p>'],
['<p>4</p>']
],
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'default_outcome',
'new_value': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': html_content
},
'dest': 'Introduction',
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
}
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'hints',
'new_value': [{
'hint_content': {
'content_id': 'hint1',
'html': html_content
}
}]
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': 'Intro',
'new_state_name': 'Introduction',
})
]
self.create_and_migrate_new_exploration('33', '34')
migrated_draft_change_list = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list, 1, 2, self.EXP_ID))
self.assertEqual(
migrated_draft_change_list[0].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
expected_html_content,
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[1].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'ContainsAtLeastOneOf',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'IsProperSubsetOf',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'DoesNotContainAtLeastOneOf',
'inputs': {
'x': [expected_html_content]
}
}, {
'rule_type': 'Equals',
'inputs': {
'x': 1
}
}, {
'rule_type': 'HasElementXAtPositionY',
'inputs': {
'x': expected_html_content,
'y': 2
}
}, {
'rule_type': 'IsEqualToOrdering',
'inputs': {
'x': [[expected_html_content]]
}
}, {
'rule_type': 'HasElementXBeforeElementY',
'inputs': {
'x': expected_html_content,
'y': expected_html_content
}
}, {
'rule_type': (
'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
'inputs': {
'x': [[expected_html_content]]
}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': expected_html_content
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
}).to_dict())
self.assertEqual(
migrated_draft_change_list[2].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': {
'content_id': 'content',
'html': expected_html_content
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[3].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'written_translations',
'new_value': {
'translations_mapping': {
'content1': {
'en': {
'html': expected_html_content,
'needs_update': True
},
'hi': {
'html': 'Hey!',
'needs_update': False
}
},
'feedback_1': {
'hi': {
'html': expected_html_content,
'needs_update': False
},
'en': {
'html': 'hello!',
'needs_update': False
}
}
}
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[4].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'content_id': 'solution',
'html': expected_html_content
},
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[5].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'solution',
'new_value': {
'answer_is_exclusive': True,
'correct_answer': [
[expected_html_content],
['<p>2</p>'],
['<p>3</p>'],
['<p>4</p>']
],
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[6].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'default_outcome',
'new_value': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': expected_html_content
},
'dest': 'Introduction',
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': False
}
}).to_dict())
self.assertEqual(
migrated_draft_change_list[7].to_dict(),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'hints',
'new_value': [{
'hint_content': {
'content_id': 'hint1',
'html': expected_html_content
}
}]
}).to_dict())
def test_convert_states_v32_dict_to_v33_dict(self):
draft_change_list_v32 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state1',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
}
}
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
})
]
# Version 33 adds a showChoicesInShuffledOrder bool, which doesn't
# impact the second ExplorationChange because it will only impact
# it if 'choices' is the only key for new_value.
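        # In other words, showChoicesInShuffledOrder is only added when
        # 'choices' is the sole key of new_value; customization args that
        # already carry other keys are left untouched.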
expected_draft_change_list_v33 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state1',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'showChoicesInShuffledOrder': {
'value': False
}
}
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
})
]
# Migrate exploration to state schema version 33.
self.create_and_migrate_new_exploration('32', '33')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v33 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v32, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v33_dict_list = [
change.to_dict() for change in expected_draft_change_list_v33
]
migrated_draft_change_list_v33_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v33
]
self.assertEqual(
expected_draft_change_list_v33_dict_list,
migrated_draft_change_list_v33_dict_list)
def test_convert_states_v31_dict_to_v32_dict(self):
draft_change_list_v31 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 32.
self.create_and_migrate_new_exploration('31', '32')
# Migrate the draft change list's state schema to the migrated
# exploration's schema. In this case there are no changes to the
# draft change list since version 32 adds a customization arg
# for the "Add" button text in SetInput interaction for the
# exploration, for which there should be no changes to drafts.
migrated_draft_change_list_v32 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v31, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v31_dict_list = [
change.to_dict() for change in draft_change_list_v31
]
migrated_draft_change_list_v32_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v32
]
self.assertEqual(
draft_change_list_v31_dict_list,
migrated_draft_change_list_v32_dict_list)
def test_convert_states_v30_dict_to_v31_dict(self):
draft_change_list_v30 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'recorded_voiceovers',
'new_value': {
'voiceovers_mapping': {
'content': {
'en': {
'file_size_name': 100,
'filename': 'atest.mp3',
'needs_update': False
}
}
}
}
})
]
# Version 31 adds the duration_secs property.
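        # Each voiceover entry therefore gains a 'duration_secs' key with a
        # default of 0.0, as in the expected change list below.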
expected_draft_change_list_v31 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'recorded_voiceovers',
'new_value': {
'voiceovers_mapping': {
'content': {
'en': {
'file_size_name': 100,
'filename': 'atest.mp3',
'needs_update': False,
'duration_secs': 0.0
}
}
}
}
})
]
# Migrate exploration to state schema version 31.
self.create_and_migrate_new_exploration('30', '31')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v31 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v30, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v31_dict_list = [
change.to_dict() for change in expected_draft_change_list_v31
]
migrated_draft_change_list_v31_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v31
]
self.assertEqual(
expected_draft_change_list_v31_dict_list,
migrated_draft_change_list_v31_dict_list)
def test_convert_states_v29_dict_to_v30_dict(self):
draft_change_list_v29 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': {
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value2 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_misconception_id': None
}
})
]
# Version 30 replaces the tagged_misconception_id in version 29
# with tagged_skill_misconception_id.
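        # Only the key is renamed here; its value (None in this draft) is
        # carried over unchanged.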
expected_draft_change_list_v30 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': {
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value2 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}
})
]
# Migrate exploration to state schema version 30.
self.create_and_migrate_new_exploration('29', '30')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v30 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v29, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v30_dict_list = [
change.to_dict() for change in expected_draft_change_list_v30
]
migrated_draft_change_list_v30_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v30
]
self.assertEqual(
expected_draft_change_list_v30_dict_list,
migrated_draft_change_list_v30_dict_list)
def test_convert_states_v28_dict_to_v29_dict(self):
draft_change_list_v28 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 29.
self.create_and_migrate_new_exploration('28', '29')
# Migrate the draft change list's state schema to the migrated
        # exploration's schema. In this case there are no changes to the
# draft change list since version 29 adds the
# solicit_answer_details boolean variable to the exploration
# state, for which there should be no changes to drafts.
migrated_draft_change_list_v29 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v28, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v28_dict_list = [
change.to_dict() for change in draft_change_list_v28
]
migrated_draft_change_list_v29_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v29
]
self.assertEqual(
draft_change_list_v28_dict_list,
migrated_draft_change_list_v29_dict_list)
def test_convert_states_v27_dict_to_v28_dict(self):
draft_change_list_v27 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'content_ids_to_audio_translations',
'state_name': 'State B',
'new_value': 'new value',
})
]
# Version 28 adds voiceovers_mapping.
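        # The old content_ids_to_audio_translations value is wrapped as
        # {'voiceovers_mapping': <old value>} under the renamed
        # 'recorded_voiceovers' property, as the expected change below shows.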
expected_draft_change_list_v28 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'recorded_voiceovers',
'state_name': 'State B',
'new_value': {'voiceovers_mapping': 'new value'}
})
]
# Migrate exploration to state schema version 28.
self.create_and_migrate_new_exploration('27', '28')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v28 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v27, 1, 2, self.EXP_ID)
)
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v28_dict_list = [
change.to_dict() for change in expected_draft_change_list_v28
]
migrated_draft_change_list_v28_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v28
]
self.assertEqual(
expected_draft_change_list_v28_dict_list,
migrated_draft_change_list_v28_dict_list)
| apache-2.0 |
axinging/chromium-crosswalk | third_party/protobuf/python/google/protobuf/internal/text_format_test.py | 15 | 41879 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.text_format."""
__author__ = '[email protected] (Kenton Varda)'
import re
import six
import string
try:
import unittest2 as unittest
except ImportError:
import unittest
from google.protobuf.internal import _parameterized
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf.internal import message_set_extensions_pb2
from google.protobuf import text_format
# Low-level nuts-n-bolts tests.
class SimpleTextFormatTests(unittest.TestCase):
# The members of _QUOTES are formatted into a regexp template that
# expects single characters. Therefore it's an error (in addition to being
# non-sensical in the first place) to try to specify a "quote mark" that is
# more than one character.
  def testQuoteMarksAreSingleChars(self):
for quote in text_format._QUOTES:
self.assertEqual(1, len(quote))
# Base class with some common functionality.
class TextFormatBase(unittest.TestCase):
def ReadGolden(self, golden_filename):
with test_util.GoldenFile(golden_filename) as f:
return (f.readlines() if str is bytes else # PY3
[golden_line.decode('utf-8') for golden_line in f])
def CompareToGoldenFile(self, text, golden_filename):
golden_lines = self.ReadGolden(golden_filename)
self.assertMultiLineEqual(text, ''.join(golden_lines))
def CompareToGoldenText(self, text, golden_text):
self.assertEqual(text, golden_text)
def RemoveRedundantZeros(self, text):
# Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove
# these zeros in order to match the golden file.
text = text.replace('e+0','e+').replace('e+0','e+') \
.replace('e-0','e-').replace('e-0','e-')
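    # For example, '1.23e+005' becomes '1.23e+5'; chaining the replacement
    # twice strips up to two redundant leading zeros from the exponent.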
# Floating point fields are printed with .0 suffix even if they are
    # actually integer numbers.
    text = re.compile(r'\.0$', re.MULTILINE).sub('', text)
return text
@_parameterized.Parameters(
(unittest_pb2),
(unittest_proto3_arena_pb2))
class TextFormatTest(TextFormatBase):
def testPrintExotic(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string:'
' "\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintExoticUnicodeSubclass(self, message_module):
class UnicodeSub(six.text_type):
pass
message = message_module.TestAllTypes()
message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f'))
self.CompareToGoldenText(
text_format.MessageToString(message),
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintNestedMessageAsOneLine(self, message_module):
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_nested_message { bb: 42 }')
def testPrintRepeatedFieldsAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int32.append(1)
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_string.append('Google')
message.repeated_string.append('Zurich')
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
'repeated_string: "Google" repeated_string: "Zurich"')
def testPrintNestedNewLineInStringAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.optional_string = 'a\nnew\nline'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'optional_string: "a\\nnew\\nline"')
def testPrintExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(
text_format.MessageToString(message, as_one_line=True)),
'repeated_int64: -9223372036854775808'
' repeated_uint64: 18446744073709551615'
' repeated_double: 123.456'
' repeated_double: 1.23e+22'
' repeated_double: 1.23e-18'
' repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
' repeated_string: "\\303\\274\\352\\234\\237"')
def testRoundTripExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
# Test as_utf8 = False.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=False)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message)
# Test as_utf8 = True.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=True)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintRawUtf8String(self, message_module):
message = message_module.TestAllTypes()
message.repeated_string.append(u'\u00fc\ua71f')
text = text_format.MessageToString(message, as_utf8=True)
self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n')
parsed_message = message_module.TestAllTypes()
text_format.Parse(text, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintFloatFormat(self, message_module):
# Check that float_format argument is passed to sub-message formatting.
message = message_module.NestedTestAllTypes()
# We use 1.25 as it is a round number in binary. The proto 32-bit float
# will not gain additional imprecise digits as a 64-bit Python float and
# show up in its str. 32-bit 1.2 is noisy when extended to 64-bit:
# >>> struct.unpack('f', struct.pack('f', 1.2))[0]
# 1.2000000476837158
# >>> struct.unpack('f', struct.pack('f', 1.25))[0]
# 1.25
message.payload.optional_float = 1.25
# Check rounding at 15 significant digits
message.payload.optional_double = -.000003456789012345678
# Check no decimal point.
message.payload.repeated_float.append(-5642)
# Check no trailing zeros.
message.payload.repeated_double.append(.000078900)
formatted_fields = ['optional_float: 1.25',
'optional_double: -3.45678901234568e-6',
'repeated_float: -5642',
'repeated_double: 7.89e-5']
text_message = text_format.MessageToString(message, float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{\n {0}\n {1}\n {2}\n {3}\n}}\n'.format(*formatted_fields))
# as_one_line=True is a separate code branch where float_format is passed.
text_message = text_format.MessageToString(message, as_one_line=True,
float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{ {0} {1} {2} {3} }}'.format(*formatted_fields))
def testMessageToString(self, message_module):
message = message_module.ForeignMessage()
message.c = 123
self.assertEqual('c: 123\n', str(message))
def testParseAllFields(self, message_module):
message = message_module.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
parsed_message = message_module.TestAllTypes()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
def testParseExotic(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: \n'
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "foo" \'corge\' "grault"\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n'
'repeated_string: "\\xc3\\xbc"\n'
'repeated_string: "\xc3\xbc"\n')
text_format.Parse(text, message)
self.assertEqual(-9223372036854775808, message.repeated_int64[0])
self.assertEqual(18446744073709551615, message.repeated_uint64[0])
self.assertEqual(123.456, message.repeated_double[0])
self.assertEqual(1.23e22, message.repeated_double[1])
self.assertEqual(1.23e-18, message.repeated_double[2])
self.assertEqual(
'\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
self.assertEqual('foocorgegrault', message.repeated_string[1])
self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2])
self.assertEqual(u'\u00fc', message.repeated_string[3])
def testParseTrailingCommas(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: 100;\n'
'repeated_int64: 200;\n'
'repeated_int64: 300,\n'
'repeated_string: "one",\n'
'repeated_string: "two";\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedScalarShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: [100, 200];\n'
'repeated_int64: 300,\n'
'repeated_string: ["one", "two"];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseEmptyText(self, message_module):
message = message_module.TestAllTypes()
text = ''
text_format.Parse(text, message)
self.assertEqual(message_module.TestAllTypes(), message)
def testParseInvalidUtf8(self, message_module):
message = message_module.TestAllTypes()
text = 'repeated_string: "\\xc3\\xc3"'
self.assertRaises(text_format.ParseError, text_format.Parse, text, message)
def testParseSingleWord(self, message_module):
message = message_module.TestAllTypes()
text = 'foo'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"foo".'),
text_format.Parse, text, message)
def testParseUnknownField(self, message_module):
message = message_module.TestAllTypes()
text = 'unknown_field: 8\n'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"unknown_field".'),
text_format.Parse, text, message)
def testParseBadEnumValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_nested_enum: BARR'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value named BARR.'),
text_format.Parse, text, message)
message = message_module.TestAllTypes()
text = 'optional_nested_enum: 100'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value with number 100.'),
text_format.Parse, text, message)
def testParseBadIntValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_int32: bork'
six.assertRaisesRegex(self,
text_format.ParseError,
('1:17 : Couldn\'t parse integer: bork'),
text_format.Parse, text, message)
def testParseStringFieldUnescape(self, message_module):
message = message_module.TestAllTypes()
text = r'''repeated_string: "\xf\x62"
repeated_string: "\\xf\\x62"
repeated_string: "\\\xf\\\x62"
repeated_string: "\\\\xf\\\\x62"
repeated_string: "\\\\\xf\\\\\x62"
repeated_string: "\x5cx20"'''
text_format.Parse(text, message)
SLASH = '\\'
self.assertEqual('\x0fb', message.repeated_string[0])
self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1])
self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2])
self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
message.repeated_string[3])
self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
message.repeated_string[4])
self.assertEqual(SLASH + 'x20', message.repeated_string[5])
def testMergeDuplicateScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_int32: 42 '
'optional_int32: 67')
r = text_format.Merge(text, message)
self.assertIs(r, message)
self.assertEqual(67, message.optional_int32)
def testMergeDuplicateNestedMessageScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
r = text_format.Merge(text, message)
self.assertTrue(r is message)
self.assertEqual(2, message.optional_nested_message.bb)
def testParseOneof(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
text_format.Parse(text_format.MessageToString(m), m2)
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
# These are tests that aren't fundamentally specific to proto2, but are at
# the moment because of differences between the proto2 and proto3 test schemas.
# Ideally the schemas would be made more similar so these tests could pass.
class OnlyWorksWithProto2RightNowTests(TextFormatBase):
def testParseGolden(self):
golden_text = '\n'.join(self.ReadGolden('text_format_unittest_data.txt'))
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.Parse(golden_text, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_data_oneof_implemented.txt')
def testPrintAllFieldsPointy(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(
text_format.MessageToString(message, pointy_brackets=True)),
'text_format_unittest_data_pointy_oneof.txt')
def testPrintInIndexOrder(self):
message = unittest_pb2.TestFieldOrderings()
message.my_string = '115'
message.my_int = 101
message.my_float = 111
message.optional_nested_message.oo = 0
message.optional_nested_message.bb = 1
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, use_index_order=True)),
'my_string: \"115\"\nmy_int: 101\nmy_float: 111\n'
'optional_nested_message {\n oo: 0\n bb: 1\n}\n')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message)),
'my_int: 101\nmy_string: \"115\"\nmy_float: 111\n'
'optional_nested_message {\n bb: 1\n oo: 0\n}\n')
def testMergeLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.MergeLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testParseLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.ParseLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintMap(self):
message = map_unittest_pb2.TestMap()
message.map_int32_int32[-123] = -456
message.map_int64_int64[-2**33] = -2**34
message.map_uint32_uint32[123] = 456
message.map_uint64_uint64[2**33] = 2**34
message.map_string_string["abc"] = "123"
message.map_int32_foreign_message[111].c = 5
# Maps are serialized to text format using their underlying repeated
# representation.
self.CompareToGoldenText(
text_format.MessageToString(message),
'map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
def testMapOrderEnforcement(self):
message = map_unittest_pb2.TestMap()
for letter in string.ascii_uppercase[13:26]:
message.map_string_string[letter] = 'dummy'
for letter in reversed(string.ascii_uppercase[0:13]):
message.map_string_string[letter] = 'dummy'
golden = ''.join((
'map_string_string {\n key: "%c"\n value: "dummy"\n}\n' % (letter,)
for letter in string.ascii_uppercase))
self.CompareToGoldenText(text_format.MessageToString(message), golden)
def testMapOrderSemantics(self):
golden_lines = self.ReadGolden('map_test_data.txt')
# The C++ implementation emits defaulted-value fields, while the Python
# implementation does not. Adjusting for this is awkward, but it is
# valuable to test against a common golden file.
line_blacklist = (' key: 0\n',
' value: 0\n',
' key: false\n',
' value: false\n')
golden_lines = [line for line in golden_lines if line not in line_blacklist]
message = map_unittest_pb2.TestMap()
text_format.ParseLines(golden_lines, message)
candidate = text_format.MessageToString(message)
# The Python implementation emits "1.0" for the double value that the C++
# implementation emits as "1".
candidate = candidate.replace('1.0', '1', 2)
self.assertMultiLineEqual(candidate, ''.join(golden_lines))
# Tests of proto2-only features (MessageSet, extensions, etc.).
class Proto2Tests(TextFormatBase):
def testPrintMessageSet(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message),
'message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
message = message_set_extensions_pb2.TestMessageSet()
ext = message_set_extensions_pb2.message_set_extension3
message.Extensions[ext].text = 'bar'
self.CompareToGoldenText(
text_format.MessageToString(message),
'[google.protobuf.internal.TestMessageSetExtension3] {\n'
' text: \"bar\"\n'
'}\n')
def testPrintMessageSetAsOneLine(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'message_set {'
' [protobuf_unittest.TestMessageSetExtension1] {'
' i: 23'
' }'
' [protobuf_unittest.TestMessageSetExtension2] {'
' str: \"foo\"'
' }'
' }')
def testParseMessageSet(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_uint64: 1\n'
'repeated_uint64: 2\n')
text_format.Parse(text, message)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testPrintAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_extensions_data.txt')
def testPrintAllExtensionsPointy(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_extensions_data_pointy.txt')
def testParseGoldenExtensions(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_extensions_data.txt'))
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(golden_text, parsed_message)
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.assertEqual(message, parsed_message)
def testParseAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
def testParseAllowedUnknownExtension(self):
# Skip over unknown extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [unknown_extension] {\n'
' i: 23\n'
' [nested_unknown_ext]: {\n'
' i: 23\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' multiline_str: "abc"\n'
' "def"\n'
' "xyz."\n'
' [nested_unknown_ext]: <\n'
' i: 23\n'
' i: 24\n'
' pointfloat: .3\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' long_string: "test" "test2" \n'
' >\n'
' }\n'
' }\n'
' [unknown_extension]: 5\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
golden = 'message_set {\n}\n'
self.CompareToGoldenText(text_format.MessageToString(message), golden)
# Catch parse errors in unknown extension.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' i:\n' # Missing value.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: }',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed string\n' # Missing closing quote.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed\n multiline\n string\n'
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [malformed_extension] <\n'
' i: -5\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'5:1 : Expected ">".',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
# Don't allow unknown fields with allow_unknown_extension=True.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' unknown_field: true\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
('2:3 : Message type '
'"proto2_wireformat_unittest.TestMessageSet" has no'
' field named "unknown_field".'),
text_format.Parse, malformed, message,
allow_unknown_extension=True)
    # Parse known extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testParseBadExtension(self):
message = unittest_pb2.TestAllExtensions()
text = '[unknown_extension]: 8\n'
six.assertRaisesRegex(self,
text_format.ParseError,
'1:2 : Extension "unknown_extension" not registered.',
text_format.Parse, text, message)
message = unittest_pb2.TestAllTypes()
six.assertRaisesRegex(self,
text_format.ParseError,
('1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
'extensions.'),
text_format.Parse, text, message)
def testMergeDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
text_format.Merge(text, message)
self.assertEqual(
67,
message.Extensions[unittest_pb2.optional_int32_extension])
def testParseDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
six.assertRaisesRegex(self,
text_format.ParseError,
('1:96 : Message type "protobuf_unittest.TestAllExtensions" '
'should not have multiple '
'"protobuf_unittest.optional_int32_extension" extensions.'),
text_format.Parse, text, message)
def testParseDuplicateNestedMessageScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
six.assertRaisesRegex(self,
text_format.ParseError,
('1:65 : Message type "protobuf_unittest.TestAllTypes.NestedMessage" '
'should not have multiple "bb" fields.'),
text_format.Parse, text, message)
def testParseDuplicateScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_int32: 42 '
'optional_int32: 67')
six.assertRaisesRegex(self,
text_format.ParseError,
('1:36 : Message type "protobuf_unittest.TestAllTypes" should not '
'have multiple "optional_int32" fields.'),
text_format.Parse, text, message)
def testParseGroupNotClosed(self):
message = unittest_pb2.TestAllTypes()
text = 'RepeatedGroup: <'
six.assertRaisesRegex(self,
text_format.ParseError, '1:16 : Expected ">".',
text_format.Parse, text, message)
text = 'RepeatedGroup: {'
six.assertRaisesRegex(self,
text_format.ParseError, '1:16 : Expected "}".',
text_format.Parse, text, message)
def testParseEmptyGroup(self):
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: {}'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
message.Clear()
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: <>'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
# Maps aren't really proto2-only, but our test schema only has maps for
# proto2.
def testParseMap(self):
text = ('map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
message = map_unittest_pb2.TestMap()
text_format.Parse(text, message)
self.assertEqual(-456, message.map_int32_int32[-123])
self.assertEqual(-2**34, message.map_int64_int64[-2**33])
self.assertEqual(456, message.map_uint32_uint32[123])
self.assertEqual(2**34, message.map_uint64_uint64[2**33])
self.assertEqual("123", message.map_string_string["abc"])
self.assertEqual(5, message.map_int32_foreign_message[111].c)
class TokenizerTest(unittest.TestCase):
def testSimpleTokenCases(self):
text = ('identifier1:"string1"\n \n\n'
'identifier2 : \n \n123 \n identifier3 :\'string\'\n'
'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n'
'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n'
'ID9: 22 ID10: -111111111111111111 ID11: -22\n'
'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f '
'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f ')
tokenizer = text_format._Tokenizer(text.splitlines())
methods = [(tokenizer.ConsumeIdentifier, 'identifier1'),
':',
(tokenizer.ConsumeString, 'string1'),
(tokenizer.ConsumeIdentifier, 'identifier2'),
':',
(tokenizer.ConsumeInt32, 123),
(tokenizer.ConsumeIdentifier, 'identifier3'),
':',
(tokenizer.ConsumeString, 'string'),
(tokenizer.ConsumeIdentifier, 'identifiER_4'),
':',
(tokenizer.ConsumeFloat, 1.1e+2),
(tokenizer.ConsumeIdentifier, 'ID5'),
':',
(tokenizer.ConsumeFloat, -0.23),
(tokenizer.ConsumeIdentifier, 'ID6'),
':',
(tokenizer.ConsumeString, 'aaaa\'bbbb'),
(tokenizer.ConsumeIdentifier, 'ID7'),
':',
(tokenizer.ConsumeString, 'aa\"bb'),
(tokenizer.ConsumeIdentifier, 'ID8'),
':',
'{',
(tokenizer.ConsumeIdentifier, 'A'),
':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'B'),
':',
(tokenizer.ConsumeFloat, -float('inf')),
(tokenizer.ConsumeIdentifier, 'C'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'D'),
':',
(tokenizer.ConsumeBool, False),
'}',
(tokenizer.ConsumeIdentifier, 'ID9'),
':',
(tokenizer.ConsumeUint32, 22),
(tokenizer.ConsumeIdentifier, 'ID10'),
':',
(tokenizer.ConsumeInt64, -111111111111111111),
(tokenizer.ConsumeIdentifier, 'ID11'),
':',
(tokenizer.ConsumeInt32, -22),
(tokenizer.ConsumeIdentifier, 'ID12'),
':',
(tokenizer.ConsumeUint64, 2222222222222222222),
(tokenizer.ConsumeIdentifier, 'ID13'),
':',
(tokenizer.ConsumeFloat, 1.23456),
(tokenizer.ConsumeIdentifier, 'ID14'),
':',
(tokenizer.ConsumeFloat, 1.2e+2),
(tokenizer.ConsumeIdentifier, 'false_bool'),
':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'true_BOOL'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'true_bool1'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'false_BOOL1'),
':',
(tokenizer.ConsumeBool, False)]
i = 0
while not tokenizer.AtEnd():
m = methods[i]
if type(m) == str:
token = tokenizer.token
self.assertEqual(token, m)
tokenizer.NextToken()
else:
self.assertEqual(m[1], m[0]())
i += 1
def testConsumeIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint64)
self.assertEqual(-1, tokenizer.ConsumeInt32())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt32)
self.assertEqual(uint32_max + 1, tokenizer.ConsumeInt64())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt64)
self.assertEqual(int64_max + 1, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
text = '-0 -0 0 0'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
def testConsumeByteString(self):
text = '"string1\''
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = 'string1"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\xt"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\x"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
def testConsumeBool(self):
text = 'not-a-bool'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
coderanger/pychef | chef/tests/test_search.py | 5 | 2531 | from unittest2 import skip
from chef import Search, Node
from chef.exceptions import ChefError
from chef.tests import ChefTestCase, mockSearch
class SearchTestCase(ChefTestCase):
def test_search_all(self):
s = Search('node')
self.assertGreaterEqual(len(s), 3)
self.assertIn('test_1', s)
self.assertIn('test_2', s)
self.assertIn('test_3', s)
def test_search_query(self):
s = Search('node', 'role:test_1')
self.assertGreaterEqual(len(s), 2)
self.assertIn('test_1', s)
self.assertNotIn('test_2', s)
self.assertIn('test_3', s)
def test_list(self):
searches = Search.list()
self.assertIn('node', searches)
self.assertIn('role', searches)
def test_search_set_query(self):
s = Search('node').query('role:test_1')
self.assertGreaterEqual(len(s), 2)
self.assertIn('test_1', s)
self.assertNotIn('test_2', s)
self.assertIn('test_3', s)
def test_search_call(self):
s = Search('node')('role:test_1')
self.assertGreaterEqual(len(s), 2)
self.assertIn('test_1', s)
self.assertNotIn('test_2', s)
self.assertIn('test_3', s)
def test_rows(self):
s = Search('node', rows=1)
self.assertEqual(len(s), 1)
self.assertGreaterEqual(s.total, 3)
def test_start(self):
s = Search('node', start=1)
self.assertEqual(len(s), s.total-1)
self.assertGreaterEqual(s.total, 3)
def test_slice(self):
s = Search('node')[1:2]
self.assertEqual(len(s), 1)
self.assertGreaterEqual(s.total, 3)
s2 = s[1:2]
self.assertEqual(len(s2), 1)
self.assertGreaterEqual(s2.total, 3)
self.assertNotEqual(s[0]['name'], s2[0]['name'])
s3 = Search('node')[2:3]
self.assertEqual(len(s3), 1)
self.assertGreaterEqual(s3.total, 3)
self.assertEqual(s2[0]['name'], s3[0]['name'])
def test_object(self):
s = Search('node', 'name:test_1')
self.assertEqual(len(s), 1)
node = s[0].object
self.assertEqual(node.name, 'test_1')
self.assertEqual(node.run_list, ['role[test_1]'])
class MockSearchTestCase(ChefTestCase):
@mockSearch({
('node', '*:*'): [Node('fake_1', skip_load=True).to_dict()]
})
def test_single_node(self, MockSearch):
import chef.search
s = chef.search.Search('node')
self.assertEqual(len(s), 1)
self.assertIn('fake_1', s)
| apache-2.0 |
thesuperzapper/tensorflow | tensorflow/python/estimator/run_config.py | 7 | 1949 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Environment configuration object for Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
class RunConfig(object):
"""This class specifies the configurations for an `Estimator` run."""
@property
def cluster_spec(self):
return None
@property
def evaluation_master(self):
return ''
@property
def is_chief(self):
return True
@property
def master(self):
return ''
@property
def num_ps_replicas(self):
return 0
@property
def num_worker_replicas(self):
return 1
@property
def task_id(self):
return 0
@property
def task_type(self):
return TaskType.WORKER
@property
def tf_random_seed(self):
return 1
@property
def save_summary_steps(self):
return 100
@property
def save_checkpoints_secs(self):
return 600
@property
def session_config(self):
return None
@property
def save_checkpoints_steps(self):
return None
@property
def keep_checkpoint_max(self):
return 5
@property
def keep_checkpoint_every_n_hours(self):
return 10000
@property
def model_dir(self):
return None
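# Illustrative usage sketch (not part of the original module): the properties
# above describe a single local worker, e.g.
#
#   config = RunConfig()
#   assert config.task_type == TaskType.WORKER
#   assert config.num_worker_replicas == 1 and config.num_ps_replicas == 0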
| apache-2.0 |
ksteinfe/decodes | src/decodes/core/dc_mesh.py | 1 | 6004 | from decodes.core import *
from . import dc_base, dc_vec, dc_point, dc_has_pts #here we may only import modules that have been loaded before this one. see core/__init__.py for proper order
if VERBOSE_FS: print("mesh.py loaded")
import copy, collections
class Mesh(HasPts):
"""
a very simple mesh class
"""
subclass_attr = [] # this list of props is unset any time this HasPts object changes
def __init__(self, vertices=None, faces=None, basis=None):
""" Mesh Constructor.
:param vertices: The vertices of the mesh.
:type vertices: [Point]
:param faces: List of ordered faces.
:type faces: [int]
:param basis: The (optional) basis of the mesh.
:type basis: Basis
:result: Mesh object.
:rtype: Mesh
::
pts=[
Point(0,0,0),
Point(0,1,0),
Point(1,1,0),
Point(1,0,0),
Point(0,0,1),
Point(0,1,1),
Point(1,1,1),
Point(1,0,1),
]
quad_faces=[[0,1,2,3],[4,5,6,7],[0,4,5,1],[3,7,6,2]]
quadmesh=Mesh(pts,quad_faces)
"""
super(Mesh,self).__init__(vertices,basis) #HasPts constructor handles initalization of verts and basis
self._faces = [] if (faces is None) else faces
@property
def faces(self):
""" Returns a list of mesh faces.
:result: List of mesh faces.
:rtype: list
"""
return self._faces
def add_face(self,a,b,c,d=-1):
""" Adds a face to the mesh.
:param a,b,c,d: Face to be added to the list of faces.
:type a,b,c,d: int.
:result: Modifies list of faces.
:rtype: None
::
quadmesh.add_face(4,5,6,7)
"""
        #TODO: also support adding lists of faces
if max(a,b,c,d) < len(self.pts):
if (d>=0) : self._faces.append([a,b,c,d])
else: self._faces.append([a,b,c])
def face_pts(self,index):
""" Returns the points of a given face.
:param index: Face's index
:type index: int
:returns: Vertices.
:rtype: Point
::
quadmesh.face_pts(0)
"""
return [self.pts[i] for i in self.faces[index]]
def face_centroid(self,index):
""" Returns the centroids of individual mesh faces.
:param index: Index of a face.
:type index: int
:returns: The centroid of a face.
:rtype: Point
::
quadmesh.face_centroid(0)
"""
return Point.centroid(self.face_pts(index))
def face_normal(self,index):
""" Returns the normal vector of a face.
:param index: Index of a face.
:type index: int
:returns: Normal vector.
:rtype: Vec
::
quadmesh.face_normal(0)
"""
verts = self.face_pts(index)
if len(verts) == 3 : return Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[2])).normalized()
else :
v0 = Vec(verts[0],verts[1]).cross(Vec(verts[0],verts[3])).normalized()
v1 = Vec(verts[2],verts[3]).cross(Vec(verts[2],verts[1])).normalized()
return Vec.bisector(v0,v1).normalized()
def __repr__(self):
return "msh[{0}v,{1}f]".format(len(self._verts),len(self._faces))
@staticmethod
def explode(msh):
""" Explodes a mesh into individual faces.
:param msh: Mesh to explode.
:type msh: Mesh
:returns: List of meshes.
:type: [Mesh]
::
Mesh.explode(quadmesh)
"""
exploded_meshes = []
for face in msh.faces:
pts = [msh.pts[v] for v in face]
nface = [0,1,2] if len(face)==3 else [0,1,2,3]
exploded_meshes.append(Mesh(pts,[nface]))
return exploded_meshes
def to_pt_graph(self):
""" Returns a Graph representation of the mesh points by index.
:returns: A Graph of point indexes.
:rtype: Graph
::
quadmesh.to_pt_graph()
"""
graph = Graph()
for index in range(len(self.pts)):
for face in self.faces:
for px in face:
if index in face and index!=px: graph.add_edge(index, px)
return graph
def to_face_graph(self, val=1):
""" Returns a Graph representation of the mesh faces by index.
        :param val: number of shared vertices required for two faces to count as neighbors.
:type val: int
:returns: A Graph of face indexes.
:rtype: Graph
::
quadmesh.to_face_graph(2)
"""
from decodes.extensions.graph import Graph
graph = Graph()
graph.naked_nodes = []
for f1 in range(len(self.faces)):
for f2 in range(len(self.faces)):
if f1 != f2:
count = 0
for index in self.faces[f2]:
if index in self.faces[f1]:
count+=1
if count >= val:
graph.add_edge(f1,f2)
if len(graph.edges[f1]) < len(self.faces[f1]):
if f1 not in graph.naked_nodes:
graph.naked_nodes.append(f1)
return graph
| gpl-3.0 |
louyihua/edx-platform | lms/djangoapps/mobile_api/video_outlines/tests.py | 17 | 33728 | # -*- coding: utf-8 -*-
"""
Tests for video outline API
"""
import itertools
from uuid import uuid4
from collections import namedtuple
import ddt
from nose.plugins.attrib import attr
from edxval import api
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.video_module import transcripts_utils
from xmodule.modulestore.django import modulestore
from xmodule.partitions.partitions import Group, UserPartition
from milestones.tests.utils import MilestonesTestCaseMixin
from mobile_api.models import MobileApiConfig
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, remove_user_from_cohort
from mobile_api.testutils import MobileAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin
class TestVideoAPITestCase(MobileAPITestCase):
"""
Base test class for video related mobile APIs
"""
def setUp(self):
super(TestVideoAPITestCase, self).setUp()
self.section = ItemFactory.create(
parent=self.course,
category="chapter",
display_name=u"test factory section omega \u03a9",
)
self.sub_section = ItemFactory.create(
parent=self.section,
category="sequential",
display_name=u"test subsection omega \u03a9",
)
self.unit = ItemFactory.create(
parent=self.sub_section,
category="vertical",
metadata={'graded': True, 'format': 'Homework'},
display_name=u"test unit omega \u03a9",
)
self.other_unit = ItemFactory.create(
parent=self.sub_section,
category="vertical",
metadata={'graded': True, 'format': 'Homework'},
display_name=u"test unit omega 2 \u03a9",
)
self.nameless_unit = ItemFactory.create(
parent=self.sub_section,
category="vertical",
metadata={'graded': True, 'format': 'Homework'},
display_name=None,
)
self.edx_video_id = 'testing-123'
self.video_url = 'http://val.edx.org/val/video.mp4'
self.video_url_high = 'http://val.edx.org/val/video_high.mp4'
self.youtube_url = 'http://val.edx.org/val/youtube.mp4'
self.html5_video_url = 'http://video.edx.org/html5/video.mp4'
api.create_profile('youtube')
api.create_profile('mobile_high')
api.create_profile('mobile_low')
# create the video in VAL
api.create_video({
'edx_video_id': self.edx_video_id,
'status': 'test',
'client_video_id': u"test video omega \u03a9",
'duration': 12,
'courses': [unicode(self.course.id)],
'encoded_videos': [
{
'profile': 'youtube',
'url': 'xyz123',
'file_size': 0,
'bitrate': 1500
},
{
'profile': 'mobile_low',
'url': self.video_url,
'file_size': 12345,
'bitrate': 250
},
{
'profile': 'mobile_high',
'url': self.video_url_high,
'file_size': 99999,
'bitrate': 250
},
]})
# Set requested profiles
MobileApiConfig(video_profiles="mobile_low,mobile_high,youtube").save()
class TestVideoAPIMixin(object):
"""
Mixin class that provides helpers for testing video related mobile APIs
"""
def _create_video_with_subs(self, custom_subid=None):
"""
Creates and returns a video with stored subtitles.
"""
subid = custom_subid or uuid4().hex
transcripts_utils.save_subs_to_store(
{
'start': [100, 200, 240, 390, 1000],
'end': [200, 240, 380, 1000, 1500],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
},
subid,
self.course)
return ItemFactory.create(
parent=self.unit,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"test video omega \u03a9",
sub=subid
)
def _verify_paths(self, course_outline, path_list, outline_index=0):
"""
Takes a path_list and compares it against the course_outline
Attributes:
course_outline (list): A list of dictionaries that includes a 'path'
and 'named_path' field which we will be comparing path_list to
path_list (list): A list of the expected strings
outline_index (int): Index into the course_outline list for which the
path is being tested.
"""
path = course_outline[outline_index]['path']
self.assertEqual(len(path), len(path_list))
for i in range(len(path_list)):
self.assertEqual(path_list[i], path[i]['name'])
#named_path will be deprecated eventually
named_path = course_outline[outline_index]['named_path']
self.assertEqual(len(named_path), len(path_list))
for i in range(len(path_list)):
self.assertEqual(path_list[i], named_path[i])
def _setup_course_partitions(self, scheme_id='random', is_cohorted=False):
"""Helper method to configure the user partitions in the course."""
self.partition_id = 0 # pylint: disable=attribute-defined-outside-init
self.course.user_partitions = [
UserPartition(
self.partition_id, 'first_partition', 'First Partition',
[Group(0, 'alpha'), Group(1, 'beta')],
scheme=None, scheme_id=scheme_id
),
]
self.course.cohort_config = {'cohorted': is_cohorted}
self.store.update_item(self.course, self.user.id)
def _setup_group_access(self, xblock, partition_id, group_ids):
"""Helper method to configure the partition and group mapping for the given xblock."""
xblock.group_access = {partition_id: group_ids}
self.store.update_item(xblock, self.user.id)
def _setup_split_module(self, sub_block_category):
"""Helper method to configure a split_test unit with children of type sub_block_category."""
self._setup_course_partitions()
self.split_test = ItemFactory.create( # pylint: disable=attribute-defined-outside-init
parent=self.unit,
category="split_test",
display_name=u"split test unit",
user_partition_id=0,
)
sub_block_a = ItemFactory.create(
parent=self.split_test,
category=sub_block_category,
display_name=u"split test block a",
)
sub_block_b = ItemFactory.create(
parent=self.split_test,
category=sub_block_category,
display_name=u"split test block b",
)
self.split_test.group_id_to_child = {
str(index): url for index, url in enumerate([sub_block_a.location, sub_block_b.location])
}
self.store.update_item(self.split_test, self.user.id)
return sub_block_a, sub_block_b
@attr(shard=2)
class TestNonStandardCourseStructure(MobileAPITestCase, TestVideoAPIMixin, MilestonesTestCaseMixin):
"""
Tests /api/mobile/v0.5/video_outlines/courses/{course_id} with no course set
"""
REVERSE_INFO = {'name': 'video-summary-list', 'params': ['course_id']}
def setUp(self):
super(TestNonStandardCourseStructure, self).setUp()
self.chapter_under_course = ItemFactory.create(
parent=self.course,
category="chapter",
display_name=u"test factory chapter under course omega \u03a9",
)
self.section_under_course = ItemFactory.create(
parent=self.course,
category="sequential",
display_name=u"test factory section under course omega \u03a9",
)
self.section_under_chapter = ItemFactory.create(
parent=self.chapter_under_course,
category="sequential",
display_name=u"test factory section under chapter omega \u03a9",
)
self.vertical_under_course = ItemFactory.create(
parent=self.course,
category="vertical",
display_name=u"test factory vertical under course omega \u03a9",
)
self.vertical_under_section = ItemFactory.create(
parent=self.section_under_chapter,
category="vertical",
display_name=u"test factory vertical under section omega \u03a9",
)
def test_structure_course_video(self):
"""
Tests when there is a video without a vertical directly under course
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.course,
category="video",
display_name=u"test factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(section_url, r'courseware$')
self.assertEqual(section_url, unit_url)
self._verify_paths(course_outline, [])
def test_structure_course_vert_video(self):
"""
Tests when there is a video under vertical directly under course
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.vertical_under_course,
category="video",
display_name=u"test factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(
section_url,
r'courseware/test_factory_vertical_under_course_omega_%CE%A9/$'
)
self.assertEqual(section_url, unit_url)
self._verify_paths(
course_outline,
[
u'test factory vertical under course omega \u03a9'
]
)
def test_structure_course_chap_video(self):
"""
Tests when there is a video directly under chapter
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.chapter_under_course,
category="video",
display_name=u"test factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(
section_url,
r'courseware/test_factory_chapter_under_course_omega_%CE%A9/$'
)
self.assertEqual(section_url, unit_url)
self._verify_paths(
course_outline,
[
u'test factory chapter under course omega \u03a9',
]
)
def test_structure_course_section_video(self):
"""
Tests when chapter is none, and video under section under course
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.section_under_course,
category="video",
display_name=u"test factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(
section_url,
r'courseware/test_factory_section_under_course_omega_%CE%A9/$'
)
self.assertEqual(section_url, unit_url)
self._verify_paths(
course_outline,
[
u'test factory section under course omega \u03a9',
]
)
def test_structure_course_chap_section_video(self):
"""
Tests when chapter and sequential exists, with a video with no vertical.
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.section_under_chapter,
category="video",
display_name=u"meow factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(
section_url,
(
r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' +
'test_factory_section_under_chapter_omega_%CE%A9/$'
)
)
self.assertEqual(section_url, unit_url)
self._verify_paths(
course_outline,
[
u'test factory chapter under course omega \u03a9',
u'test factory section under chapter omega \u03a9',
]
)
def test_structure_course_section_vert_video(self):
"""
Tests chapter->section->vertical->unit
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.vertical_under_section,
category="video",
display_name=u"test factory video omega \u03a9",
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertRegexpMatches(
section_url,
(
r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' +
'test_factory_section_under_chapter_omega_%CE%A9/$'
)
)
self.assertRegexpMatches(
unit_url,
(
r'courseware/test_factory_chapter_under_course_omega_%CE%A9/' +
'test_factory_section_under_chapter_omega_%CE%A9/1$'
)
)
self._verify_paths(
course_outline,
[
u'test factory chapter under course omega \u03a9',
u'test factory section under chapter omega \u03a9',
u'test factory vertical under section omega \u03a9'
]
)
@attr(shard=2)
@ddt.ddt
class TestVideoSummaryList(TestVideoAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin,
TestVideoAPIMixin, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/video_outlines/courses/{course_id}..
"""
REVERSE_INFO = {'name': 'video-summary-list', 'params': ['course_id']}
def test_only_on_web(self):
self.login_and_enroll()
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 0)
subid = uuid4().hex
transcripts_utils.save_subs_to_store(
{
'start': [100],
'end': [200],
'text': [
'subs #1',
]
},
subid,
self.course)
ItemFactory.create(
parent=self.unit,
category="video",
display_name=u"test video",
only_on_web=True,
subid=subid
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
self.assertIsNone(course_outline[0]["summary"]["video_url"])
self.assertIsNone(course_outline[0]["summary"]["video_thumbnail_url"])
self.assertEqual(course_outline[0]["summary"]["duration"], 0)
self.assertEqual(course_outline[0]["summary"]["size"], 0)
self.assertEqual(course_outline[0]["summary"]["name"], "test video")
self.assertEqual(course_outline[0]["summary"]["transcripts"], {})
self.assertIsNone(course_outline[0]["summary"]["language"])
self.assertEqual(course_outline[0]["summary"]["category"], "video")
self.assertTrue(course_outline[0]["summary"]["only_on_web"])
def test_mobile_api_config(self):
"""
Tests VideoSummaryList with different MobileApiConfig video_profiles
"""
self.login_and_enroll()
edx_video_id = "testing_mobile_high"
api.create_video({
'edx_video_id': edx_video_id,
'status': 'test',
'client_video_id': u"test video omega \u03a9",
'duration': 12,
'courses': [unicode(self.course.id)],
'encoded_videos': [
{
'profile': 'youtube',
'url': self.youtube_url,
'file_size': 2222,
'bitrate': 4444
},
{
'profile': 'mobile_high',
'url': self.video_url_high,
'file_size': 111,
'bitrate': 333
},
]})
ItemFactory.create(
parent=self.other_unit,
category="video",
display_name=u"testing mobile high video",
edx_video_id=edx_video_id,
)
expected_output = {
'category': u'video',
'video_thumbnail_url': None,
'language': u'en',
'name': u'testing mobile high video',
'video_url': self.video_url_high,
'duration': 12.0,
'transcripts': {
'en': 'http://testserver/api/mobile/v0.5/video_outlines/transcripts/{}/testing_mobile_high_video/en'.format(self.course.id) # pylint: disable=line-too-long
},
'only_on_web': False,
'encoded_videos': {
u'mobile_high': {
'url': self.video_url_high,
'file_size': 111
},
u'youtube': {
'url': self.youtube_url,
'file_size': 2222
}
},
'size': 111
}
# Testing when video_profiles='mobile_low,mobile_high,youtube'
course_outline = self.api_response().data
course_outline[0]['summary'].pop("id")
self.assertEqual(course_outline[0]['summary'], expected_output)
# Testing when there is no mobile_low, and that mobile_high doesn't show
MobileApiConfig(video_profiles="mobile_low,youtube").save()
course_outline = self.api_response().data
expected_output['encoded_videos'].pop('mobile_high')
expected_output['video_url'] = self.youtube_url
expected_output['size'] = 2222
course_outline[0]['summary'].pop("id")
self.assertEqual(course_outline[0]['summary'], expected_output)
# Testing where youtube is the default video over mobile_high
MobileApiConfig(video_profiles="youtube,mobile_high").save()
course_outline = self.api_response().data
expected_output['encoded_videos']['mobile_high'] = {
'url': self.video_url_high,
'file_size': 111
}
course_outline[0]['summary'].pop("id")
self.assertEqual(course_outline[0]['summary'], expected_output)
def test_video_not_in_val(self):
self.login_and_enroll()
self._create_video_with_subs()
ItemFactory.create(
parent=self.other_unit,
category="video",
edx_video_id="some_non_existent_id_in_val",
display_name=u"some non existent video in val",
html5_sources=[self.html5_video_url]
)
summary = self.api_response().data[1]['summary']
self.assertEqual(summary['name'], "some non existent video in val")
self.assertIsNone(summary['encoded_videos'])
self.assertIsNone(summary['duration'])
self.assertEqual(summary['size'], 0)
self.assertEqual(summary['video_url'], self.html5_video_url)
def test_course_list(self):
self.login_and_enroll()
self._create_video_with_subs()
ItemFactory.create(
parent=self.other_unit,
category="video",
display_name=u"test video omega 2 \u03a9",
html5_sources=[self.html5_video_url]
)
ItemFactory.create(
parent=self.other_unit,
category="video",
display_name=u"test video omega 3 \u03a9",
source=self.html5_video_url
)
ItemFactory.create(
parent=self.unit,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"test draft video omega \u03a9",
visible_to_staff_only=True,
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 3)
vid = course_outline[0]
self.assertIn('test_subsection_omega_%CE%A9', vid['section_url'])
self.assertIn('test_subsection_omega_%CE%A9/1', vid['unit_url'])
self.assertIn(u'test_video_omega_\u03a9', vid['summary']['id'])
self.assertEqual(vid['summary']['video_url'], self.video_url)
self.assertEqual(vid['summary']['size'], 12345)
self.assertIn('en', vid['summary']['transcripts'])
self.assertFalse(vid['summary']['only_on_web'])
self.assertEqual(course_outline[1]['summary']['video_url'], self.html5_video_url)
self.assertEqual(course_outline[1]['summary']['size'], 0)
self.assertFalse(course_outline[1]['summary']['only_on_web'])
self.assertEqual(course_outline[1]['path'][2]['name'], self.other_unit.display_name)
self.assertEqual(course_outline[1]['path'][2]['id'], unicode(self.other_unit.location))
self.assertEqual(course_outline[2]['summary']['video_url'], self.html5_video_url)
self.assertEqual(course_outline[2]['summary']['size'], 0)
self.assertFalse(course_outline[2]['summary']['only_on_web'])
def test_with_nameless_unit(self):
self.login_and_enroll()
ItemFactory.create(
parent=self.nameless_unit,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"test draft video omega 2 \u03a9"
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
self.assertEqual(course_outline[0]['path'][2]['name'], self.nameless_unit.location.block_id)
def test_with_video_in_sub_section(self):
"""
Tests a non standard xml format where a video is underneath a sequential
We are expecting to return the same unit and section url since there is
no unit vertical.
"""
self.login_and_enroll()
ItemFactory.create(
parent=self.sub_section,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"video in the sub section"
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
self.assertEqual(len(course_outline[0]['path']), 2)
section_url = course_outline[0]["section_url"]
unit_url = course_outline[0]["unit_url"]
self.assertIn(
u'courseware/test_factory_section_omega_%CE%A9/test_subsection_omega_%CE%A9',
section_url
)
self.assertTrue(section_url)
self.assertTrue(unit_url)
self.assertEqual(section_url, unit_url)
@ddt.data(
*itertools.product([True, False], ["video", "problem"])
)
@ddt.unpack
def test_with_split_block(self, is_user_staff, sub_block_category):
"""Test with split_module->sub_block_category and for both staff and non-staff users."""
self.login_and_enroll()
self.user.is_staff = is_user_staff
self.user.save()
self._setup_split_module(sub_block_category)
video_outline = self.api_response().data
num_video_blocks = 1 if sub_block_category == "video" else 0
self.assertEqual(len(video_outline), num_video_blocks)
for block_index in range(num_video_blocks):
self._verify_paths(
video_outline,
[
self.section.display_name,
self.sub_section.display_name,
self.unit.display_name,
self.split_test.display_name
],
block_index
)
self.assertIn(u"split test block", video_outline[block_index]["summary"]["name"])
def test_with_split_vertical(self):
"""Test with split_module->vertical->video structure."""
self.login_and_enroll()
split_vertical_a, split_vertical_b = self._setup_split_module("vertical")
ItemFactory.create(
parent=split_vertical_a,
category="video",
display_name=u"video in vertical a",
)
ItemFactory.create(
parent=split_vertical_b,
category="video",
display_name=u"video in vertical b",
)
video_outline = self.api_response().data
# user should see only one of the videos (a or b).
self.assertEqual(len(video_outline), 1)
self.assertIn(u"video in vertical", video_outline[0]["summary"]["name"])
a_or_b = video_outline[0]["summary"]["name"][-1:]
self._verify_paths(
video_outline,
[
self.section.display_name,
self.sub_section.display_name,
self.unit.display_name,
self.split_test.display_name,
u"split test block " + a_or_b
],
)
def _create_cohorted_video(self, group_id):
"""Creates a cohorted video block, giving access to only the given group_id."""
video_block = ItemFactory.create(
parent=self.unit,
category="video",
display_name=u"video for group " + unicode(group_id),
)
self._setup_group_access(video_block, self.partition_id, [group_id])
def _create_cohorted_vertical_with_video(self, group_id):
"""Creates a cohorted vertical with a child video block, giving access to only the given group_id."""
vertical_block = ItemFactory.create(
parent=self.sub_section,
category="vertical",
display_name=u"vertical for group " + unicode(group_id),
)
self._setup_group_access(vertical_block, self.partition_id, [group_id])
ItemFactory.create(
parent=vertical_block,
category="video",
display_name=u"video for group " + unicode(group_id),
)
@ddt.data("_create_cohorted_video", "_create_cohorted_vertical_with_video")
def test_with_cohorted_content(self, content_creator_method_name):
self.login_and_enroll()
self._setup_course_partitions(scheme_id='cohort', is_cohorted=True)
cohorts = []
for group_id in [0, 1]:
getattr(self, content_creator_method_name)(group_id)
cohorts.append(CohortFactory(course_id=self.course.id, name=u"Cohort " + unicode(group_id)))
link = CourseUserGroupPartitionGroup(
course_user_group=cohorts[group_id],
partition_id=self.partition_id,
group_id=group_id,
)
link.save()
for cohort_index in range(len(cohorts)):
# add user to this cohort
add_user_to_cohort(cohorts[cohort_index], self.user.username)
# should only see video for this cohort
video_outline = self.api_response().data
self.assertEqual(len(video_outline), 1)
self.assertEquals(
u"video for group " + unicode(cohort_index),
video_outline[0]["summary"]["name"]
)
# remove user from this cohort
remove_user_from_cohort(cohorts[cohort_index], self.user.username)
# un-cohorted user should see no videos
video_outline = self.api_response().data
self.assertEqual(len(video_outline), 0)
# staff user sees all videos
self.user.is_staff = True
self.user.save()
video_outline = self.api_response().data
self.assertEqual(len(video_outline), 2)
def test_with_hidden_blocks(self):
self.login_and_enroll()
hidden_subsection = ItemFactory.create(
parent=self.section,
category="sequential",
hide_from_toc=True,
)
unit_within_hidden_subsection = ItemFactory.create(
parent=hidden_subsection,
category="vertical",
)
hidden_unit = ItemFactory.create(
parent=self.sub_section,
category="vertical",
hide_from_toc=True,
)
ItemFactory.create(
parent=unit_within_hidden_subsection,
category="video",
edx_video_id=self.edx_video_id,
)
ItemFactory.create(
parent=hidden_unit,
category="video",
edx_video_id=self.edx_video_id,
)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 0)
def test_language(self):
self.login_and_enroll()
video = ItemFactory.create(
parent=self.nameless_unit,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"test draft video omega 2 \u03a9"
)
language_case = namedtuple('language_case', ['transcripts', 'expected_language'])
language_cases = [
# defaults to english
language_case({}, "en"),
# supports english
language_case({"en": 1}, "en"),
# supports another language
language_case({"lang1": 1}, "lang1"),
# returns first alphabetically-sorted language
language_case({"lang1": 1, "en": 2}, "en"),
language_case({"lang1": 1, "lang2": 2}, "lang1"),
]
for case in language_cases:
video.transcripts = case.transcripts
modulestore().update_item(video, self.user.id)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
self.assertEqual(course_outline[0]['summary']['language'], case.expected_language)
def test_transcripts(self):
self.login_and_enroll()
video = ItemFactory.create(
parent=self.nameless_unit,
category="video",
edx_video_id=self.edx_video_id,
display_name=u"test draft video omega 2 \u03a9"
)
transcript_case = namedtuple('transcript_case', ['transcripts', 'english_subtitle', 'expected_transcripts'])
transcript_cases = [
# defaults to english
transcript_case({}, "", ["en"]),
transcript_case({}, "en-sub", ["en"]),
# supports english
transcript_case({"en": 1}, "", ["en"]),
transcript_case({"en": 1}, "en-sub", ["en"]),
# keeps both english and other languages
transcript_case({"lang1": 1, "en": 2}, "", ["lang1", "en"]),
transcript_case({"lang1": 1, "en": 2}, "en-sub", ["lang1", "en"]),
# adds english to list of languages only if english_subtitle is specified
transcript_case({"lang1": 1, "lang2": 2}, "", ["lang1", "lang2"]),
transcript_case({"lang1": 1, "lang2": 2}, "en-sub", ["lang1", "lang2", "en"]),
]
for case in transcript_cases:
video.transcripts = case.transcripts
video.sub = case.english_subtitle
modulestore().update_item(video, self.user.id)
course_outline = self.api_response().data
self.assertEqual(len(course_outline), 1)
self.assertSetEqual(
set(course_outline[0]['summary']['transcripts'].keys()),
set(case.expected_transcripts)
)
@attr(shard=2)
class TestTranscriptsDetail(TestVideoAPITestCase, MobileAuthTestMixin, MobileCourseAccessTestMixin,
TestVideoAPIMixin, MilestonesTestCaseMixin):
"""
Tests for /api/mobile/v0.5/video_outlines/transcripts/{course_id}..
"""
REVERSE_INFO = {'name': 'video-transcripts-detail', 'params': ['course_id']}
def setUp(self):
super(TestTranscriptsDetail, self).setUp()
self.video = self._create_video_with_subs()
def reverse_url(self, reverse_args=None, **kwargs):
reverse_args = reverse_args or {}
reverse_args.update({
'block_id': self.video.location.block_id,
'lang': kwargs.get('lang', 'en'),
})
return super(TestTranscriptsDetail, self).reverse_url(reverse_args, **kwargs)
def test_incorrect_language(self):
self.login_and_enroll()
self.api_response(expected_response_code=404, lang='pl')
def test_transcript_with_unicode_file_name(self):
self.video = self._create_video_with_subs(custom_subid=u'你好')
self.login_and_enroll()
self.api_response(expected_response_code=200, lang='en')
| agpl-3.0 |
Medium/phantomjs-1 | src/breakpad/src/tools/gyp/test/dependencies/gyptest-lib-only.py | 151 | 1091 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that a link time only dependency will get pulled into the set of built
targets, even if no executable uses it.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('lib_only.gyp')
test.build('lib_only.gyp', test.ALL)
# Make doesn't put static libs in a common 'lib' directory, like it does with
# shared libs, so check in the obj path corresponding to the source path.
test.built_file_must_exist('a', type=test.STATIC_LIB, libdir='obj.target')
# TODO(bradnelson/mark):
# On linux and windows a library target will at least pull its link dependencies
# into the generated sln/_main.scons, since not doing so confuses users.
# This is not currently implemented on mac, which has the opposite behavior.
if test.format == 'xcode':
test.built_file_must_not_exist('b', type=test.STATIC_LIB)
else:
test.built_file_must_exist('b', type=test.STATIC_LIB, libdir='obj.target/b')
test.pass_test()
| bsd-3-clause |
tdtrask/ansible | lib/ansible/galaxy/token.py | 102 | 2142 | ########################################################################
#
# (C) 2015, Chris Houseknecht <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from stat import S_IRUSR, S_IWUSR
import yaml
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyToken(object):
    ''' Class for storing and retrieving the Galaxy token in ~/.ansible_galaxy '''
def __init__(self):
self.file = os.path.expanduser("~") + '/.ansible_galaxy'
self.config = yaml.safe_load(self.__open_config_for_read())
if not self.config:
self.config = {}
def __open_config_for_read(self):
if os.path.isfile(self.file):
display.vvv('Opened %s' % self.file)
return open(self.file, 'r')
        # config.yml not found, create it and chmod u+rw
f = open(self.file, 'w')
f.close()
os.chmod(self.file, S_IRUSR | S_IWUSR) # owner has +rw
display.vvv('Created %s' % self.file)
return open(self.file, 'r')
def set(self, token):
self.config['token'] = token
self.save()
def get(self):
return self.config.get('token', None)
def save(self):
with open(self.file, 'w') as f:
yaml.safe_dump(self.config, f, default_flow_style=False)
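# Illustrative usage sketch (the token value is an assumption, not from this source):
#
#   token_store = GalaxyToken()            # loads ~/.ansible_galaxy if it exists
#   token_store.set('abc123')              # persists {'token': 'abc123'} as YAML
#   assert token_store.get() == 'abc123'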
| gpl-3.0 |
slightlymadphoenix/activityPointsApp | activitypoints/lib/python3.5/site-packages/pip/_vendor/distlib/wheel.py | 412 | 39115 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
cache = None # created when needed
if hasattr(sys, 'pypy_version_info'):
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
def _derive_abi():
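        # Builds an ABI tag such as 'cp27mu' (illustrative: a CPython 2.7 build
        # with pymalloc and wide unicode, without Py_DEBUG).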
parts = ['cp', VER_SUFFIX]
if sysconfig.get_config_var('Py_DEBUG'):
parts.append('d')
if sysconfig.get_config_var('WITH_PYMALLOC'):
parts.append('m')
if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
parts.append('u')
return ''.join(parts)
ABI = _derive_abi()
del _derive_abi
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
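# Illustrative parse (the filename below is an example, not from this source):
# 'pip-9.0.1-py2.py3-none-any.whl' -> nm='pip', vn='9.0.1', bn='',
# py='py2.py3', bi='none', ar='any'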
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
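    # A sys.meta_path finder/loader: serves C extensions that were extracted
    # from mounted wheels (see Wheel.mount()/unmount()).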
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.should_verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
# Reinstate the local version separator
self.version = info['vn'].replace('_', '-')
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
wheel_metadata = self.get_wheel_metadata(zf)
wv = wheel_metadata['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if file_version < (1, 1):
fn = 'METADATA'
else:
fn = METADATA_FILENAME
try:
metadata_filename = posixpath.join(info_dir, fn)
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
except KeyError:
raise ValueError('Invalid wheel, because %s is '
'missing' % fn)
return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
def process_shebang(self, data):
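        # Rewrites a native shebang to the generic '#!python'/'#!pythonw' form,
        # preserving interpreter arguments; if no shebang is present, one is
        # prepended. Illustrative trace (input is an assumed example):
        #   b'#!/usr/bin/python -O\n...' -> b'#!python -O\n...'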
m = SHEBANG_RE.match(data)
if m:
end = m.end()
shebang, data_after_shebang = data[:end], data[end:]
# Preserve any arguments after the interpreter
if b'pythonw' in shebang.lower():
shebang_python = SHEBANG_PYTHONW
else:
shebang_python = SHEBANG_PYTHON
m = SHEBANG_DETAIL_RE.match(shebang)
if m:
args = b' ' + m.groups()[-1]
else:
args = b''
shebang = shebang_python + args
data = shebang + data_after_shebang
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = SHEBANG_PYTHON + term + data
return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
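        # Each row is (archive path, 'algo=digest', size); the RECORD file itself
        # is appended with empty digest/size. Illustrative row (example values):
        #   ('mypkg/__init__.py', 'sha256=...', 1023)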
records = list(records) # make a copy for sorting
p = to_posix(os.path.relpath(record_path, base))
records.append((p, '', ''))
records.sort()
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written.
The return value is a :class:`InstalledDistribution` instance unless
        ``lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# The signature file won't be in RECORD,
                # and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('extensions')
if commands:
commands = commands.get('python.commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True }
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
sys.version[:3])
cache = Cache(base)
return cache
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache = self._get_dylib_cache()
prefix = cache.prefix_to_dir(pathname)
cache_base = os.path.join(cache.base, prefix)
if not os.path.isdir(cache_base):
os.makedirs(cache_base)
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def is_compatible(self):
"""
Determine if a wheel is compatible with the running system.
"""
return is_compatible(self)
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True # for now - metadata details TBD
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not self.is_compatible():
msg = 'Wheel %s not compatible with this Python.' % pathname
raise DistlibException(msg)
if not self.is_mountable():
msg = 'Wheel %s is marked as not mountable.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def verify(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
# TODO version verification
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
# The signature file won't be in RECORD,
# and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
def update(self, modifier, dest_dir=None, **kwargs):
"""
Update the contents of a wheel in a generic way. The modifier should
be a callable which expects a dictionary argument: its keys are
archive-entry paths, and its values are absolute filesystem paths
where the contents of the corresponding archive entries can be found. The
modifier is free to change the contents of the files pointed to, add
new entries and remove entries, before returning. This method will
extract the entire contents of the wheel to a temporary location, call
the modifier, and then use the passed (and possibly updated)
dictionary to write a new wheel. If ``dest_dir`` is specified, the new
wheel is written there -- otherwise, the original wheel is overwritten.
The modifier should return True if it updated the wheel, else False.
This method returns the same value the modifier returns.
"""
def get_version(path_map, info_dir):
version = path = None
key = '%s/%s' % (info_dir, METADATA_FILENAME)
if key not in path_map:
key = '%s/PKG-INFO' % info_dir
if key in path_map:
path = path_map[key]
version = Metadata(path=path).version
return version, path
def update_version(version, path):
updated = None
try:
v = NormalizedVersion(version)
i = version.find('-')
if i < 0:
updated = '%s+1' % version
else:
parts = [int(s) for s in version[i + 1:].split('.')]
parts[-1] += 1
updated = '%s+%s' % (version[:i],
'.'.join(str(i) for i in parts))
except UnsupportedVersionError:
logger.debug('Cannot update non-compliant (PEP-440) '
'version %r', version)
if updated:
md = Metadata(path=path)
md.version = updated
legacy = not path.endswith(METADATA_FILENAME)
md.write(path=path, legacy=legacy)
logger.debug('Version updated from %r to %r', version,
updated)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
record_name = posixpath.join(info_dir, 'RECORD')
with tempdir() as workdir:
with ZipFile(pathname, 'r') as zf:
path_map = {}
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if u_arcname == record_name:
continue
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
zf.extract(zinfo, workdir)
path = os.path.join(workdir, convert_path(u_arcname))
path_map[u_arcname] = path
# Remember the version.
original_version, _ = get_version(path_map, info_dir)
# Files extracted. Call the modifier.
modified = modifier(path_map, **kwargs)
if modified:
# Something changed - need to build a new wheel.
current_version, path = get_version(path_map, info_dir)
if current_version and (current_version == original_version):
# Add or update local version to signify changes.
update_version(current_version, path)
# Decide where the new wheel goes.
if dest_dir is None:
fd, newpath = tempfile.mkstemp(suffix='.whl',
prefix='wheel-update-',
dir=workdir)
os.close(fd)
else:
if not os.path.isdir(dest_dir):
raise DistlibException('Not a directory: %r' % dest_dir)
newpath = os.path.join(dest_dir, self.filename)
archive_paths = list(path_map.items())
distinfo = os.path.join(workdir, info_dir)
info = distinfo, info_dir
self.write_records(info, workdir, archive_paths)
self.build_zip(newpath, archive_paths)
if dest_dir is None:
shutil.copyfile(newpath, pathname)
return modified
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
arches = [ARCH]
if sys.platform == 'darwin':
m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
if m:
name, major, minor, arch = m.groups()
minor = int(minor)
matches = [arch]
if arch in ('i386', 'ppc'):
matches.append('fat')
if arch in ('i386', 'ppc', 'x86_64'):
matches.append('fat3')
if arch in ('ppc64', 'x86_64'):
matches.append('fat64')
if arch in ('i386', 'x86_64'):
matches.append('intel')
if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
matches.append('universal')
while minor >= 0:
for match in matches:
s = '%s_%s_%s_%s' % (name, major, minor, match)
if s != ARCH: # already there
arches.append(s)
minor -= 1
# Most specific - our Python version, ABI and arch
for abi in abis:
for arch in arches:
result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
# Next, tags with no ABI / arch dependency, but with an IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return set(result)
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
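# Illustrative use of the helpers above (the wheel filename is made up):
#
#     w = Wheel('pkg-1.0-cp38-cp38-manylinux1_x86_64.whl')
#     if is_compatible(w):    # tags checked against COMPATIBLE_TAGS
#         w.verify()          # sizes/digests checked against RECORD
#         w.mount()           # adds the wheel to sys.path if mountable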
| mit |
cmakler/econgraphs | lib/flask/templating.py | 783 | 4707 | # -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {}
if appctx is not None:
rv['g'] = appctx.g
if reqctx is not None:
rv['request'] = reqctx.request
rv['session'] = reqctx.session
return rv
class Environment(BaseEnvironment):
"""Works like a regular Jinja2 environment but has some additional
knowledge of how Flask's blueprint works so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app, **options):
if 'loader' not in options:
options['loader'] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
"""A loader that looks for templates in the application and all
the blueprint folders.
"""
def __init__(self, app):
self.app = app
def get_source(self, environment, template):
for loader, local_name in self._iter_loaders(template):
try:
return loader.get_source(environment, local_name)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
def _iter_loaders(self, template):
loader = self.app.jinja_loader
if loader is not None:
yield loader, template
# old style module based loaders in case we are dealing with a
# blueprint that is an old style module
try:
module, local_name = posixpath.normpath(template).split('/', 1)
blueprint = self.app.blueprints[module]
if blueprint_is_module(blueprint):
loader = blueprint.jinja_loader
if loader is not None:
yield loader, local_name
except (ValueError, KeyError):
pass
for blueprint in itervalues(self.app.blueprints):
if blueprint_is_module(blueprint):
continue
loader = blueprint.jinja_loader
if loader is not None:
yield loader, template
def list_templates(self):
result = set()
loader = self.app.jinja_loader
if loader is not None:
result.update(loader.list_templates())
for name, blueprint in iteritems(self.app.blueprints):
loader = blueprint.jinja_loader
if loader is not None:
for template in loader.list_templates():
prefix = ''
if blueprint_is_module(blueprint):
prefix = name + '/'
result.add(prefix + template)
return list(result)
def _render(template, context, app):
"""Renders the template and fires the signal"""
rv = template.render(context)
template_rendered.send(app, template=template, context=context)
return rv
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
rendered, or an iterable of template names;
the first one that exists will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
context, ctx.app)
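# A typical call from a view function, for illustration (the template name and
# context variables are hypothetical):
#
#     return render_template('user.html', name=user.name, posts=posts)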
def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context.
:param source: the source code of the template to be
rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.from_string(source),
context, ctx.app)
| mit |
xiaoxiamii/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank, but has a fat tail in its singular value profile.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
ruijie/quantum | quantum/plugins/cisco/l2network_plugin_configuration.py | 7 | 2232 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Rohit Agarwalla, Cisco Systems, Inc.
from quantum.common.utils import find_config_file
from quantum.plugins.cisco.common import cisco_configparser as confp
CONF_FILE = find_config_file({'plugin': 'cisco'}, "l2network_plugin.ini")
CONF_PARSER_OBJ = confp.CiscoConfigParser(CONF_FILE)
# Read the conf for the l2network_plugin
SECTION_CONF = CONF_PARSER_OBJ['VLANS']
VLAN_NAME_PREFIX = SECTION_CONF['vlan_name_prefix']
VLAN_START = SECTION_CONF['vlan_start']
VLAN_END = SECTION_CONF['vlan_end']
SECTION_CONF = CONF_PARSER_OBJ['PORTS']
MAX_PORTS = SECTION_CONF['max_ports']
SECTION_CONF = CONF_PARSER_OBJ['PORTPROFILES']
MAX_PORT_PROFILES = SECTION_CONF['max_port_profiles']
SECTION_CONF = CONF_PARSER_OBJ['NETWORKS']
MAX_NETWORKS = SECTION_CONF['max_networks']
SECTION_CONF = CONF_PARSER_OBJ['MODEL']
MODEL_CLASS = SECTION_CONF['model_class']
CONF_FILE = find_config_file({'plugin': 'cisco'}, "cisco_plugins.ini")
SECTION_CONF = CONF_PARSER_OBJ['SEGMENTATION']
MANAGER_CLASS = SECTION_CONF['manager_class']
CONF_PARSER_OBJ = confp.CiscoConfigParser(CONF_FILE)
# Read the config for the device plugins
PLUGINS = CONF_PARSER_OBJ.walk(CONF_PARSER_OBJ.dummy)
CONF_FILE = find_config_file({'plugin': 'cisco'}, "db_conn.ini")
CONF_PARSER_OBJ = confp.CiscoConfigParser(CONF_FILE)
# Read DB config for the Quantum DB
SECTION_CONF = CONF_PARSER_OBJ['DATABASE']
DB_NAME = SECTION_CONF['name']
DB_USER = SECTION_CONF['user']
DB_PASS = SECTION_CONF['pass']
DB_HOST = SECTION_CONF['host']
| apache-2.0 |
cluckmaster/MissionPlanner | Lib/site-packages/scipy/ndimage/info.py | 55 | 2112 | """
N-dimensional image package
===========================
This package contains various functions for multi-dimensional image
processing.
Modules
-------
.. autosummary::
:toctree: generated/
filters -
fourier -
interpolation -
io -
measurements -
morphology -
Functions (partial list)
------------------------
.. autosummary::
:toctree: generated/
affine_transform - Apply an affine transformation
center_of_mass - The center of mass of the values of an array at labels
convolve - Multi-dimensional convolution
convolve1d - 1-D convolution along the given axis
correlate - Multi-dimensional correlation
correlate1d - 1-D correlation along the given axis
extrema - Min's and max's of an array at labels, with their positions
find_objects - Find objects in a labeled array
generic_filter - Multi-dimensional filter using a given function
generic_filter1d - 1-D generic filter along the given axis
geometric_transform - Apply an arbritrary geometric transform
histogram - Histogram of the values of an array, optionally at labels
imread - Load an image from a file
label - Label features in an array
laplace - n-D Laplace filter based on approximate second derivatives
map_coordinates - Map input array to new coordinates by interpolation
mean - Mean of the values of an array at labels
median_filter - Calculates a multi-dimensional median filter
percentile_filter - Calculates a multi-dimensional percentile filter
rank_filter - Calculates a multi-dimensional rank filter
rotate - Rotate an array
shift - Shift an array
standard_deviation - Standard deviation of an n-D image array
sum - Sum of the values of the array
uniform_filter - Multi-dimensional uniform filter
uniform_filter1d - 1-D uniform filter along the given axis
variance - Variance of the values of an n-D image array
zoom - Zoom an array
Note: the above is only roughly half the functions available in this
package
Objects
-------
.. autosummary::
:toctree: generated/
docdict -
"""
postpone_import = 1
depends = []
| gpl-3.0 |
codewarrior0/pytest | testing/test_recwarn.py | 17 | 6579 | import warnings
import py
import pytest
from _pytest.recwarn import WarningsRecorder
def test_recwarn_functional(testdir):
reprec = testdir.inline_runsource("""
import warnings
oldwarn = warnings.showwarning
def test_method(recwarn):
assert warnings.showwarning != oldwarn
warnings.warn("hello")
warn = recwarn.pop()
assert isinstance(warn.message, UserWarning)
def test_finalized():
assert warnings.showwarning == oldwarn
""")
res = reprec.countoutcomes()
assert tuple(res) == (2, 0, 0), res
class TestWarningsRecorderChecker(object):
def test_recording(self, recwarn):
showwarning = py.std.warnings.showwarning
rec = WarningsRecorder()
with rec:
assert py.std.warnings.showwarning != showwarning
assert not rec.list
py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
assert len(rec.list) == 1
py.std.warnings.warn(DeprecationWarning("hello"))
assert len(rec.list) == 2
warn = rec.pop()
assert str(warn.message) == "hello"
l = rec.list
rec.clear()
assert len(rec.list) == 0
assert l is rec.list
pytest.raises(AssertionError, "rec.pop()")
assert showwarning == py.std.warnings.showwarning
def test_typechecking(self):
from _pytest.recwarn import WarningsChecker
with pytest.raises(TypeError):
WarningsChecker(5)
with pytest.raises(TypeError):
WarningsChecker(('hi', RuntimeWarning))
with pytest.raises(TypeError):
WarningsChecker([DeprecationWarning, RuntimeWarning])
def test_invalid_enter_exit(self):
# wrap this test in WarningsRecorder to ensure warning state gets reset
with WarningsRecorder():
with pytest.raises(RuntimeError):
rec = WarningsRecorder()
rec.__exit__(None, None, None) # can't exit before entering
with pytest.raises(RuntimeError):
rec = WarningsRecorder()
with rec:
with rec:
pass # can't enter twice
#
# ============ test pytest.deprecated_call() ==============
#
def dep(i):
if i == 0:
py.std.warnings.warn("is deprecated", DeprecationWarning)
return 42
reg = {}
def dep_explicit(i):
if i == 0:
py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
filename="hello", lineno=3)
class TestDeprecatedCall(object):
def test_deprecated_call_raises(self):
excinfo = pytest.raises(AssertionError,
"pytest.deprecated_call(dep, 3)")
assert str(excinfo).find("did not produce") != -1
def test_deprecated_call(self):
pytest.deprecated_call(dep, 0)
def test_deprecated_call_ret(self):
ret = pytest.deprecated_call(dep, 0)
assert ret == 42
def test_deprecated_call_preserves(self):
onceregistry = py.std.warnings.onceregistry.copy()
filters = py.std.warnings.filters[:]
warn = py.std.warnings.warn
warn_explicit = py.std.warnings.warn_explicit
self.test_deprecated_call_raises()
self.test_deprecated_call()
assert onceregistry == py.std.warnings.onceregistry
assert filters == py.std.warnings.filters
assert warn is py.std.warnings.warn
assert warn_explicit is py.std.warnings.warn_explicit
def test_deprecated_explicit_call_raises(self):
pytest.raises(AssertionError,
"pytest.deprecated_call(dep_explicit, 3)")
def test_deprecated_explicit_call(self):
pytest.deprecated_call(dep_explicit, 0)
pytest.deprecated_call(dep_explicit, 0)
class TestWarns(object):
def test_strings(self):
# different messages, b/c Python suppresses multiple identical warnings
source1 = "warnings.warn('w1', RuntimeWarning)"
source2 = "warnings.warn('w2', RuntimeWarning)"
source3 = "warnings.warn('w3', RuntimeWarning)"
pytest.warns(RuntimeWarning, source1)
pytest.raises(pytest.fail.Exception,
lambda: pytest.warns(UserWarning, source2))
pytest.warns(RuntimeWarning, source3)
def test_function(self):
pytest.warns(SyntaxWarning,
lambda msg: warnings.warn(msg, SyntaxWarning), "syntax")
def test_warning_tuple(self):
pytest.warns((RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w1', RuntimeWarning))
pytest.warns((RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w2', SyntaxWarning))
pytest.raises(pytest.fail.Exception,
lambda: pytest.warns(
(RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w3', UserWarning)))
def test_as_contextmanager(self):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
with pytest.raises(pytest.fail.Exception):
with pytest.warns(RuntimeWarning):
warnings.warn("user", UserWarning)
with pytest.raises(pytest.fail.Exception):
with pytest.warns(UserWarning):
warnings.warn("runtime", RuntimeWarning)
with pytest.warns(UserWarning):
warnings.warn("user", UserWarning)
def test_record(self):
with pytest.warns(UserWarning) as record:
warnings.warn("user", UserWarning)
assert len(record) == 1
assert str(record[0].message) == "user"
def test_record_only(self):
with pytest.warns(None) as record:
warnings.warn("user", UserWarning)
warnings.warn("runtime", RuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
def test_double_test(self, testdir):
"""If a test is run again, the warning should still be raised"""
testdir.makepyfile('''
import pytest
import warnings
@pytest.mark.parametrize('run', [1, 2])
def test(run):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
''')
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*2 passed in*'])
| mit |
pranner/CMPUT410-Lab6-Django | v1/lib/python2.7/site-packages/django/utils/deconstruct.py | 70 | 2066 | from __future__ import absolute_import # Avoid importing `importlib` from this package.
from importlib import import_module
def deconstructible(*args, **kwargs):
"""
Class decorator that allow the decorated class to be serialized
by the migrations subsystem.
Accepts an optional kwarg `path` to specify the import path.
"""
path = kwargs.pop('path', None)
def decorator(klass):
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
obj = super(klass, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def deconstruct(obj):
"""
Returns a 3-tuple of class import path, positional arguments,
and keyword arguments.
"""
# Python 2/fallback version
if path:
module_name, _, name = path.rpartition('.')
else:
module_name = obj.__module__
name = obj.__class__.__name__
# Make sure it's actually there and not an inner class
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values"
% (name, module_name))
return (
path or '%s.%s' % (obj.__class__.__module__, name),
obj._constructor_args[0],
obj._constructor_args[1],
)
klass.__new__ = staticmethod(__new__)
klass.deconstruct = deconstruct
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
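# Illustrative use (the validator class and module path are hypothetical):
#
#     @deconstructible(path='myapp.validators.RangeValidator')
#     class RangeValidator(object):
#         def __init__(self, low, high):
#             self.low, self.high = low, high
#
#     RangeValidator(1, 10).deconstruct()
#     # -> ('myapp.validators.RangeValidator', (1, 10), {})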
| apache-2.0 |
nyalldawson/QGIS | tests/src/python/test_qgsserver_accesscontrol_wfs.py | 15 | 13735 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Stephane Brunner'
__date__ = '28/08/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
print('CTEST_FULL_OUTPUT')
from qgis.testing import unittest
import urllib.request
import urllib.parse
import urllib.error
from test_qgsserver_accesscontrol import TestQgsServerAccessControl, XML_NS
class TestQgsServerAccessControlWFS(TestQgsServerAccessControl):
def test_wfs_getcapabilities(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertTrue(
str(response).find("<Name>Hello_OnOff</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertTrue(
str(response).find("<Name>Country</Name>") != -1,
"No Country layer in WFS/GetCapabilities\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertFalse(
str(response).find("<Name>Country</Name>") != -1,
"Unexpected Country layer in WFS/GetCapabilities\n%s" % response)
def test_wfs_describefeaturetype_hello(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "DescribeFeatureType",
"TYPENAME": "Hello"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find('name="Hello"') != -1,
"No Hello layer in DescribeFeatureType\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find('name="Hello"') != -1,
"No Hello layer in DescribeFeatureType\n%s" % response)
def test_wfs_describefeaturetype_country(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "DescribeFeatureType",
"TYPENAME": "Country"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find('name="Country"') != -1,
"No Country layer in DescribeFeatureType\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find('name="Country"') != -1,
"Unexpected Country layer in DescribeFeatureType\n%s" % response)
def test_wfs_getfeature_hello(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"No color in result of GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"Unexpected color in result of GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>NULL</qgs:color>") != -1, # spellok
"Unexpected color NULL in result of GetFeature\n%s" % response)
def test_wfs_getfeature_hello2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>2</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_country(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_OnOff" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response) # spellok
# # Subset String # #
def test_wfs_getfeature_subsetstring(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
def test_wfs_getfeature_subsetstring2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>2</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_project_subsetstring(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for retrieving a feature which should be available both with and without access control
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>7</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be one result
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Feature with pkuid=7 not found in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Feature with pkuid=7 not found in GetFeature, has been incorrectly filtered out by access controls\n%s" % response)
def test_wfs_getfeature_project_subsetstring2(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for a feature which should be filtered out by access controls
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>8</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be one result
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>8</qgs:pk>") != -1,
"Feature with pkuid=8 not found in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Feature with pkuid=8 was found in GetFeature, but should have been filtered out by access controls\n%s" % response)
def test_wfs_getfeature_project_subsetstring3(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for features which should be filtered out by project subsetStrings.
For example, pkuid 6 passes the access control checks, but should not be shown because of project layer subsetString
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>6</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be no results, since pkuid 6 should be filtered out by the project subsetString
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") == -1,
"Project based layer subsetString not respected in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Project based layer subsetString not respected in GetFeature with restricted access\n%s" % response)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
twilio/howtos | intercom/gdata/blogger/data.py | 61 | 4551 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the Blogger API."""
__author__ = '[email protected] (Jeff Scudder)'
import re
import urlparse
import atom.core
import gdata.data
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_TEMPLATE = '{http://purl.org/syndication/thread/1.0}%s'
BLOG_NAME_PATTERN = re.compile('(http://)(\w*)')
BLOG_ID_PATTERN = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
BLOG_ID2_PATTERN = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')
POST_ID_PATTERN = re.compile(
'(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')
PAGE_ID_PATTERN = re.compile(
'(tag:blogger.com,1999:blog-)(\w*)(.page-)(\w*)')
COMMENT_ID_PATTERN = re.compile('.*-(\w*)$')
class BloggerEntry(gdata.data.GDEntry):
"""Adds convenience methods inherited by all Blogger entries."""
def get_blog_id(self):
"""Extracts the Blogger id of this blog.
This method is useful when constructing URLs by hand. The blog id is
often used in blogger operation URLs. This should not be confused with
the id member of a BloggerBlog. The id element is the Atom id XML element.
The blog id which this method returns is a part of the Atom id.
Returns:
The blog's unique id as a string.
"""
if self.id.text:
match = BLOG_ID_PATTERN.match(self.id.text)
if match:
return match.group(2)
else:
return BLOG_ID2_PATTERN.match(self.id.text).group(2)
return None
GetBlogId = get_blog_id
def get_blog_name(self):
"""Finds the name of this blog as used in the 'alternate' URL.
An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
entry representing the above example, this method would return 'blogName'.
Returns:
The blog's URL name component as a string.
"""
for link in self.link:
if link.rel == 'alternate':
return urlparse.urlparse(link.href)[1].split(".", 1)[0]
return None
GetBlogName = get_blog_name
class Blog(BloggerEntry):
"""Represents a blog which belongs to the user."""
class BlogFeed(gdata.data.GDFeed):
entry = [Blog]
class BlogPost(BloggerEntry):
"""Represents a single post on a blog."""
def add_label(self, label):
"""Adds a label to the blog post.
The label is represented by an Atom category element, so this method
is shorthand for appending a new atom.Category object.
Args:
label: str
"""
self.category.append(atom.data.Category(scheme=LABEL_SCHEME, term=label))
AddLabel = add_label
def get_post_id(self):
"""Extracts the postID string from the entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return POST_ID_PATTERN.match(self.id.text).group(4)
return None
GetPostId = get_post_id
class BlogPostFeed(gdata.data.GDFeed):
entry = [BlogPost]
class BlogPage(BloggerEntry):
"""Represents a single page on a blog."""
def get_page_id(self):
"""Extracts the pageID string from entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return PAGE_ID_PATTERN.match(self.id.text).group(4)
return None
GetPageId = get_page_id
class BlogPageFeed(gdata.data.GDFeed):
entry = [BlogPage]
class InReplyTo(atom.core.XmlElement):
_qname = THR_TEMPLATE % 'in-reply-to'
href = 'href'
ref = 'ref'
source = 'source'
type = 'type'
class Comment(BloggerEntry):
"""Blog post comment entry in a feed listing comments on a post or blog."""
in_reply_to = InReplyTo
def get_comment_id(self):
"""Extracts the commentID string from the entry's Atom id.
Returns: A string of digits which identify this comment within the blog.
"""
if self.id.text:
return COMMENT_ID_PATTERN.match(self.id.text).group(1)
return None
GetCommentId = get_comment_id
class CommentFeed(gdata.data.GDFeed):
entry = [Comment]
| mit |
lxn2/mxnet | example/rcnn/demo.py | 13 | 5637 | import argparse
import os
import cv2
import mxnet as mx
import numpy as np
from rcnn.logger import logger
from rcnn.config import config
from rcnn.symbol import get_vgg_test, get_vgg_rpn_test
from rcnn.io.image import resize, transform
from rcnn.core.tester import Predictor, im_detect, im_proposal, vis_all_detection, draw_all_detection
from rcnn.utils.load_model import load_param
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
config.TEST.HAS_RPN = True
SHORT_SIDE = config.SCALES[0][0]
LONG_SIDE = config.SCALES[0][1]
PIXEL_MEANS = config.PIXEL_MEANS
DATA_NAMES = ['data', 'im_info']
LABEL_NAMES = None
DATA_SHAPES = [('data', (1, 3, LONG_SIDE, SHORT_SIDE)), ('im_info', (1, 3))]
LABEL_SHAPES = None
# visualization
CONF_THRESH = 0.7
NMS_THRESH = 0.3
nms = py_nms_wrapper(NMS_THRESH)
def get_net(symbol, prefix, epoch, ctx):
arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)
# infer shape
data_shape_dict = dict(DATA_SHAPES)
arg_names, aux_names = symbol.list_arguments(), symbol.list_auxiliary_states()
arg_shape, _, aux_shape = symbol.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(arg_names, arg_shape))
aux_shape_dict = dict(zip(aux_names, aux_shape))
# check shapes
for k in symbol.list_arguments():
if k in data_shape_dict or 'label' in k:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
for k in symbol.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)
predictor = Predictor(symbol, DATA_NAMES, LABEL_NAMES, context=ctx,
provide_data=DATA_SHAPES, provide_label=LABEL_SHAPES,
arg_params=arg_params, aux_params=aux_params)
return predictor
def generate_batch(im):
"""
preprocess image, return batch
:param im: cv2.imread returns [height, width, channel] in BGR
:return:
data_batch: MXNet input batch
data_names: names in data_batch
im_scale: float number
"""
im_array, im_scale = resize(im, SHORT_SIDE, LONG_SIDE)
im_array = transform(im_array, PIXEL_MEANS)
im_info = np.array([[im_array.shape[2], im_array.shape[3], im_scale]], dtype=np.float32)
data = [mx.nd.array(im_array), mx.nd.array(im_info)]
data_shapes = [('data', im_array.shape), ('im_info', im_info.shape)]
data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)
return data_batch, DATA_NAMES, im_scale
def demo_net(predictor, image_name, vis=False):
"""
generate data_batch -> im_detect -> post process
:param predictor: Predictor
:param image_name: image name
:param vis: will save as a new image if not visualized
:return: None
"""
assert os.path.exists(image_name), image_name + ' not found'
im = cv2.imread(image_name)
data_batch, data_names, im_scale = generate_batch(im)
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
all_boxes = [[] for _ in CLASSES]
for cls in CLASSES:
cls_ind = CLASSES.index(cls)
cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = scores[:, cls_ind, np.newaxis]
keep = np.where(cls_scores >= CONF_THRESH)[0]
dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
keep = nms(dets)
all_boxes[cls_ind] = dets[keep, :]
boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
# print results
logger.info('---class---')
logger.info('[[x1, y1, x2, y2, confidence]]')
for ind, boxes in enumerate(boxes_this_image):
if len(boxes) > 0:
logger.info('---%s---' % CLASSES[ind])
logger.info('%s' % boxes)
if vis:
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
else:
result_file = image_name.replace('.', '_result.')
logger.info('results saved to %s' % result_file)
im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
cv2.imwrite(result_file, im)
def parse_args():
parser = argparse.ArgumentParser(description='Demonstrate a Faster R-CNN network')
parser.add_argument('--image', help='custom image', type=str)
parser.add_argument('--prefix', help='saved model prefix', type=str)
parser.add_argument('--epoch', help='epoch of pretrained model', type=int)
parser.add_argument('--gpu', help='GPU device to use', default=0, type=int)
parser.add_argument('--vis', help='display result', action='store_true')
args = parser.parse_args()
return args
def main():
args = parse_args()
ctx = mx.gpu(args.gpu)
symbol = get_vgg_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)
predictor = get_net(symbol, args.prefix, args.epoch, ctx)
demo_net(predictor, args.image, args.vis)
if __name__ == '__main__':
main()
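# Example invocation (the model prefix, epoch and image path are placeholders):
#
#     python demo.py --prefix model/e2e --epoch 10 --image street.jpg --gpu 0 --vis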
| apache-2.0 |
hjoliver/cylc | tests/unit/tui/test_data.py | 1 | 1331 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cylc.flow.tui.data
from cylc.flow.tui.data import generate_mutation
def test_generate_mutation(monkeypatch):
"""It should produce a GraphQL mutation with the args filled in."""
arg_types = {
'foo': 'String!',
'bar': '[Int]'
}
monkeypatch.setattr(cylc.flow.tui.data, 'ARGUMENT_TYPES', arg_types)
assert generate_mutation(
'my_mutation',
['foo', 'bar']
) == '''
mutation($foo: String!, $bar: [Int]) {
my_mutation (foos: $foo, bars: $bar) {
result
}
}
'''
| gpl-3.0 |
fosfataza/protwis | construct/migrations/0002_auto_20180117_1457.py | 3 | 1640 | # Generated by Django 2.0.1 on 2018-01-17 13:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('ligand', '0001_initial'),
('construct', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='crystallizationligandconc',
name='ligand',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ligand.Ligand'),
),
migrations.AddField(
model_name='crystallizationligandconc',
name='ligand_role',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ligand.LigandRole'),
),
migrations.AddField(
model_name='crystallization',
name='chemical_lists',
field=models.ManyToManyField(to='construct.ChemicalList'),
),
migrations.AddField(
model_name='crystallization',
name='crystal_method',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='construct.CrystallizationMethods'),
),
migrations.AddField(
model_name='crystallization',
name='crystal_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='construct.CrystallizationTypes'),
),
migrations.AddField(
model_name='crystallization',
name='ligands',
field=models.ManyToManyField(to='construct.CrystallizationLigandConc'),
),
]
| apache-2.0 |
dessHub/bc-14-online-store-application | flask/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py | 354 | 10534 | from __future__ import absolute_import, division, unicode_literals
from . import base
class Filter(base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
if previous1 is not None:
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
# is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
# not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an optgroup
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
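# Sketch of how this filter is typically applied when serializing (the
# treewalker/serializer calls follow html5lib's public API; details are illustrative):
#
#     walker = html5lib.getTreeWalker('etree')
#     tokens = Filter(walker(document_tree))      # omissible tags dropped
#     html5lib.serializer.HTMLSerializer().render(tokens)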
| gpl-3.0 |
mcocdawc/chemopt | src/chemopt/utilities/_print_versions.py | 2 | 4591 | # The following code was taken from the pandas project and modified.
# http://pandas.pydata.org/
import codecs
import importlib
import locale
import os
import platform
import struct
import sys
def get_sys_info():
"Returns system information as a dict"
blob = []
# commit = cc._git_hash
# blob.append(('commit', commit))
try:
(sysname, nodename, release, version,
machine, processor) = platform.uname()
blob.extend([
("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
("python-bits", struct.calcsize("P") * 8),
("OS", "%s" % (sysname)),
("OS-release", "%s" % (release)),
# ("Version", "%s" % (version)),
("machine", "%s" % (machine)),
("processor", "%s" % (processor)),
# ("byteorder", "%s" % sys.byteorder),
("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
("LANG", "%s" % os.environ.get('LANG', "None")),
("LOCALE", "%s.%s" % locale.getlocale()),
])
except Exception:
pass
return blob
def show_versions(as_json=False):
sys_info = get_sys_info()
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("chemcoord", lambda mod: mod.__version__),
("numpy", lambda mod: mod.version.version),
("scipy", lambda mod: mod.version.version),
("pandas", lambda mod: mod.__version__),
("numba", lambda mod: mod.__version__),
("sortedcontainers", lambda mod: mod.__version__),
("sympy", lambda mod: mod.__version__),
("pytest", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
# ("tables", lambda mod: mod.__version__),
# ("matplotlib", lambda mod: mod.__version__),
# ("Cython", lambda mod: mod.__version__),
# ("xarray", lambda mod: mod.__version__),
# ("patsy", lambda mod: mod.__version__),
# ("dateutil", lambda mod: mod.__version__),
# ("pytz", lambda mod: mod.VERSION),
# ("blosc", lambda mod: mod.__version__),
# ("bottleneck", lambda mod: mod.__version__),
# ("numexpr", lambda mod: mod.__version__),
# ("feather", lambda mod: mod.__version__),
# ("openpyxl", lambda mod: mod.__version__),
# ("xlrd", lambda mod: mod.__VERSION__),
# ("xlwt", lambda mod: mod.__VERSION__),
# ("xlsxwriter", lambda mod: mod.__version__),
# ("lxml", lambda mod: mod.etree.__version__),
# ("bs4", lambda mod: mod.__version__),
# ("html5lib", lambda mod: mod.__version__),
# ("sqlalchemy", lambda mod: mod.__version__),
# ("pymysql", lambda mod: mod.__version__),
# ("psycopg2", lambda mod: mod.__version__),
# ("jinja2", lambda mod: mod.__version__),
# ("s3fs", lambda mod: mod.__version__),
# ("pandas_gbq", lambda mod: mod.__version__),
# ("pandas_datareader", lambda mod: mod.__version__)
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
ver = ver_f(mod)
deps_blob.append((modname, ver))
except Exception:
deps_blob.append((modname, None))
if (as_json):
try:
import json
except Exception:
import simplejson as json
j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
if as_json is True:
print(j)
else:
with codecs.open(as_json, "wb", encoding='utf8') as f:
json.dump(j, f, indent=2)
else:
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
print("%s: %s" % (k, stat))
print("")
for k, stat in deps_blob:
print("%s: %s" % (k, stat))
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-j", "--json", metavar="FILE", nargs=1,
help="Save output as JSON into file, pass in "
"'-' to output to stdout")
options = parser.parse_args()[0]
if options.json == "-":
options.json = True
show_versions(as_json=options.json)
return 0
if __name__ == "__main__":
sys.exit(main())
| lgpl-3.0 |
da1z/intellij-community | python/lib/Lib/pwd.py | 93 | 2552 | """
This module provides access to the Unix password database.
Password database entries are reported as 7-tuples containing the
following items from the password database (see `<pwd.h>'), in order:
pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell. The
uid and gid items are integers, all others are strings. An exception
is raised if the entry asked for cannot be found.
"""
__all__ = ['getpwuid', 'getpwnam', 'getpwall']
from os import _name, _posix_impl
from org.python.core.Py import newString
if _name == 'nt':
raise ImportError, 'pwd module not supported on Windows'
class struct_passwd(tuple):
"""
pwd.struct_passwd: Results from getpw*() routines.
This object may be accessed either as a tuple of
(pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell)
or via the object attributes as named in the above tuple.
"""
attrs = ['pw_name', 'pw_passwd', 'pw_uid', 'pw_gid', 'pw_gecos',
'pw_dir', 'pw_shell']
def __new__(cls, pwd):
pwd = (newString(pwd.loginName), newString(pwd.password), int(pwd.UID),
int(pwd.GID), newString(pwd.GECOS), newString(pwd.home),
newString(pwd.shell))
return tuple.__new__(cls, pwd)
def __getattr__(self, attr):
try:
return self[self.attrs.index(attr)]
except ValueError:
raise AttributeError
def getpwuid(uid):
"""
getpwuid(uid) -> (pw_name,pw_passwd,pw_uid,
pw_gid,pw_gecos,pw_dir,pw_shell)
Return the password database entry for the given numeric user ID.
See pwd.__doc__ for more on password database entries.
"""
entry = _posix_impl.getpwuid(uid)
if not entry:
raise KeyError(uid)
return struct_passwd(entry)
def getpwnam(name):
"""
getpwnam(name) -> (pw_name,pw_passwd,pw_uid,
pw_gid,pw_gecos,pw_dir,pw_shell)
Return the password database entry for the given user name.
See pwd.__doc__ for more on password database entries.
"""
entry = _posix_impl.getpwnam(name)
if not entry:
raise KeyError(name)
return struct_passwd(entry)
def getpwall():
"""
getpwall() -> list_of_entries
Return a list of all available password database entries,
in arbitrary order.
See pwd.__doc__ for more on password database entries.
"""
entries = []
while True:
entry = _posix_impl.getpwent()
if not entry:
break
entries.append(struct_passwd(entry))
return entries
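def _example_lookup(name='root'):
    """
    Hedged usage sketch (not part of the original module): look up an
    account by name and fall back to uid 0 if the name does not exist.
    'root' is only an illustrative default.
    """
    try:
        entry = getpwnam(name)
    except KeyError:
        entry = getpwuid(0)
    return entry.pw_dir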
| apache-2.0 |
InAnimaTe/CouchPotatoServer | libs/tornado/platform/common.py | 285 | 3403 | """Lowest-common-denominator implementations of platform functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import socket
from tornado.platform import interface
class Waker(interface.Waker):
"""Create an OS independent asynchronous pipe.
For use on platforms that don't have os.pipe() (or where pipes cannot
be passed to select()), but do have sockets. This includes Windows
and Jython.
"""
def __init__(self):
# Based on Zope select_trigger.py:
# https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py
self.writer = socket.socket()
# Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up ASAP.
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while 1:
count += 1
# Bind to a local port; for efficiency, let the OS pick
# a free port for us.
# Unfortunately, stress tests showed that we may not
# be able to connect to that port ("Address already in
# use") despite that the OS picked it. This appears
# to be a race bug in the Windows socket implementation.
# So we loop until a connect() succeeds (almost always
# on the first try). See the long thread at
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a = socket.socket()
a.bind(("127.0.0.1", 0))
a.listen(1)
connect_address = a.getsockname() # assigned (host, port) pair
try:
self.writer.connect(connect_address)
break # success
except socket.error as detail:
if (not hasattr(errno, 'WSAEADDRINUSE') or
detail[0] != errno.WSAEADDRINUSE):
# "Address already in use" is the only error
# I've seen on two WinXP Pro SP2 boxes, under
# Pythons 2.3.5 and 2.4.1.
raise
# (10048, 'Address already in use')
# assert count <= 2 # never triggered in Tim's tests
if count >= 10: # I've never seen it go above 2
a.close()
self.writer.close()
raise socket.error("Cannot bind trigger!")
# Close `a` and try again. Note: I originally put a short
# sleep() here, but it didn't appear to help or hurt.
a.close()
self.reader, addr = a.accept()
self.reader.setblocking(0)
self.writer.setblocking(0)
a.close()
self.reader_fd = self.reader.fileno()
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.send(b"x")
except (IOError, socket.error):
pass
def consume(self):
try:
while True:
result = self.reader.recv(1024)
if not result:
break
except (IOError, socket.error):
pass
def close(self):
self.reader.close()
self.writer.close()
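def _demo_waker():
    # Hedged usage sketch (not part of Tornado): a Waker is normally
    # registered with an IOLoop so another thread can interrupt a blocking
    # select(); this standalone demo just exercises wake()/consume().
    import select
    w = Waker()
    try:
        w.wake()  # queue one byte on the writer socket
        select.select([w.fileno()], [], [], 1.0)  # reader becomes readable
        w.consume()  # drain the byte so the waker can be reused
    finally:
        w.close()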
| gpl-3.0 |
jmschrei/scikit-learn | sklearn/cluster/k_means_.py | 30 | 55793 | """K-means clustering"""
# Authors: Gael Varoquaux <[email protected]>
# Thomas Rueckstiess <[email protected]>
# James Bergstra <[email protected]>
# Jan Schlueter <[email protected]>
# Nelle Varoquaux
# Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X: array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters: integer
The number of seeds to choose
x_squared_norms: array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state: numpy.RandomState
The generator used to initialize the centers.
n_local_trials: integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features))
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter: int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
# subtract of mean of x for more accurate distance computations
if not sp.issparse(X) or hasattr(init, '__array__'):
X_mean = X.mean(axis=0)
if not sp.issparse(X):
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init = check_array(init, dtype=np.float64, copy=True)
_validate_center_shape(X, n_clusters, init)
init -= X_mean
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = _kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
init=init, verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
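def _demo_k_means():
    # Hedged usage sketch (not part of scikit-learn): cluster a tiny toy
    # array with the functional API defined above; the data values and
    # random_state are illustrative only.
    X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
    centers, labels, inertia = k_means(X, n_clusters=2, random_state=0)
    return centers, labels, inertia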
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
init='k-means++', verbose=False, random_state=None,
tol=1e-4, precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X: array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters: int
The number of clusters to form as well as the number of
centroids to generate.
max_iter: int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol: float, optional
The relative increment in the results before declaring convergence.
verbose: boolean, optional
Verbosity mode
x_squared_norms: array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid: float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label: integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia: float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
shift = squared_norm(centers_old - centers)
if shift <= tol:
if verbose:
print("Converged at iteration %d" % i)
break
if shift > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
k = centers.shape[0]
all_distances = euclidean_distances(centers, X, x_squared_norms,
squared=True)
labels = np.empty(n_samples, dtype=np.int32)
labels.fill(-1)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(k):
dist = all_distances[center_id]
labels[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms: array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers: float64 array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances: float64 array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
    labels: int array of shape (n_samples,)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# set the default value of centers to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=np.float64)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X: array, shape (n_samples, n_features)
k: int
number of centroids
init: {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms: array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
Returns
-------
centers: array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if isinstance(init, string_types) and init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, string_types) and init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
centers = init
elif callable(init):
centers = init(X, k, random_state=random_state)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
See also
--------
MiniBatchKMeans:
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True, n_jobs=1):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
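def _demo_kmeans_estimator():
    # Hedged usage sketch (not part of scikit-learn): the estimator API
    # mirrors the functional k_means() above -- fit() learns the centers and
    # predict() assigns new points; the toy data is illustrative only.
    X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
    km = KMeans(n_clusters=2, random_state=0).fit(X)
    return km.cluster_centers_, km.predict(np.array([[9., 9.]]))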
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float64, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
        model will take longer to converge, but should converge to a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
old_center_buffer : int
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
Sum of distances of samples to their closest cluster center.
squared_diff : numpy array, shape (n_clusters,)
Squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
random_state=random_state)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X,
astype(new_centers, np.intp),
astype(np.where(to_reassign)[0], np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
centers[center_idx] /= counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulte the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
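def _demo_ewa(values, batch_size=100, n_samples=10000):
    # Hedged illustration (not part of scikit-learn) of the exponentially
    # weighted average used above: alpha grows with the batch size relative
    # to the dataset size, so larger batches react faster to new values.
    # The default arguments are illustrative only.
    alpha = min(1.0, float(batch_size) * 2.0 / (n_samples + 1))
    ewa = None
    for v in values:
        ewa = v if ewa is None else ewa * (1 - alpha) + v * alpha
    return ewa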
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        center squared position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge to a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
defined as the sum of square distances of samples to their nearest
neighbor.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64)
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, np.double)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, np.double)
distances = np.zeros(self.batch_size, dtype=np.float64)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.random_integers(
0, n_samples - 1, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
# this is the first call partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=np.float64)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, np.double), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
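def _demo_minibatch_kmeans():
    # Hedged usage sketch (not part of scikit-learn): partial_fit() updates
    # the centers one mini-batch at a time, e.g. while streaming data; the
    # toy chunks below are illustrative only.
    mbk = MiniBatchKMeans(n_clusters=2, random_state=0, batch_size=2)
    for chunk in (np.array([[0., 0.], [0., 1.]]),
                  np.array([[10., 10.], [10., 11.]])):
        mbk.partial_fit(chunk)
    return mbk.cluster_centers_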
| bsd-3-clause |
koparasy/faultinjection-gem5 | src/arch/power/PowerTLB.py | 20 | 1765 | # -*- mode:python -*-
# Copyright (c) 2009 The University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Timothy M. Jones
from m5.SimObject import SimObject
from m5.params import *
class PowerTLB(SimObject):
type = 'PowerTLB'
cxx_class = 'PowerISA::TLB'
size = Param.Int(64, "TLB size")
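def _example_tlb(entries=128):
    # Hedged usage sketch (not part of gem5): a configuration script can
    # override the default TLB size when instantiating the SimObject; the
    # entry count of 128 is illustrative only.
    return PowerTLB(size=entries)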
| bsd-3-clause |
JonathonReinhart/scuba | scuba/config.py | 1 | 11648 | import os
import yaml
import re
import shlex
from .constants import *
from .utils import *
class ConfigError(Exception):
pass
class ConfigNotFoundError(ConfigError):
pass
# http://stackoverflow.com/a/9577670
class Loader(yaml.SafeLoader):
def __init__(self, stream):
self._root = os.path.split(stream.name)[0]
self._cache = dict()
super().__init__(stream)
def from_yaml(self, node):
'''
        Implements a !from_yaml constructor with the following syntax:
!from_yaml filename key
Arguments:
filename: Filename of external YAML document from which to load,
relative to the current YAML file.
key: Key from external YAML document to return,
using a dot-separated syntax for nested keys.
Examples:
!from_yaml external.yml pop
!from_yaml external.yml foo.bar.pop
!from_yaml "another file.yml" "foo bar.snap crackle.pop"
'''
# Load the content from the node, as a scalar
content = self.construct_scalar(node)
# Split on unquoted spaces
parts = shlex.split(content)
if len(parts) != 2:
raise yaml.YAMLError('Two arguments expected to !from_yaml')
filename, key = parts
# path is relative to the current YAML document
path = os.path.join(self._root, filename)
# Load the other YAML document
doc = self._cache.get(path)
if not doc:
with open(path, 'r') as f:
doc = yaml.load(f, self.__class__)
self._cache[path] = doc
# Retrieve the key
try:
cur = doc
# Use a negative look-behind to split the key on non-escaped '.' characters
for k in re.split(r'(?<!\\)\.', key):
cur = cur[k.replace('\\.', '.')] # Be sure to replace any escaped '.' characters with *just* the '.'
except KeyError:
raise yaml.YAMLError('Key "{}" not found in {}'.format(key, filename))
return cur
Loader.add_constructor('!from_yaml', Loader.from_yaml)
def find_config():
'''Search up the directory hierarchy for .scuba.yml
Returns: path, rel, config on success, or None if not found
path The absolute path of the directory where .scuba.yml was found
rel The relative path from the directory where .scuba.yml was found
to the current directory
config The loaded configuration
'''
cross_fs = 'SCUBA_DISCOVERY_ACROSS_FILESYSTEM' in os.environ
path = os.getcwd()
rel = ''
while True:
cfg_path = os.path.join(path, SCUBA_YML)
if os.path.exists(cfg_path):
return path, rel, load_config(cfg_path)
if not cross_fs and os.path.ismount(path):
msg = '{} not found here or any parent up to mount point {}'.format(SCUBA_YML, path) \
+ '\nStopping at filesystem boundary (SCUBA_DISCOVERY_ACROSS_FILESYSTEM not set).'
raise ConfigNotFoundError(msg)
# Traverse up directory hierarchy
path, rest = os.path.split(path)
if not rest:
raise ConfigNotFoundError('{} not found here or any parent directories'.format(SCUBA_YML))
# Accumulate the relative path back to where we started
rel = os.path.join(rest, rel)
def _process_script_node(node, name):
'''Process a script-type node
This handles nodes that follow the *Common script schema*,
as outlined in doc/yaml-reference.md.
'''
if isinstance(node, str):
# The script is just the text itself
return [node]
if isinstance(node, dict):
        # There must be a "script" key, which must be a string or a list of strings
script = node.get('script')
if not script:
raise ConfigError("{}: must have a 'script' subkey".format(name))
if isinstance(script, list):
return script
if isinstance(script, str):
return [script]
raise ConfigError("{}.script: must be a string or list".format(name))
raise ConfigError("{}: must be string or dict".format(name))
def _process_environment(node, name):
# Environment can be either a list of strings ("KEY=VALUE") or a mapping
# Environment keys and values are always strings
result = {}
if not node:
pass
elif isinstance(node, dict):
for k, v in node.items():
if v is None:
v = os.getenv(k, '')
result[k] = str(v)
elif isinstance(node, list):
for e in node:
k, v = parse_env_var(e)
result[k] = v
else:
raise ConfigError("'{}' must be list or mapping, not {}".format(
name, type(node).__name__))
return result
def _get_entrypoint(data):
# N.B. We can't use data.get() here, because that might return
# None, leading to ambiguity between entrypoint being absent or set
# to a null value.
#
# "Note that a null is different from an empty string and that a
# mapping entry with some key and a null value is valid and
# different from not having that key in the mapping."
# - http://yaml.org/type/null.html
key = 'entrypoint'
    if key not in data:
return None
ep = data[key]
# We represent a null value as an empty string.
if ep is None:
ep = ''
if not isinstance(ep, str):
raise ConfigError("'{}' must be a string, not {}".format(
key, type(ep).__name__))
return ep
class ScubaAlias:
def __init__(self, name, script, image, entrypoint, environment, shell, as_root):
self.name = name
self.script = script
self.image = image
self.entrypoint = entrypoint
self.environment = environment
self.shell = shell
self.as_root = as_root
@classmethod
def from_dict(cls, name, node):
script = _process_script_node(node, name)
image = None
entrypoint = None
environment = None
shell = None
as_root = False
if isinstance(node, dict): # Rich alias
image = node.get('image')
entrypoint = _get_entrypoint(node)
environment = _process_environment(
node.get('environment'),
'{}.{}'.format(name, 'environment'))
shell = node.get('shell')
as_root = node.get('root', as_root)
return cls(name, script, image, entrypoint, environment, shell, as_root)
class ScubaContext:
pass
class ScubaConfig:
def __init__(self, **data):
optional_nodes = ('image','aliases','hooks','entrypoint','environment','shell')
# Check for unrecognized nodes
extra = [n for n in data if not n in optional_nodes]
if extra:
raise ConfigError('{}: Unrecognized node{}: {}'.format(SCUBA_YML,
's' if len(extra) > 1 else '', ', '.join(extra)))
self._image = data.get('image')
self._shell = data.get('shell', DEFAULT_SHELL)
self._entrypoint = _get_entrypoint(data)
self._load_aliases(data)
self._load_hooks(data)
self._environment = self._load_environment(data)
def _load_aliases(self, data):
self._aliases = {}
for name, node in data.get('aliases', {}).items():
if ' ' in name:
raise ConfigError('Alias names cannot contain spaces')
self._aliases[name] = ScubaAlias.from_dict(name, node)
def _load_hooks(self, data):
self._hooks = {}
for name in ('user', 'root',):
node = data.get('hooks', {}).get(name)
if node:
hook = _process_script_node(node, name)
self._hooks[name] = hook
def _load_environment(self, data):
return _process_environment(data.get('environment'), 'environment')
@property
def image(self):
if not self._image:
raise ConfigError("Top-level 'image' not set")
return self._image
@property
def entrypoint(self):
return self._entrypoint
@property
def aliases(self):
return self._aliases
@property
def hooks(self):
return self._hooks
@property
def environment(self):
return self._environment
@property
def shell(self):
return self._shell
def process_command(self, command, image=None, shell=None):
'''Processes a user command using aliases
Arguments:
command A user command list (e.g. argv)
image Override the image from .scuba.yml
shell Override the shell from .scuba.yml
Returns: A ScubaContext object with the following attributes:
script: a list of command line strings
image: the docker image name to use
'''
result = ScubaContext()
result.script = None
result.image = None
result.entrypoint = self.entrypoint
result.environment = self.environment.copy()
result.shell = self.shell
result.as_root = False
if command:
alias = self.aliases.get(command[0])
if not alias:
# Command is not an alias; use it as-is.
result.script = [shell_quote_cmd(command)]
else:
# Using an alias
# Does this alias override the image and/or entrypoint?
if alias.image:
result.image = alias.image
if alias.entrypoint is not None:
result.entrypoint = alias.entrypoint
if alias.shell is not None:
result.shell = alias.shell
if alias.as_root:
result.as_root = True
# Merge/override the environment
if alias.environment:
result.environment.update(alias.environment)
if len(alias.script) > 1:
# Alias is a multiline script; no additional
# arguments are allowed in the scuba invocation.
if len(command) > 1:
raise ConfigError('Additional arguments not allowed with multi-line aliases')
result.script = alias.script
else:
                    # Alias is a single-line script; perform substitution
# and add user arguments.
command.pop(0)
result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]
result.script = flatten_list(result.script)
# If a shell was given on the CLI, it should override the shell set by
# the alias or top-level config
if shell:
result.shell = shell
# If an image was given, it overrides what might have been set by an alias
if image:
result.image = image
        # If the image was still not set, then try to get it from the config,
# which will raise a ConfigError if it is not set
if not result.image:
result.image = self.image
return result
def load_config(path):
try:
with open(path, 'r') as f:
data = yaml.load(f, Loader)
except IOError as e:
raise ConfigError('Error opening {}: {}'.format(SCUBA_YML, e))
except yaml.YAMLError as e:
raise ConfigError('Error loading {}: {}'.format(SCUBA_YML, e))
return ScubaConfig(**(data or {}))
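# Illustrative usage sketch (assumed example, not part of the original file):
#
#     path, rel, cfg = find_config()               # walks up from os.getcwd()
#     ctx = cfg.process_command(['make', 'test'])  # 'make' may be an alias
#     print(ctx.image, ctx.script)
#
# The command shown is hypothetical; ctx.image and ctx.script are the
# attributes populated by process_command above.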
| mit |
badele/home-assistant | homeassistant/components/switch/modbus.py | 9 | 4290 | """
homeassistant.components.switch.modbus
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Modbus switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.modbus/
"""
import logging
import homeassistant.components.modbus as modbus
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['modbus']
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Read configuration and create Modbus devices. """
switches = []
slave = config.get("slave", None)
if modbus.TYPE == "serial" and not slave:
_LOGGER.error("No slave number provided for serial Modbus")
return False
registers = config.get("registers")
if registers:
for regnum, register in registers.items():
bits = register.get("bits")
for bitnum, bit in bits.items():
if bit.get("name"):
switches.append(ModbusSwitch(bit.get("name"),
slave,
regnum,
bitnum))
coils = config.get("coils")
if coils:
for coilnum, coil in coils.items():
switches.append(ModbusSwitch(coil.get("name"),
slave,
coilnum,
0,
coil=True))
add_devices(switches)
class ModbusSwitch(ToggleEntity):
# pylint: disable=too-many-arguments
""" Represents a Modbus switch. """
def __init__(self, name, slave, register, bit, coil=False):
self._name = name
self.slave = int(slave) if slave else 1
self.register = int(register)
self.bit = int(bit)
self._coil = coil
self._is_on = None
self.register_value = None
def __str__(self):
return "%s: %s" % (self.name, self.state)
@property
def should_poll(self):
"""
We should poll, because slaves are not allowed to initiate
communication on Modbus networks.
"""
return True
@property
def unique_id(self):
""" Returns a unique id. """
return "MODBUS-SWITCH-{}-{}-{}".format(self.slave,
self.register,
self.bit)
@property
def is_on(self):
""" Returns True if switch is on. """
return self._is_on
@property
def name(self):
""" Get the name of the switch. """
return self._name
def turn_on(self, **kwargs):
""" Set switch on. """
if self.register_value is None:
self.update()
if self._coil:
modbus.NETWORK.write_coil(self.register, True)
else:
val = self.register_value | (0x0001 << self.bit)
modbus.NETWORK.write_register(unit=self.slave,
address=self.register,
value=val)
def turn_off(self, **kwargs):
""" Set switch off. """
if self.register_value is None:
self.update()
if self._coil:
modbus.NETWORK.write_coil(self.register, False)
else:
val = self.register_value & ~(0x0001 << self.bit)
modbus.NETWORK.write_register(unit=self.slave,
address=self.register,
value=val)
def update(self):
""" Update the state of the switch. """
if self._coil:
result = modbus.NETWORK.read_coils(self.register, 1)
self.register_value = result.bits[0]
self._is_on = self.register_value
else:
result = modbus.NETWORK.read_holding_registers(
unit=self.slave, address=self.register,
count=1)
val = 0
for i, res in enumerate(result.registers):
val += res * (2**(i*16))
self.register_value = val
self._is_on = (val & (0x0001 << self.bit) > 0)
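# Illustrative configuration sketch (assumed example, not part of the original
# file), matching the keys read in setup_platform above:
#
#     switch:
#       platform: modbus
#       slave: 1
#       registers:
#         16:
#           bits:
#             0:
#               name: heater
#       coils:
#         2:
#           name: pump
#
# The register/coil numbers and entity names are hypothetical.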
| mit |
RichardLitt/wyrd-django-dev | tests/regressiontests/admin_filters/tests.py | 4 | 33533 | from __future__ import absolute_import, unicode_literals
import datetime
from django.contrib.admin import (site, ModelAdmin, SimpleListFilter,
BooleanFieldListFilter)
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings, six
from django.utils.encoding import force_text
from .models import Book, Department, Employee
def select_by(dictlist, key, value):
return [x for x in dictlist if x[key] == value][0]
class DecadeListFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (
('the 80s', "the 1980's"),
('the 90s', "the 1990's"),
('the 00s', "the 2000's"),
('other', "other decades"),
)
def queryset(self, request, queryset):
decade = self.value()
if decade == 'the 80s':
return queryset.filter(year__gte=1980, year__lte=1989)
if decade == 'the 90s':
return queryset.filter(year__gte=1990, year__lte=1999)
if decade == 'the 00s':
return queryset.filter(year__gte=2000, year__lte=2009)
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
title = 'publication decade'
parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
pass
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
def queryset(self, request, queryset):
raise 1/0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
qs = model_admin.queryset(request)
if qs.filter(year__gte=1980, year__lte=1989).exists():
yield ('the 80s', "the 1980's")
if qs.filter(year__gte=1990, year__lte=1999).exists():
yield ('the 90s', "the 1990's")
if qs.filter(year__gte=2000, year__lte=2009).exists():
yield ('the 00s', "the 2000's")
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
title = 'publication decade'
    parameter_name = 'decade__in' # Ends with '__in'
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
title = 'publication decade'
    parameter_name = 'decade__isnull' # Ends with '__isnull'
class CustomUserAdmin(UserAdmin):
list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
ordering = ('-id',)
class BookAdminWithTupleBooleanFilter(BookAdmin):
list_filter = ('year', 'author', 'contributors', ('is_best_seller', BooleanFieldListFilter), 'date_registered', 'no')
class DecadeFilterBookAdmin(ModelAdmin):
list_filter = ('author', DecadeListFilterWithTitleAndParameter)
ordering = ('-id',)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
list_display = ['name', 'department']
list_filter = ['department']
class ListFiltersTests(TestCase):
def setUp(self):
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.one_week_ago = self.today - datetime.timedelta(days=7)
self.request_factory = RequestFactory()
# Users
self.alfred = User.objects.create_user('alfred', '[email protected]')
self.bob = User.objects.create_user('bob', '[email protected]')
self.lisa = User.objects.create_user('lisa', '[email protected]')
# Books
self.djangonaut_book = Book.objects.create(title='Djangonaut: an art of living', year=2009, author=self.alfred, is_best_seller=True, date_registered=self.today)
self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred, is_best_seller=False, no=207)
self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob, is_best_seller=None, date_registered=self.today, no=103)
self.gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002, is_best_seller=True, date_registered=self.one_week_ago)
self.gipsy_book.contributors = [self.bob, self.lisa]
self.gipsy_book.save()
def get_changelist(self, request, model, modeladmin):
return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin)
def test_datefieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'date_registered__gte': self.today,
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Today")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today, self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
# In case one week ago is in the same month.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This month")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
if self.today.year == self.one_week_ago.year:
# In case one week ago is in the same year.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This year")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(month=1, day=1), self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': str(self.one_week_ago),
'date_registered__lt': str(self.tomorrow)})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (str(self.one_week_ago), str(self.tomorrow)))
@override_settings(USE_TZ=True)
def test_datefieldlistfilter_with_time_zone_support(self):
# Regression for #17830
self.test_datefieldlistfilter()
def test_allvaluesfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'year__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
request = self.request_factory.get('/', {'year': '2002'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_relatedfieldlistfilter_foreignkey(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'author__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_relatedfieldlistfilter_manytomany(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'contributors__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choice = select_by(filterspec.choices(changelist), "display", "bob")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
modeladmin = CustomUserAdmin(User, site)
# FK relationship -----
request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.lisa])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
# M2M relationship -----
request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.alfred])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
def test_booleanfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def verify_booleanfieldlistfilter(self, modeladmin):
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'is_best_seller__exact': 0})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "No")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')
request = self.request_factory.get('/', {'is_best_seller__exact': 1})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Yes")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')
request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Unknown")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_simplelistfilter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
# Make sure that the first option is 'All' ---------------------------
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
# Look for books in the 1980s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'the 1980\'s')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')
# Look for books in the 1990s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')
# Look for books in the 2000s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')
# Combine multiple filters -------------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.djangonaut_book])
# Make sure the correct choices are selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?publication-decade=the+00s&author__id__exact=%s' % self.alfred.pk)
def test_listfilter_without_title(self):
"""
Any filter must define a title.
"""
modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_without_parameter(self):
"""
Any SimpleListFilter must define a parameter_name.
"""
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_none_returning_lookups(self):
"""
        A SimpleListFilter lookups method can return None, which disables
        the filter completely.
"""
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
"""
Ensure that when a filter's queryset method fails, it fails loudly and
the corresponding exception doesn't get swallowed.
Refs #17828.
"""
modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
request = self.request_factory.get('/', {})
self.assertRaises(ZeroDivisionError, self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_queryset_based_lookups(self):
modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'the 1990\'s')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')
self.assertEqual(choices[2]['display'], 'the 2000\'s')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
"""
Ensure that list_filter works with two-characters long field names.
Refs #16080.
"""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'no': '207'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'number')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
"""
Ensure that a SimpleListFilter's parameter name is not mistaken for a
model field if it ends with '__isnull' or '__in'.
Refs #17091.
"""
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get('/', {'decade__in': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')
# When it ends with '__isnull' ---------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_fk_with_to_field(self):
"""
Ensure that a filter on a FK respects the FK's to_field attribute.
Refs #17972.
"""
modeladmin = EmployeeAdmin(Employee, site)
dev = Department.objects.create(code='DEV', description='Development')
design = Department.objects.create(code='DSN', description='Design')
john = Employee.objects.create(name='John Blue', department=dev)
jack = Employee.objects.create(name='Jack Red', department=design)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [jack, john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_query_set(request)
self.assertEqual(list(queryset), [john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], False)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
| bsd-3-clause |
vpramo/contrail-controller | src/vnsw/opencontrail-vrouter-netns/opencontrail_vrouter_netns/linux/utils.py | 15 | 4675 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juliano Martinez, Locaweb.
import fcntl
import os
import shlex
import signal
import socket
import struct
import tempfile
import sys
if sys.version_info[:2] == (2, 6):
import subprocess
else:
from eventlet.green import subprocess
from eventlet import greenthread
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
env=None):
return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
stderr=stderr, preexec_fn=_subprocess_setup,
close_fds=True, env=env)
def create_process(cmd, root_helper=None, addl_env=None):
"""Create a process object for the given command.
The return value will be a tuple of the process object and the
list of command arguments used to create it.
"""
if root_helper:
cmd = shlex.split(root_helper) + cmd
cmd = map(str, cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
obj = subprocess_popen(cmd, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
return obj, cmd
def execute(cmd, root_helper=None, process_input=None, addl_env=None,
check_exit_code=True, return_stderr=False):
try:
obj, cmd = create_process(cmd, root_helper=root_helper,
addl_env=addl_env)
_stdout, _stderr = (process_input and
obj.communicate(process_input) or
obj.communicate())
obj.stdin.close()
m = ("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
"Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode,
'stdout': _stdout, 'stderr': _stderr}
if obj.returncode:
if check_exit_code:
raise RuntimeError(m)
finally:
if sys.version_info[:2] == (2, 6):
pass
else:
# NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls; without
            #               it, two execute calls in a row hang on the second one
greenthread.sleep(0)
return return_stderr and (_stdout, _stderr) or _stdout
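# Illustrative usage sketch (assumed example, not part of the original file):
#
#     out = execute(['ip', 'link', 'show'], root_helper='sudo')
#     out, err = execute(['cat'], process_input='hello', return_stderr=True)
#
# The commands are hypothetical; a non-zero exit status raises RuntimeError
# unless check_exit_code=False is passed.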
def get_interface_mac(interface):
DEVICE_NAME_LEN = 15
MAC_START = 18
MAC_END = 24
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927,
struct.pack('256s', interface[:DEVICE_NAME_LEN]))
return ''.join(['%02x:' % ord(char)
for char in info[MAC_START:MAC_END]])[:-1]
def replace_file(file_name, data):
"""Replaces the contents of file_name with data in a safe manner.
First write to a temp file and then rename. Since POSIX renames are
atomic, the file is unlikely to be corrupted by competing writes.
We create the tempfile on the same device to ensure that it can be renamed.
"""
base_dir = os.path.dirname(os.path.abspath(file_name))
tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False)
tmp_file.write(data)
tmp_file.close()
os.chmod(tmp_file.name, 0o644)
os.rename(tmp_file.name, file_name)
def find_child_pids(pid):
"""Retrieve a list of the pids of child processes of the given pid."""
try:
raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid='])
except RuntimeError as e:
no_children_found = 'Exit code: 1' in str(e)
        if no_children_found:
            # 'ps' exits with status 1 when there are no child processes;
            # treat that as an empty result instead of an error.
            return []
        # Anything else is unexpected; re-raise the original error.
        raise
return [x.strip() for x in raw_pids.split('\n') if x.strip()]
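# Illustrative usage sketch (assumed example, not part of the original file):
#
#     children = find_child_pids('1234')   # hypothetical parent pid
#     # -> e.g. ['1240', '1241'], or [] when the process has no children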
| apache-2.0 |
lscheinkman/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/units.py | 70 | 4810 | """
The classes here provide support for using custom classes with
matplotlib, eg those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, eg a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation;
rather, a units implementation must provide a ConversionInterface and
register with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
def convert(value, unit):
'convert value to a scalar or array'
return dates.date2num(value)
convert = staticmethod(convert)
def axisinfo(unit):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return 'date'
default_units = staticmethod(default_units)
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
import numpy as np
from matplotlib.cbook import iterable, is_numlike
class AxisInfo:
'information to support default axis labeling and tick labeling'
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
class ConversionInterface:
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
def axisinfo(unit):
'return an units.AxisInfo instance for unit'
return None
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return None
default_units = staticmethod(default_units)
def convert(obj, unit):
"""
convert obj using unit. If obj is a sequence, return the
        converted sequence. The output must be a sequence of scalars
that can be used by the numpy array layer
"""
return obj
convert = staticmethod(convert)
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain-ol unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
is_numlike = staticmethod(is_numlike)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self): return None # nothing registered
#DISABLED idx = id(x)
#DISABLED cached = self._cached.get(idx)
#DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if converter is None and iterable(x):
# if this is anything but an object array, we'll assume
# there are no custom units
if isinstance(x, np.ndarray) and x.dtype != np.object:
return None
for thisx in x:
converter = self.get_converter( thisx )
return converter
#DISABLED self._cached[idx] = converter
return converter
registry = Registry()
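# Illustrative sketch (assumed example, not part of the original file): after
# a converter such as the DateConverter from the module docstring has been
# registered, axis code can look it up and convert values roughly like this:
#
#     converter = registry.get_converter(datetime.date(2009, 1, 1))
#     if converter is not None:
#         xs = converter.convert([datetime.date(2009, 1, 1)], 'date')
#
# The 'date' unit string matches the DateConverter example in the docstring.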
| agpl-3.0 |
gmatteo/pymatgen | pymatgen/analysis/thermochemistry.py | 5 | 3877 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
A module to perform experimental thermochemical data analysis.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jun 10, 2012"
from pymatgen.core.composition import Composition
STANDARD_TEMP = 298.0
class ThermoData:
"""
    An object container for experimental thermochemical data.
"""
def __init__(
self,
data_type,
cpdname,
phaseinfo,
formula,
value,
ref="",
method="",
temp_range=(298, 298),
uncertainty=None,
):
"""
Args:
data_type: The thermochemical data type. Should be one of the
following: fH - Formation enthalpy, S - Entropy,
A, B, C, D, E, F, G, H - variables for use in the various
                equations for generating formation enthalpies or Cp at
various temperatures.
cpdname (str): A name for the compound. For example, hematite for
Fe2O3.
phaseinfo (str): Denoting the phase. For example, "solid", "liquid",
"gas" or "tetragonal".
formula (str): A proper string formula, e.g., Fe2O3
value (float): The value of the data.
ref (str): A reference, if any, for the data.
method (str): The method by which the data was determined,
if available.
temp_range ([float, float]): Temperature range of validity for the
data in Kelvin. Defaults to 298 K only.
uncertainty (float):
An uncertainty for the data, if available.
"""
self.type = data_type
self.formula = formula
self.composition = Composition(self.formula)
self.reduced_formula = self.composition.reduced_formula
self.compound_name = cpdname
self.phaseinfo = phaseinfo
self.value = value
self.temp_range = temp_range
self.method = method
self.ref = ref
self.uncertainty = uncertainty
@classmethod
def from_dict(cls, d):
"""
Args:
d (dict): Dict representation
Returns:
ThermoData
"""
return ThermoData(
d["type"],
d["compound_name"],
d["phaseinfo"],
d["formula"],
d["value"],
d["ref"],
d["method"],
d["temp_range"],
d.get("uncertainty", None),
)
def as_dict(self):
"""
Returns: MSONable dict
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"type": self.type,
"formula": self.formula,
"compound_name": self.compound_name,
"phaseinfo": self.phaseinfo,
"value": self.value,
"temp_range": self.temp_range,
"method": self.method,
"ref": self.ref,
"uncertainty": self.uncertainty,
}
def __repr__(self):
props = [
"formula",
"compound_name",
"phaseinfo",
"type",
"temp_range",
"value",
"method",
"ref",
"uncertainty",
]
output = ["{} : {}".format(k, getattr(self, k)) for k in props]
return "\n".join(output)
def __str__(self):
return "{}_{}_{} = {}, Valid T : {}, Ref = {}".format(
self.type,
self.formula,
self.phaseinfo,
self.value,
self.temp_range,
self.ref,
)
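# Illustrative usage sketch (assumed example, not part of the original file);
# the numeric value below is made up, not reference data:
#
#     td = ThermoData("fH", "hematite", "solid", "Fe2O3", -825.5,
#                     ref="example", method="calorimetry")
#     print(td)   # -> fH_Fe2O3_solid = -825.5, Valid T : (298, 298), ...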
| mit |
YongseopKim/crosswalk-test-suite | webapi/tct-selectorslevel2-w3c-tests/inst.xpk.py | 357 | 6759 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    # No need to handle a timeout in this short script; let the tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t xpk -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0 :
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/model_selection/_split.py | 12 | 63090 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..utils.random import choice
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
        Takes class information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
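# Worked example (illustrative, not part of the original module): with
# n_samples=10 and n_splits=3, fold_sizes starts as [3, 3, 3] and the first
# 10 % 3 = 1 fold receives one extra sample, giving test folds of sizes
# [4, 3, 3] over the index ranges [0:4], [4:7] and [7:10].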
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
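# Sketch of the greedy assignment above (illustrative): groups are sorted by
# sample count in decreasing order and each one is placed on the currently
# lightest fold; e.g. group sizes [5, 3, 2, 2] with n_splits=2 end up as
# folds holding 5 + 2 = 7 and 3 + 2 = 5 samples respectively.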
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_splits)``; the last one has
    the complementary size.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedKFold, self).split(X, y, groups)
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns the first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
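# Worked example (illustrative, not part of the original module): for
# n_samples=6 and n_splits=3 we get n_folds=4, test_size = 6 // 4 = 1 and
# test_starts = range(1 + 6 % 4, 6, 1) = [3, 4, 5], yielding the splits
# ([0 1 2], [3]), ([0 1 2 3], [4]) and ([0 1 2 3 4], [5]).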
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> print(logo)
LeaveOneGroupOut()
>>> for train_index, test_index in logo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the groups while the latter uses samples
all assigned the same groups.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpgo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1))
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
X, y, groups = indexable(X, y, groups)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class GroupShuffleSplit(ShuffleSplit):
'''Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the test split. If
int, represents the absolute number of test groups. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
NOTE This does not take into account the number of samples which is known
only at split
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check if the train/test sizes are meaningful with
    respect to the size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
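# Worked example (illustrative): _validate_shuffle_split(10, 0.25, None)
# resolves to (7, 3), since n_test = ceil(0.25 * 10) = 3 and n_train falls
# back to the complement n_samples - n_test = 7.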
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
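# Illustrative usage (sketch, not part of the original module):
# >>> check_cv(5, y=[0, 1, 0, 1], classifier=True)   # doctest: +ELLIPSIS
# StratifiedKFold(n_splits=5, ...)
# >>> check_cv(None, y=[0.2, 1.1, 0.3], classifier=False)  # doctest: +ELLIPSIS
# KFold(n_splits=3, ...)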
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
oneliner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
kmiller96/Shipping-Containers-Software | lib/core.py | 1 | 8600 | # AUTHOR: Kale Miller
# DESCRIPTION: The 'main brain' of the program is held in here.
# 50726f6772616d6d696e6720697320627265616b696e67206f66206f6e652062696720696d706f737369626c65207461736b20696e746f20736576
# 6572616c207665727920736d616c6c20706f737369626c65207461736b732e
# DEVELOPMENT LOG:
# 07/12/16: Initialized file. Moved IDGenerator class into the script. Added holding bay class.
# 12/12/16: Tweaked the IDGenerator class to help remove dependancy.
# 13/12/16: Fleshed out the NewHoldingBay class.
# 15/12/16: Added methods to add auxiliary labels. Added method to generate information label. Small bug fixes.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~IMPORTS/GLOBALS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os, time
import numpy as np
from lib import containers
CONTAINER_CLASSES = [
containers.BasicContainer,
containers.HeavyContainer,
containers.RefrigeratedContainer,
containers.LiquidContainer,
containers.ExplosivesContainer,
containers.ToxicContainer,
containers.ChemicalContainer
]
CONTAINER_TYPES = ['basic', 'heavy', 'refrigerated', 'liquid', 'explosive', 'toxic', 'chemical']
SERIAL_CODES = ['B', 'H', 'R', 'L', 'E', 'T', 'C']
TAG_APPLICATION_TIME = 0.2
PRINTALL_TIME = 1
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~MAIN~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def processshipfile(filename, path):
"""Processes the csv file that the ship supplies."""
def _deletenewline(string):
"""Deletes the \n symbol from a string if it exists."""
try:
truncatedstring = string[:string.index('\n')]
except ValueError:
truncatedstring = string
finally:
return truncatedstring
try:
home = os.getcwd()
os.chdir(path)
except WindowsError: # Would this hold true on all machines?
raise NameError, "The path specified does not exist."
rawfile = open(filename, 'r')
arylines = rawfile.readlines()
basematrix = map(lambda x: _deletenewline(x).split(','), arylines)
numpyarray = np.array(basematrix)
return numpyarray
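# Illustrative usage (sketch; the manifest file name and path are
# hypothetical): processshipfile('ship_manifest.csv', '/path/to/manifests')
# returns a numpy array with one row per CSV line, i.e. the header row
# followed by one row per container.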
class IDGenerator:
"""Controls the assignment of id tags on the containers."""
# TODO: Change the __init__ such that it works by reading a collection of tuples instead of two lists.
def __init__(self):
"""Initialise the id generator."""
self._COUNTERS = [0] * len(CONTAINER_TYPES)
return
def _findindex(self, container):
"""Determines the index in the lists the class should use."""
return CONTAINER_TYPES.index(container)
def _serialcode(self, index):
"""Fetches the serial code for a supplied index."""
return SERIAL_CODES[index]
def _counter(self, index):
"""Fetches the counter for a specific serial type and increments it by one."""
self._COUNTERS[index] += 1
return self._COUNTERS[index]
def newid(self, containertype):
"""Generates a new id."""
ii = self._findindex(containertype)
idtag = self._serialcode(ii) + str(self._counter(ii)).zfill(5)
return idtag
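# Illustrative usage (sketch): successive calls yield serial-prefixed,
# zero-padded ids, e.g.
#   gen = IDGenerator()
#   gen.newid('basic')  # -> 'B00001'
#   gen.newid('heavy')  # -> 'H00001'
#   gen.newid('basic')  # -> 'B00002'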
class NewHoldingBay:
"""Creates a new holding bay for the containers. Thus it contains all of the information about the containers
along with the methods controlling unloading and loading them."""
def __init__(self):
self._path = os.getcwd()
self.idgenerator = IDGenerator()
self.containerlist = list()
self._iOnship = 0
self._iLoaded = 0
self._iHolding = 0
return None
def _createcontainer(self, containerstr, parameters):
"""Creates a new container class based off the first column of the CSV."""
# TODO: Fix this method up to catch more and print useful error messages.
if not isinstance(containerstr, str):
raise TypeError, "The parameter passed must be a string."
elif len(containerstr) == 1:
try:
ii = SERIAL_CODES.index(containerstr)
except ValueError:
raise Exception("Bad input.") # TODO: Fix this area up.
elif len(containerstr) != 1:
try:
ii = CONTAINER_TYPES.index(containerstr)
except ValueError:
raise Exception("Bad input.")
idtag = self.idgenerator.newid(CONTAINER_TYPES[ii])
return CONTAINER_CLASSES[ii](idtag, *parameters)
def defineship(self, file):
"""Pass in the CSV file of the ship in order to unload it."""
shipdata = processshipfile(file, self._path)
shipdata = shipdata[1::] # Throw out the headers.
for line in shipdata:
newcontainer = self._createcontainer(line[0], (line[1], line[3]))
self.containerlist.append(newcontainer)
self._iOnship += 1
def printcontainer(self, serial):
"""Prints the information about a specific container."""
for container in self.containerlist:
if container.id() == serial:
container.information()
return None
else:
continue
raise NameError, "Unable to find container with serial code %s" % serial
return -1
def printallinformation(self):
"""Prints the information of all the containers."""
for container in self.containerlist:
container.information()
time.sleep(PRINTALL_TIME)
return None
def unloadall(self, debug=False):
"""Unloads all of the containers from the ship."""
for container in self.containerlist:
container.unload(debug=debug)
self._iHolding += 1
self._iOnship -= 1
return None
def loadall(self, debug=False):
"""Loads all of the containers into trucks and trains."""
# TODO: Proper loading locations.
ii = 1
for container in self.containerlist:
container.load('Truck ' + str(ii).zfill(3), debug=debug)
self._iHolding -= 1
self._iLoaded += 1
ii += 1
return None
def printauditedload(self):
"""Prints information about the holding bay at this time."""
iOnship = 0; iLoaded = 0; iHolding = 0
iContainercount = [0] * len(CONTAINER_TYPES)
for container in self.containerlist:
try:
ii = CONTAINER_TYPES.index(container._type)
iContainercount[ii] += 1
except ValueError:
raise NameError, "One (or more) containers don't have a valid type."
# Print the appropriate information.
print "----------------------------------------------------------------------"
print "TOTAL CONTAINERS: %i" % len(self.containerlist); time.sleep(0.3)
print "CONTAINERS CURRENTLY STILL ON SHIP: %i" % self._iOnship; time.sleep(0.3)
print "CONTAINERS LOADED ON TRUCKS AND TRAINS: %i" % self._iLoaded; time.sleep(0.3)
print "CONTAINERS BEING HELD IN THE HOLDING BAY: %i" % self._iHolding; time.sleep(0.3)
print ""
print "THE NUMBER OF CONTAINERS FOR EACH TYPE:"; time.sleep(0.3)
for ii in xrange(len(CONTAINER_TYPES)):
if iContainercount[ii] == 0: continue
print "\t%s: %i" % (CONTAINER_TYPES[ii], iContainercount[ii]); time.sleep(0.3)
print "----------------------------------------------------------------------"
return None
def addidtags(self, debug=False):
"""Applys appropriate serial numbers to all of the containers."""
for container in self.containerlist:
print "Applying id tag to container %s" % container.id()
if not debug: time.sleep(TAG_APPLICATION_TIME)
container.addidtag()
return None
def applyauxilarylabels(self):
"""Applys the labels that should go on containers about their contents and handling."""
for container in self.containerlist:
print "Adding labels to container %s" % container.id()
container.addauxilarylabels()
return None
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
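# Illustrative usage sketch (added for documentation; not part of the original module).
# It assumes a manifest file named 'ship_manifest.csv' (hypothetical) in the working
# directory, laid out in the column format that processshipfile() expects.
if __name__ == '__main__':
    bay = NewHoldingBay()
    bay.defineship('ship_manifest.csv')   # register every container listed on the ship
    bay.addidtags(debug=True)             # debug=True skips the tag-application delay
    bay.applyauxilarylabels()             # add contents/handling labels
    bay.unloadall(debug=True)             # move every container into the holding bay
    bay.loadall(debug=True)               # dispatch the containers onto trucks
    bay.printauditedload()                # print a summary of the holding bay state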
| mit |
ashhher3/invenio | modules/bibrank/lib/bibrank_downloads_similarity.py | 19 | 4328 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = \
"$Id$"
from invenio.config import \
CFG_ACCESS_CONTROL_LEVEL_SITE, \
CFG_CERN_SITE
from invenio.dbquery import run_sql
from invenio.bibrank_downloads_indexer import database_tuples_to_single_list
from invenio.search_engine_utils import get_fieldvalues
def record_exists(recID):
"""Return 1 if record RECID exists.
Return 0 if it doesn't exist.
Return -1 if it exists but is marked as deleted.
Copy from search_engine"""
out = 0
query = "SELECT id FROM bibrec WHERE id='%s'" % recID
res = run_sql(query, None, 1)
if res:
# record exists; now check whether it isn't marked as deleted:
dbcollids = get_fieldvalues(recID, "980__%")
if ("DELETED" in dbcollids) or (CFG_CERN_SITE and "DUMMY" in dbcollids):
out = -1 # exists, but marked as deleted
else:
out = 1 # exists fine
return out
### INTERFACE
def register_page_view_event(recid, uid, client_ip_address):
"""Register Detailed record page view event for record RECID
consulted by user UID from machine CLIENT_HOST_IP.
To be called by the search engine.
"""
if CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
# do not register access if we are in read-only access control
# site mode:
return []
return run_sql("INSERT DELAYED INTO rnkPAGEVIEWS " \
" (id_bibrec,id_user,client_host,view_time) " \
" VALUES (%s,%s,INET_ATON(%s),NOW())", \
(recid, uid, client_ip_address))
def calculate_reading_similarity_list(recid, type="pageviews"):
"""Calculate reading similarity data to use in reading similarity
boxes (``people who downloaded/viewed this file/page have also
downloaded/viewed''). Return list of (recid1, score1),
(recid2,score2), ... for all recidN that were consulted by the
same people who have also consulted RECID. The reading
similarity TYPE can be either `pageviews' or `downloads',
depending whether we want to obtain page view similarity or
download similarity.
"""
if CFG_CERN_SITE:
return [] # CERN hack 2009-11-23 to ease the load
if type == "downloads":
tablename = "rnkDOWNLOADS"
else: # default
tablename = "rnkPAGEVIEWS"
# firstly compute the set of client hosts who consulted recid:
client_host_list = run_sql("SELECT DISTINCT(client_host)" + \
" FROM " + tablename + \
" WHERE id_bibrec=%s " + \
" AND client_host IS NOT NULL",
(recid,))
# secondly look up all recids that were consulted by these client hosts,
# and order them by the number of different client hosts reading them:
res = []
if client_host_list != ():
client_host_list = str(database_tuples_to_single_list(client_host_list))
client_host_list = client_host_list.replace("L", "")
client_host_list = client_host_list.replace("[", "")
client_host_list = client_host_list.replace("]", "")
res = run_sql("SELECT id_bibrec,COUNT(DISTINCT(client_host)) AS c" \
" FROM " + tablename + \
" WHERE client_host IN (" + client_host_list + ")" + \
" AND id_bibrec != %s" \
" GROUP BY id_bibrec ORDER BY c DESC LIMIT 10",
(recid,))
return res
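# Illustrative usage sketch (added for documentation; not part of the original module).
# It assumes a configured Invenio installation with populated rnkPAGEVIEWS /
# rnkDOWNLOADS tables, and an existing record id such as 1234 (hypothetical).
if __name__ == '__main__':
    recid = 1234
    if record_exists(recid) == 1:
        register_page_view_event(recid, uid=0, client_ip_address='127.0.0.1')
        for similar_recid, score in calculate_reading_similarity_list(recid, type="downloads"):
            print "record %s was also downloaded from %s distinct host(s)" % (similar_recid, score)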
| gpl-2.0 |
zkota/pyblio-1.2 | pybrc.py | 2 | 1564 | # Site configuration
from Pyblio import Autoload, Config, version
from Pyblio.TextUI import *
# ==================================================
import os
# define autoloaded formats
Autoload.preregister ('format', 'BibTeX', 'Pyblio.Format.BibTeX', '.*\.bib')
Autoload.preregister ('format', 'Ovid', 'Pyblio.Format.Ovid', '.*\.ovid')
Autoload.preregister ('format', 'Medline', 'Pyblio.Format.Medline', '.*\.med')
Autoload.preregister ('format', 'Refer', 'Pyblio.Format.Refer', '.*\.refer')
Autoload.preregister ('format', 'ISIFile', 'Pyblio.Format.isifile', '.*\.isi')
# define styles and outputs
Autoload.preregister ('style', 'Generic', 'Pyblio.Style.Generic')
Autoload.preregister ('style', 'apa4e', 'Pyblio.Style.apa4e')
Autoload.preregister ('style', 'abbrv', 'Pyblio.Style.abbrv')
Autoload.preregister ('output', 'Text', 'Pyblio.Output.text')
Autoload.preregister ('output', 'Raw', 'Pyblio.Output.raw')
Autoload.preregister ('output', 'HTML', 'Pyblio.Output.html')
Autoload.preregister ('output', 'LaTeX', 'Pyblio.Output.LaTeX')
Autoload.preregister ('output', 'Textnum', 'Pyblio.Output.textnum')
Autoload.preregister ('output', 'Textau', 'Pyblio.Output.textau')
# define key formats
Autoload.preregister ('key', 'Default', 'Pyblio.Utils')
# Parse the configuration directory
rootconfig = os.path.join ('Pyblio', 'ConfDir')
if not os.path.isdir (rootconfig):
rootconfig = os.path.join (version.pybdir, 'Pyblio', 'ConfDir')
if os.path.isdir (rootconfig):
Config.parse_directory (rootconfig)
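# Illustrative extension point (added for documentation; not part of the original file):
# a site-local module could be preregistered the same way, assuming a module such as
# Pyblio.Format.MyFormat (hypothetical) is importable:
#
# Autoload.preregister ('format', 'MyFormat', 'Pyblio.Format.MyFormat', '.*\.myf')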
| gpl-2.0 |
bryceguo/robotframework-selenium2library | demo/package.py | 4 | 1378 | #!/usr/bin/env python
import os, sys
from time import localtime
from zipfile import ZipFile, ZIP_DEFLATED
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(THIS_DIR, "..", "src", "Selenium2Library"))
import metadata
FILES = {
'': ['rundemo.py'],
'login_tests': ['valid_login.txt', 'invalid_login.txt', 'resource.txt'],
'demoapp': ['server.py'],
'demoapp/html': ['index.html', 'welcome.html', 'error.html', 'demo.css']
}
def main():
cwd = os.getcwd()
try:
os.chdir(THIS_DIR)
name = 'robotframework-selenium2library-%s-demo' % metadata.VERSION
zipname = '%s.zip' % name
if os.path.exists(zipname):
os.remove(zipname)
zipfile = ZipFile(zipname, 'w', ZIP_DEFLATED)
for dirname in FILES:
for filename in FILES[dirname]:
path = os.path.join('.', dirname.replace('/', os.sep), filename)
print 'Adding: ', os.path.normpath(path)
zipfile.write(path, os.path.join(name, path))
zipfile.close()
target_path = os.path.join('..', 'dist', zipname)
if os.path.exists(target_path):
os.remove(target_path)
os.rename(zipname, target_path)
print 'Created: ', os.path.abspath(target_path)
finally:
os.chdir(cwd)
if __name__ == '__main__':
main()
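# Illustrative invocation (added for documentation; not part of the original script):
# running `python package.py` from this demo directory packs the files listed in FILES
# into ../dist/robotframework-selenium2library-<VERSION>-demo.zip.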
| apache-2.0 |