repo_name (stringlengths 5–100) | path (stringlengths 4–375) | copies (stringclasses, 991 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
kar1m/firefox-ios
|
scripts/clean-xliff.py
|
41
|
2284
|
#! /usr/bin/env python
#
# clean-xliff.py <l10n_folder>
#
# Remove targets from a locale, remove target-language attribute
#
from glob import glob
from lxml import etree
import argparse
import os
NS = {'x':'urn:oasis:names:tc:xliff:document:1.2'}
def indent(elem, level=0):
# Prettify XML output
# http://effbot.org/zone/element-lib.htm#prettyprint
i = '\n' + level*' '
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + ' '
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def main():
xliff_filename = 'firefox-ios.xliff'
parser = argparse.ArgumentParser()
parser.add_argument('l10n_folder', help='Path to locale folder to clean up')
args = parser.parse_args()
file_path = os.path.join(
os.path.realpath(args.l10n_folder),
xliff_filename
)
print 'Updating %s' % file_path
# Read localized file XML
locale_tree = etree.parse(file_path)
locale_root = locale_tree.getroot()
# Remove existing localizations and target-language
for trans_node in locale_root.xpath('//x:trans-unit', namespaces=NS):
for child in trans_node.xpath('./x:target', namespaces=NS):
child.getparent().remove(child)
# Remove target-language where defined
for file_node in locale_root.xpath('//x:file', namespaces=NS):
if file_node.get('target-language'):
file_node.attrib.pop('target-language')
# Replace the existing locale file with the new XML content
with open(file_path, 'w') as fp:
# Fix indentations
indent(locale_root)
xliff_content = etree.tostring(
locale_tree,
encoding='UTF-8',
xml_declaration=True,
pretty_print=True
)
fp.write(xliff_content)
if __name__ == '__main__':
main()
|
mpl-2.0
|
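For reference, the same clean-up can be sketched against an in-memory XLIFF snippet; the sample document and the Client/Info.plist path below are illustrative placeholders, not taken from the real firefox-ios.xliff:

from lxml import etree

NS = {'x': 'urn:oasis:names:tc:xliff:document:1.2'}
sample = etree.fromstring(
    '<xliff xmlns="urn:oasis:names:tc:xliff:document:1.2">'
    '<file original="Client/Info.plist" source-language="en" target-language="fr">'
    '<body><trans-unit id="CFBundleName">'
    '<source>Firefox</source><target>Firefox</target>'
    '</trans-unit></body></file></xliff>'
)
# Drop every <target> under a <trans-unit>, exactly as the script does.
for target in sample.xpath('//x:trans-unit/x:target', namespaces=NS):
    target.getparent().remove(target)
# Drop the target-language attribute from each <file> node.
for file_node in sample.xpath('//x:file', namespaces=NS):
    file_node.attrib.pop('target-language', None)
print(etree.tostring(sample, pretty_print=True))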
fossoult/odoo
|
addons/stock/tests/test_owner_available.py
|
233
|
3002
|
# Author: Leonardo Pistone
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp.addons.stock.tests.common import TestStockCommon
class TestVirtualAvailable(TestStockCommon):
def setUp(self):
super(TestVirtualAvailable, self).setUp()
self.env['stock.quant'].create({
'product_id': self.productA.id,
'location_id': self.stock_location,
'qty': 30.0,
})
self.env['stock.quant'].create({
'product_id': self.productA.id,
'location_id': self.stock_location,
'qty': 10.0,
'owner_id': self.ref('base.res_partner_4'),
})
self.picking_out = self.env['stock.picking'].create({
'picking_type_id': self.ref('stock.picking_type_out')})
self.env['stock.move'].create({
'name': 'a move',
'product_id': self.productA.id,
'product_uom_qty': 3.0,
'product_uom': self.productA.uom_id.id,
'picking_id': self.picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.picking_out_2 = self.env['stock.picking'].create({
'picking_type_id': self.ref('stock.picking_type_out')})
self.env['stock.move'].create({
'restrict_partner_id': self.ref('base.res_partner_4'),
'name': 'another move',
'product_id': self.productA.id,
'product_uom_qty': 5.0,
'product_uom': self.productA.uom_id.id,
'picking_id': self.picking_out_2.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
def test_without_owner(self):
self.assertAlmostEqual(40.0, self.productA.virtual_available)
self.picking_out.action_assign()
self.picking_out_2.action_assign()
self.assertAlmostEqual(32.0, self.productA.virtual_available)
def test_with_owner(self):
prod_context = self.productA.with_context(
owner_id=self.ref('base.res_partner_4')
)
self.assertAlmostEqual(10.0, prod_context.virtual_available)
self.picking_out.action_assign()
self.picking_out_2.action_assign()
self.assertAlmostEqual(5.0, prod_context.virtual_available)
|
agpl-3.0
|
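The quantities asserted in the two tests follow directly from the setup data above; a small arithmetic sketch in plain Python (not Odoo API calls):

# test_without_owner: both quants count, and both outgoing moves are reserved.
on_hand_total = 30.0 + 10.0            # the two stock.quant records above
outgoing_total = 3.0 + 5.0             # the two stock.move records above
assert on_hand_total == 40.0           # before action_assign
assert on_hand_total - outgoing_total == 32.0   # after action_assign

# test_with_owner: only the quant owned by res_partner_4 counts, and only the
# move restricted to that partner is deducted.
owned_qty = 10.0
owner_restricted_out = 5.0
assert owned_qty == 10.0               # before action_assign
assert owned_qty - owner_restricted_out == 5.0  # after action_assign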
comandrei/django-haystack
|
haystack/signals.py
|
27
|
3163
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django.db import models
from haystack.exceptions import NotHandled
class BaseSignalProcessor(object):
"""
A convenient way to attach Haystack to Django's signals & cause things to
index.
By default, does nothing with signals but provides underlying functionality.
"""
def __init__(self, connections, connection_router):
self.connections = connections
self.connection_router = connection_router
self.setup()
def setup(self):
"""
A hook for setting up anything necessary for
``handle_save/handle_delete`` to be executed.
Default behavior is to do nothing (``pass``).
"""
# Do nothing.
pass
def teardown(self):
"""
A hook for tearing down anything necessary for
``handle_save/handle_delete`` to no longer be executed.
Default behavior is to do nothing (``pass``).
"""
# Do nothing.
pass
def handle_save(self, sender, instance, **kwargs):
"""
Given an individual model instance, determine which backends the
update should be sent to & update the object on those backends.
"""
using_backends = self.connection_router.for_write(instance=instance)
for using in using_backends:
try:
index = self.connections[using].get_unified_index().get_index(sender)
index.update_object(instance, using=using)
except NotHandled:
# TODO: Maybe log it or let the exception bubble?
pass
def handle_delete(self, sender, instance, **kwargs):
"""
Given an individual model instance, determine which backends the
delete should be sent to & delete the object on those backends.
"""
using_backends = self.connection_router.for_write(instance=instance)
for using in using_backends:
try:
index = self.connections[using].get_unified_index().get_index(sender)
index.remove_object(instance, using=using)
except NotHandled:
# TODO: Maybe log it or let the exception bubble?
pass
class RealtimeSignalProcessor(BaseSignalProcessor):
"""
Allows for observing when saves/deletes fire & automatically updates the
search engine appropriately.
"""
def setup(self):
# Naive (listen to all model saves).
models.signals.post_save.connect(self.handle_save)
models.signals.post_delete.connect(self.handle_delete)
# Efficient would be going through all backends & collecting all models
# being used, then hooking up signals only for those.
def teardown(self):
# Naive (listen to all model saves).
models.signals.post_save.disconnect(self.handle_save)
models.signals.post_delete.disconnect(self.handle_delete)
# Efficient would be going through all backends & collecting all models
# being used, then disconnecting signals only for those.
|
bsd-3-clause
|
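The comments in RealtimeSignalProcessor.setup note that a more efficient processor would connect signals only for the models that are actually indexed. A minimal sketch of that idea, assuming a hypothetical Note model in a myapp application; a project would then point its HAYSTACK_SIGNAL_PROCESSOR setting at the dotted path of such a class:

from django.db import models

from haystack.signals import BaseSignalProcessor
from myapp.models import Note  # hypothetical model


class NoteOnlySignalProcessor(BaseSignalProcessor):
    """Update the search index only when Note instances are saved or deleted."""

    def setup(self):
        models.signals.post_save.connect(self.handle_save, sender=Note)
        models.signals.post_delete.connect(self.handle_delete, sender=Note)

    def teardown(self):
        models.signals.post_save.disconnect(self.handle_save, sender=Note)
        models.signals.post_delete.disconnect(self.handle_delete, sender=Note)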
florian-dacosta/OCB
|
addons/crm_partner_assign/report/crm_lead_report.py
|
309
|
5104
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
from openerp.addons.crm import crm
class crm_lead_report_assign(osv.osv):
""" CRM Lead Report """
_name = "crm.lead.report.assign"
_auto = False
_description = "CRM Lead Report"
_columns = {
'partner_assigned_id':fields.many2one('res.partner', 'Partner', readonly=True),
'grade_id':fields.many2one('res.partner.grade', 'Grade', readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'country_id':fields.many2one('res.country', 'Country', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Sales Team', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'date_assign': fields.date('Assign Date', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True),
'delay_open': fields.float('Delay to Assign',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
'delay_close': fields.float('Delay to Close',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'delay_expected': fields.float('Overpassed Deadline',digits=(16,2),readonly=True, group_operator="avg"),
'probability': fields.float('Avg Probability',digits=(16,2),readonly=True, group_operator="avg"),
'probability_max': fields.float('Max Probability',digits=(16,2),readonly=True, group_operator="max"),
'planned_revenue': fields.float('Planned Revenue',digits=(16,2),readonly=True),
'probable_revenue': fields.float('Probable Revenue', digits=(16,2),readonly=True),
'stage_id': fields.many2one ('crm.case.stage', 'Stage', domain="[('section_ids', '=', section_id)]"),
'partner_id': fields.many2one('res.partner', 'Customer' , readonly=True),
'opening_date': fields.datetime('Opening Date', readonly=True),
'date_closed': fields.datetime('Close Date', readonly=True),
'nbr': fields.integer('# of Cases', readonly=True), # TDE FIXME master: rename into nbr_cases
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
'type':fields.selection([
('lead','Lead'),
('opportunity','Opportunity')
],'Type', help="Type is used to separate Leads and Opportunities"),
}
def init(self, cr):
"""
CRM Lead Report
@param cr: the current row, from the database cursor
"""
tools.drop_view_if_exists(cr, 'crm_lead_report_assign')
cr.execute("""
CREATE OR REPLACE VIEW crm_lead_report_assign AS (
SELECT
c.id,
c.date_open as opening_date,
c.date_closed as date_closed,
c.date_assign,
c.user_id,
c.probability,
c.probability as probability_max,
c.stage_id,
c.type,
c.company_id,
c.priority,
c.section_id,
c.partner_id,
c.country_id,
c.planned_revenue,
c.partner_assigned_id,
p.grade_id,
p.date as partner_date,
c.planned_revenue*(c.probability/100) as probable_revenue,
1 as nbr,
c.create_date as create_date,
extract('epoch' from (c.write_date-c.create_date))/(3600*24) as delay_close,
extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24) as delay_expected,
extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
FROM
crm_lead c
left join res_partner p on (c.partner_assigned_id=p.id)
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
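Once the module is installed, the view can be sanity-checked with the same cursor API that init() uses above; a sketch assuming an open database cursor cr:

# Average days to close and case count per partner grade, straight from the view.
cr.execute("""
    SELECT grade_id, avg(delay_close) AS avg_days_to_close, sum(nbr) AS cases
    FROM crm_lead_report_assign
    GROUP BY grade_id
""")
rows = cr.fetchall()   # [(grade_id, avg_days_to_close, cases), ...]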
ryansb/boto
|
boto/dynamodb2/layer1.py
|
134
|
152740
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from binascii import crc32
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.dynamodb2 import exceptions
class DynamoDBConnection(AWSQueryConnection):
"""
Amazon DynamoDB
**Overview**
This is the Amazon DynamoDB API Reference. This guide provides
descriptions and samples of the low-level DynamoDB API. For
information about DynamoDB application development, go to the
`Amazon DynamoDB Developer Guide`_.
Instead of making the requests to the low-level DynamoDB API
directly from your application, we recommend that you use the AWS
Software Development Kits (SDKs). The easy-to-use libraries in the
AWS SDKs make it unnecessary to call the low-level DynamoDB API
directly from your application. The libraries take care of request
authentication, serialization, and connection management. For more
information, go to `Using the AWS SDKs with DynamoDB`_ in the
Amazon DynamoDB Developer Guide .
If you decide to code against the low-level DynamoDB API directly,
you will need to write the necessary code to authenticate your
requests. For more information on signing your requests, go to
`Using the DynamoDB API`_ in the Amazon DynamoDB Developer Guide .
The following are short descriptions of each low-level API action,
organized by function.
**Managing Tables**
+ CreateTable - Creates a table with user-specified provisioned
throughput settings. You must designate one attribute as the hash
primary key for the table; you can optionally designate a second
attribute as the range primary key. DynamoDB creates indexes on
these key attributes for fast data access. Optionally, you can
create one or more secondary indexes, which provide fast data
access using non-key attributes.
+ DescribeTable - Returns metadata for a table, such as table
size, status, and index information.
+ UpdateTable - Modifies the provisioned throughput settings for a
table. Optionally, you can modify the provisioned throughput
settings for global secondary indexes on the table.
+ ListTables - Returns a list of all tables associated with the
current AWS account and endpoint.
+ DeleteTable - Deletes a table and all of its indexes.
For conceptual information about managing tables, go to `Working
with Tables`_ in the Amazon DynamoDB Developer Guide .
**Reading Data**
+ GetItem - Returns a set of attributes for the item that has a
given primary key. By default, GetItem performs an eventually
consistent read; however, applications can specify a strongly
consistent read instead.
+ BatchGetItem - Performs multiple GetItem requests for data items
using their primary keys, from one table or multiple tables. The
response from BatchGetItem has a size limit of 16 MB and returns a
maximum of 100 items. Both eventually consistent and strongly
consistent reads can be used.
+ Query - Returns one or more items from a table or a secondary
index. You must provide a specific hash key value. You can narrow
the scope of the query using comparison operators against a range
key value, or on the index key. Query supports either eventual or
strong consistency. A single response has a size limit of 1 MB.
+ Scan - Reads every item in a table; the result set is eventually
consistent. You can limit the number of items returned by
filtering the data attributes, using conditional expressions. Scan
can be used to enable ad-hoc querying of a table against non-key
attributes; however, since this is a full table scan without using
an index, Scan should not be used for any application query use
case that requires predictable performance.
For conceptual information about reading data, go to `Working with
Items`_ and `Query and Scan Operations`_ in the Amazon DynamoDB
Developer Guide .
**Modifying Data**
+ PutItem - Creates a new item, or replaces an existing item with
a new item (including all the attributes). By default, if an item
in the table already exists with the same primary key, the new
item completely replaces the existing item. You can use
conditional operators to replace an item only if its attribute
values match certain conditions, or to insert a new item only if
that item doesn't already exist.
+ UpdateItem - Modifies the attributes of an existing item. You
can also use conditional operators to perform an update only if
the item's attribute values match certain conditions.
+ DeleteItem - Deletes an item in a table by primary key. You can
use conditional operators to perform a delete only if the
item's attribute values match certain conditions.
+ BatchWriteItem - Performs multiple PutItem and DeleteItem
requests across multiple tables in a single request. A failure of
any request(s) in the batch will not cause the entire
BatchWriteItem operation to fail. Supports batches of up to 25
items to put or delete, with a maximum total request size of 16
MB.
For conceptual information about modifying data, go to `Working
with Items`_ and `Query and Scan Operations`_ in the Amazon
DynamoDB Developer Guide .
"""
APIVersion = "2012-08-10"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "dynamodb.us-east-1.amazonaws.com"
ServiceName = "DynamoDB"
TargetPrefix = "DynamoDB_20120810"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ConditionalCheckFailedException": exceptions.ConditionalCheckFailedException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalServerError": exceptions.InternalServerError,
"ItemCollectionSizeLimitExceededException": exceptions.ItemCollectionSizeLimitExceededException,
}
NumberRetries = 10
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
validate_checksums = kwargs.pop('validate_checksums', True)
if not region:
region_name = boto.config.get('DynamoDB', 'region',
self.DefaultRegionName)
for reg in boto.dynamodb2.regions():
if reg.name == region_name:
region = reg
break
# Only set host if it isn't manually overwritten
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(DynamoDBConnection, self).__init__(**kwargs)
self.region = region
self._validate_checksums = boto.config.getbool(
'DynamoDB', 'validate_checksums', validate_checksums)
self.throughput_exceeded_events = 0
def _required_auth_capability(self):
return ['hmac-v4']
def batch_get_item(self, request_items, return_consumed_capacity=None):
"""
The BatchGetItem operation returns the attributes of one or
more items from one or more tables. You identify requested
items by primary key.
A single operation can retrieve up to 16 MB of data, which can
contain as many as 100 items. BatchGetItem will return a
partial result if the response size limit is exceeded, the
table's provisioned throughput is exceeded, or an internal
processing failure occurs. If a partial result is returned,
the operation returns a value for UnprocessedKeys . You can
use this value to retry the operation starting with the next
item to get.
For example, if you ask to retrieve 100 items, but each
individual item is 300 KB in size, the system returns 52 items
(so as not to exceed the 16 MB limit). It also returns an
appropriate UnprocessedKeys value so you can get the next page
of results. If desired, your application can include its own
logic to assemble the pages of results into one data set.
If none of the items can be processed due to insufficient
provisioned throughput on all of the tables in the request,
then BatchGetItem will return a
ProvisionedThroughputExceededException . If at least one of
the items is successfully processed, then BatchGetItem
completes successfully, while returning the keys of the unread
items in UnprocessedKeys .
If DynamoDB returns any unprocessed items, you should retry
the batch operation on those items. However, we strongly
recommend that you use an exponential backoff algorithm . If
you retry the batch operation immediately, the underlying read
or write requests can still fail due to throttling on the
individual tables. If you delay the batch operation using
exponential backoff, the individual requests in the batch are
much more likely to succeed.
For more information, go to `Batch Operations and Error
Handling`_ in the Amazon DynamoDB Developer Guide .
By default, BatchGetItem performs eventually consistent reads
on every table in the request. If you want strongly consistent
reads instead, you can set ConsistentRead to `True` for any or
all tables.
In order to minimize response latency, BatchGetItem retrieves
items in parallel.
When designing your application, keep in mind that DynamoDB
does not return attributes in any particular order. To help
parse the response by item, include the primary key values for
the items in your request in the AttributesToGet parameter.
If a requested item does not exist, it is not returned in the
result. Requests for nonexistent items consume the minimum
read capacity units according to the type of read. For more
information, see `Capacity Units Calculations`_ in the Amazon
DynamoDB Developer Guide .
:type request_items: map
:param request_items:
A map of one or more table names and, for each table, the corresponding
primary keys for the items to retrieve. Each table name can be
invoked only once.
Each element in the map consists of the following:
+ Keys - An array of primary key attribute values that define specific
items in the table. For each primary key, you must provide all of
the key attributes. For example, with a hash type primary key, you
only need to specify the hash attribute. For a hash-and-range type
primary key, you must specify both the hash attribute and the range
attribute.
+ AttributesToGet - One or more attributes to be retrieved from the
table. By default, all attributes are returned. If a specified
attribute is not found, it does not appear in the result. Note that
AttributesToGet has no effect on provisioned throughput
consumption. DynamoDB determines capacity units consumed based on
item size, not on the amount of data that is returned to an
application.
+ ConsistentRead - If `True`, a strongly consistent read is used; if
`False` (the default), an eventually consistent read is used.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
"""
params = {'RequestItems': request_items, }
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
return self.make_request(action='BatchGetItem',
body=json.dumps(params))
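# Usage sketch, assuming a hypothetical 'users' table whose hash key is
# 'username' (table and attribute names are illustrative only):
#
#   conn = DynamoDBConnection()
#   conn.batch_get_item(
#       request_items={
#           'users': {
#               'Keys': [{'username': {'S': 'alice'}},
#                        {'username': {'S': 'bob'}}],
#               'ConsistentRead': True,
#           }
#       },
#       return_consumed_capacity='TOTAL')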
def batch_write_item(self, request_items, return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
The BatchWriteItem operation puts or deletes multiple items in
one or more tables. A single call to BatchWriteItem can write
up to 16 MB of data, which can comprise as many as 25 put or
delete requests. Individual items to be written can be as
large as 400 KB.
BatchWriteItem cannot update items. To update items, use the
UpdateItem API.
The individual PutItem and DeleteItem operations specified in
BatchWriteItem are atomic; however BatchWriteItem as a whole
is not. If any requested operations fail because the table's
provisioned throughput is exceeded or an internal processing
failure occurs, the failed operations are returned in the
UnprocessedItems response parameter. You can investigate and
optionally resend the requests. Typically, you would call
BatchWriteItem in a loop. Each iteration would check for
unprocessed items and submit a new BatchWriteItem request with
those unprocessed items until all items have been processed.
Note that if none of the items can be processed due to
insufficient provisioned throughput on all of the tables in
the request, then BatchWriteItem will return a
ProvisionedThroughputExceededException .
If DynamoDB returns any unprocessed items, you should retry
the batch operation on those items. However, we strongly
recommend that you use an exponential backoff algorithm . If
you retry the batch operation immediately, the underlying read
or write requests can still fail due to throttling on the
individual tables. If you delay the batch operation using
exponential backoff, the individual requests in the batch are
much more likely to succeed.
For more information, go to `Batch Operations and Error
Handling`_ in the Amazon DynamoDB Developer Guide .
With BatchWriteItem , you can efficiently write or delete
large amounts of data, such as from Amazon Elastic MapReduce
(EMR), or copy data from another database into DynamoDB. In
order to improve performance with these large-scale
operations, BatchWriteItem does not behave in the same way as
individual PutItem and DeleteItem calls would. For example, you
cannot specify conditions on individual put and delete
requests, and BatchWriteItem does not return deleted items in
the response.
If you use a programming language that supports concurrency,
such as Java, you can use threads to write items in parallel.
Your application must include the necessary logic to manage
the threads. With languages that don't support threading, such
as PHP, you must update or delete the specified items one at a
time. In both situations, BatchWriteItem provides an
alternative where the API performs the specified put and
delete operations in parallel, giving you the power of the
thread pool approach without having to introduce complexity
into your application.
Parallel processing reduces latency, but each specified put
and delete request consumes the same number of write capacity
units whether it is processed in parallel or not. Delete
operations on nonexistent items consume one write capacity
unit.
If one or more of the following is true, DynamoDB rejects the
entire batch write operation:
+ One or more tables specified in the BatchWriteItem request
does not exist.
+ Primary key attributes specified on an item in the request
do not match those in the corresponding table's primary key
schema.
+ You try to perform multiple operations on the same item in
the same BatchWriteItem request. For example, you cannot put
and delete the same item in the same BatchWriteItem request.
+ There are more than 25 requests in the batch.
+ Any individual item in a batch exceeds 400 KB.
+ The total request size exceeds 16 MB.
:type request_items: map
:param request_items:
A map of one or more table names and, for each table, a list of
operations to be performed ( DeleteRequest or PutRequest ). Each
element in the map consists of the following:
+ DeleteRequest - Perform a DeleteItem operation on the specified item.
The item to be deleted is identified by a Key subelement:
+ Key - A map of primary key attribute values that uniquely identify
the item. Each entry in this map consists of an attribute name
and an attribute value. For each primary key, you must provide all
of the key attributes. For example, with a hash type primary key,
you only need to specify the hash attribute. For a hash-and-range
type primary key, you must specify both the hash attribute and the
range attribute.
+ PutRequest - Perform a PutItem operation on the specified item. The
item to be put is identified by an Item subelement:
+ Item - A map of attributes and their values. Each entry in this map
consists of an attribute name and an attribute value. Attribute
values must not be null; string and binary type attributes must
have lengths greater than zero; and set type attributes must not be
empty. Requests that contain empty values will be rejected with a
ValidationException exception. If you specify any attributes that
are part of an index key, then the data types for those attributes
must match those of the schema in the table's attribute definition.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: A value that if set to `SIZE`,
the response includes statistics about item collections, if any,
that were modified during the operation. If set to `NONE` (the
default), no statistics are
returned.
"""
params = {'RequestItems': request_items, }
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
return self.make_request(action='BatchWriteItem',
body=json.dumps(params))
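# Usage sketch for the same hypothetical 'users' table: one put and one delete
# in a single request, using the PutRequest/DeleteRequest format described above.
#
#   conn.batch_write_item({
#       'users': [
#           {'PutRequest': {'Item': {'username': {'S': 'carol'},
#                                    'status': {'S': 'Available'}}}},
#           {'DeleteRequest': {'Key': {'username': {'S': 'bob'}}}},
#       ]
#   })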
def create_table(self, attribute_definitions, table_name, key_schema,
provisioned_throughput, local_secondary_indexes=None,
global_secondary_indexes=None):
"""
The CreateTable operation adds a new table to your account. In
an AWS account, table names must be unique within each region.
That is, you can have two tables with same name if you create
the tables in different regions.
CreateTable is an asynchronous operation. Upon receiving a
CreateTable request, DynamoDB immediately returns a response
with a TableStatus of `CREATING`. After the table is created,
DynamoDB sets the TableStatus to `ACTIVE`. You can perform
read and write operations only on an `ACTIVE` table.
You can optionally define secondary indexes on the new table,
as part of the CreateTable operation. If you want to create
multiple tables with secondary indexes on them, you must
create the tables sequentially. Only one table with secondary
indexes can be in the `CREATING` state at any given time.
You can use the DescribeTable API to check the table status.
:type attribute_definitions: list
:param attribute_definitions: An array of attributes that describe the
key schema for the table and indexes.
:type table_name: string
:param table_name: The name of the table to create.
:type key_schema: list
:param key_schema: Specifies the attributes that make up the primary
key for a table or an index. The attributes in KeySchema must also
be defined in the AttributeDefinitions array. For more information,
see `Data Model`_ in the Amazon DynamoDB Developer Guide .
Each KeySchemaElement in the array is composed of:
+ AttributeName - The name of this key attribute.
+ KeyType - Determines whether the key attribute is `HASH` or `RANGE`.
For a primary key that consists of a hash attribute, you must specify
exactly one element with a KeyType of `HASH`.
For a primary key that consists of hash and range attributes, you must
specify exactly two elements, in this order: The first element must
have a KeyType of `HASH`, and the second element must have a
KeyType of `RANGE`.
For more information, see `Specifying the Primary Key`_ in the Amazon
DynamoDB Developer Guide .
:type local_secondary_indexes: list
:param local_secondary_indexes:
One or more local secondary indexes (the maximum is five) to be created
on the table. Each index is scoped to a given hash key value. There
is a 10 GB size limit per hash key; otherwise, the size of a local
secondary index is unconstrained.
Each local secondary index in the array includes the following:
+ IndexName - The name of the local secondary index. Must be unique
only for this table.
+ KeySchema - Specifies the key schema for the local secondary index.
The key schema must begin with the same hash key attribute as the
table.
+ Projection - Specifies attributes that are copied (projected) from
the table into the index. These are in addition to the primary key
attributes and index key attributes, which are automatically
projected. Each attribute specification is composed of:
+ ProjectionType - One of the following:
+ `KEYS_ONLY` - Only the index and primary keys are projected into the
index.
+ `INCLUDE` - Only the specified table attributes are projected into
the index. The list of projected attributes are in NonKeyAttributes
.
+ `ALL` - All of the table attributes are projected into the index.
+ NonKeyAttributes - A list of one or more non-key attribute names that
are projected into the secondary index. The total count of
attributes specified in NonKeyAttributes , summed across all of the
secondary indexes, must not exceed 20. If you project the same
attribute into two different indexes, this counts as two distinct
attributes when determining the total.
:type global_secondary_indexes: list
:param global_secondary_indexes:
One or more global secondary indexes (the maximum is five) to be
created on the table. Each global secondary index in the array
includes the following:
+ IndexName - The name of the global secondary index. Must be unique
only for this table.
+ KeySchema - Specifies the key schema for the global secondary index.
+ Projection - Specifies attributes that are copied (projected) from
the table into the index. These are in addition to the primary key
attributes and index key attributes, which are automatically
projected. Each attribute specification is composed of:
+ ProjectionType - One of the following:
+ `KEYS_ONLY` - Only the index and primary keys are projected into the
index.
+ `INCLUDE` - Only the specified table attributes are projected into
the index. The list of projected attributes are in NonKeyAttributes
.
+ `ALL` - All of the table attributes are projected into the index.
+ NonKeyAttributes - A list of one or more non-key attribute names that
are projected into the secondary index. The total count of
attributes specified in NonKeyAttributes , summed across all of the
secondary indexes, must not exceed 20. If you project the same
attribute into two different indexes, this counts as two distinct
attributes when determining the total.
+ ProvisionedThroughput - The provisioned throughput settings for the
global secondary index, consisting of read and write capacity
units.
:type provisioned_throughput: dict
:param provisioned_throughput: Represents the provisioned throughput
settings for a specified table or index. The settings can be
modified using the UpdateTable operation.
For current minimum and maximum provisioned throughput values, see
`Limits`_ in the Amazon DynamoDB Developer Guide .
"""
params = {
'AttributeDefinitions': attribute_definitions,
'TableName': table_name,
'KeySchema': key_schema,
'ProvisionedThroughput': provisioned_throughput,
}
if local_secondary_indexes is not None:
params['LocalSecondaryIndexes'] = local_secondary_indexes
if global_secondary_indexes is not None:
params['GlobalSecondaryIndexes'] = global_secondary_indexes
return self.make_request(action='CreateTable',
body=json.dumps(params))
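# Usage sketch: creating the hypothetical 'users' table with a single hash key
# attribute; the throughput figures are illustrative only.
#
#   conn.create_table(
#       attribute_definitions=[
#           {'AttributeName': 'username', 'AttributeType': 'S'},
#       ],
#       table_name='users',
#       key_schema=[
#           {'AttributeName': 'username', 'KeyType': 'HASH'},
#       ],
#       provisioned_throughput={'ReadCapacityUnits': 5,
#                               'WriteCapacityUnits': 5})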
def delete_item(self, table_name, key, expected=None,
conditional_operator=None, return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None):
"""
Deletes a single item in a table by primary key. You can
perform a conditional delete operation that deletes the item
if it exists, or if it has an expected attribute value.
In addition to deleting an item, you can also return the
item's attribute values in the same operation, using the
ReturnValues parameter.
Unless you specify conditions, the DeleteItem is an idempotent
operation; running it multiple times on the same item or
attribute does not result in an error response.
Conditional deletes are useful for deleting items only if
specific conditions are met. If those conditions are met,
DynamoDB performs the delete. Otherwise, the item is not
deleted.
:type table_name: string
:param table_name: The name of the table from which to delete the item.
:type key: map
:param key: A map of attribute names to AttributeValue objects,
representing the primary key of the item to delete.
For the primary key, you must provide all of the attributes. For
example, with a hash type primary key, you only need to specify the
hash attribute. For a hash-and-range type primary key, you must
specify both the hash attribute and the range attribute.
:type expected: map
:param expected:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use Expected and ConditionExpression at the same
time, DynamoDB will return a ValidationException exception.
This parameter does not support lists or maps.
A map of attribute/condition pairs. Expected provides a conditional
block for the DeleteItem operation.
Each element of Expected consists of an attribute name, a comparison
operator, and one or more values. DynamoDB compares the attribute
with the value(s) you supplied, using the comparison operator. For
each Expected element, the result of the evaluation is either true
or false.
If you specify more than one element in the Expected map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
ConditionalOperator parameter to OR the conditions instead. If you
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
If the Expected map evaluates to true, then the conditional operation
succeeds; otherwise, it fails.
Expected contains the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
`a` is greater than `A`, and `a` is greater than `B`. For a list of
code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions.
+ ComparisonOperator - A comparator for evaluating attributes in the
AttributeValueList . When performing the comparison, DynamoDB uses
strongly consistent reads. The following comparison operators are
available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL |
CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following
are descriptions of each comparison operator.
+ `EQ` : Equal. `EQ` is supported for all datatypes, including lists
and maps. AttributeValueList can contain only one AttributeValue
element of type String, Number, Binary, String Set, Number Set, or
Binary Set. If an item contains an AttributeValue element of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`. > <li>
+ `NE` : Not equal. `NE` is supported for all datatypes, including
lists and maps. AttributeValueList can contain only one
AttributeValue of type String, Number, Binary, String Set, Number
Set, or Binary Set. If an item contains an AttributeValue of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`. > <li>
+ `LE` : Less than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `LT` : Less than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set type).
If an item contains an AttributeValue element of a different type
than the one specified in the request, the value does not match.
For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GE` : Greater than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GT` : Greater than. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
existence of an attribute, not its data type. If the data type of
attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
result is a Boolean true . This result is because the attribute "
`a`" exists; its data type is not relevant to the `NOT_NULL`
comparison operator.
+ `NULL` : The attribute does not exist. `NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
nonexistence of an attribute, not its data type. If the data type
of attribute " `a`" is null, and you evaluate it using `NULL`, the
result is a Boolean false . This is because the attribute " `a`"
exists; its data type is not relevant to the `NULL` comparison
operator.
+ `CONTAINS` : Checks for a subsequence, or value in a set.
AttributeValueList can contain only one AttributeValue element of
type String, Number, or Binary (not a set type). If the target
attribute of the comparison is of type String, then the operator
checks for a substring match. If the target attribute of the
comparison is of type Binary, then the operator looks for a
subsequence of the target that matches the input. If the target
attribute of the comparison is a set (" `SS`", " `NS`", or "
`BS`"), then the operator evaluates to true if it finds an exact
match with any member of the set. CONTAINS is supported for lists:
When evaluating " `a CONTAINS b`", " `a`" can be a list; however, "
`b`" cannot be a set, a map, or a list.
+ `NOT_CONTAINS` : Checks for absence of a subsequence, or absence of a
value in a set. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If the target attribute of the comparison is a String, then
the operator checks for the absence of a substring match. If the
target attribute of the comparison is Binary, then the operator
checks for the absence of a subsequence of the target that matches
the input. If the target attribute of the comparison is a set ("
`SS`", " `NS`", or " `BS`"), then the operator evaluates to true if
it does not find an exact match with any member of the set.
NOT_CONTAINS is supported for lists: When evaluating " `a NOT
CONTAINS b`", " `a`" can be a list; however, " `b`" cannot be a
set, a map, or a list.
+ `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
only one AttributeValue of type String or Binary (not a Number or a
set type). The target attribute of the comparison must be of type
String or Binary (not a Number or a set type).
+ `IN` : Checks for matching elements within two sets.
AttributeValueList can contain one or more AttributeValue elements
of type String, Number, or Binary (not a set type). These
attributes are compared against an existing set type attribute of
an item. If any elements of the input set are present in the item
attribute, the expression evaluates to true.
+ `BETWEEN` : Greater than or equal to the first value, and less than
or equal to the second value. AttributeValueList must contain two
AttributeValue elements of the same type, either String, Number, or
Binary (not a set type). A target attribute matches if the target
value is greater than, or equal to, the first element and less
than, or equal to, the second element. If an item contains an
AttributeValue element of a different type than the one specified
in the request, the value does not match. For example, `{"S":"6"}`
does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
to `{"NS":["6", "2", "1"]}`
For usage examples of AttributeValueList and ComparisonOperator , see
`Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
Guide .
For backward compatibility with previous DynamoDB releases, the
following parameters can be used instead of AttributeValueList and
ComparisonOperator :
+ Value - A value for DynamoDB to compare with an attribute.
+ Exists - A Boolean value that causes DynamoDB to evaluate the value
before attempting the conditional operation:
+ If Exists is `True`, DynamoDB will check to see if that attribute
value already exists in the table. If it is found, then the
condition evaluates to true; otherwise the condition evaluates to
false.
+ If Exists is `False`, DynamoDB assumes that the attribute value does
not exist in the table. If in fact the value does not exist, then
the assumption is valid and the condition evaluates to true. If the
value is found, despite the assumption that it does not exist, the
condition evaluates to false.
Note that the default value for Exists is `True`.
The Value and Exists parameters are incompatible with
AttributeValueList and ComparisonOperator . Note that if you use
both sets of parameters at once, DynamoDB will return a
ValidationException exception.
:type conditional_operator: string
:param conditional_operator:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use ConditionalOperator and ConditionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the Expected map:
+ `AND` - If all of the conditions evaluate to true, then the entire
map evaluates to true.
+ `OR` - If at least one of the conditions evaluates to true, then the
entire map evaluates to true.
If you omit ConditionalOperator , then `AND` is the default.
The operation will succeed only if the entire map evaluates to true.
:type return_values: string
:param return_values:
Use ReturnValues if you want to get the item attributes as they
appeared before they were deleted. For DeleteItem , the valid
values are:
+ `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
then nothing is returned. (This setting is the default for
ReturnValues .)
+ `ALL_OLD` - The content of the old item is returned.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: A value that if set to `SIZE`,
the response includes statistics about item collections, if any,
that were modified during the operation. If set to `NONE` (the
default), no statistics are
returned.
:type condition_expression: string
:param condition_expression: A condition that must be satisfied in
order for a conditional DeleteItem to succeed.
An expression can contain any of the following:
+ Boolean functions: `attribute_exists | attribute_not_exists |
contains | begins_with` These function names are case-sensitive.
+ Comparison operators: ` = | <> | < | > | <=
| >= | BETWEEN | IN`
+ Logical operators: `AND | OR | NOT`
For more information on condition expressions, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_values: map
:param expression_attribute_values: One or more values that can be
substituted in an expression.
Use the **:** (colon) character in an expression to dereference an
attribute value. For example, suppose that you wanted to check
whether the value of the ProductStatus attribute was one of the
following:
`Available | Backordered | Discontinued`
You would first need to specify ExpressionAttributeValues as follows:
`{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
":disc":{"S":"Discontinued"} }`
You could then use these values in an expression, such as this:
`ProductStatus IN (:avail, :back, :disc)`
For more information on expression attribute values, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
"""
params = {'TableName': table_name, 'Key': key, }
if expected is not None:
params['Expected'] = expected
if conditional_operator is not None:
params['ConditionalOperator'] = conditional_operator
if return_values is not None:
params['ReturnValues'] = return_values
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
if condition_expression is not None:
params['ConditionExpression'] = condition_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values is not None:
params['ExpressionAttributeValues'] = expression_attribute_values
return self.make_request(action='DeleteItem',
body=json.dumps(params))
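# Usage sketch: a conditional delete on the hypothetical 'users' table, deleting
# the item only if its 'status' attribute equals 'Discontinued'.
#
#   conn.delete_item(
#       table_name='users',
#       key={'username': {'S': 'alice'}},
#       condition_expression='#s = :disc',
#       expression_attribute_names={'#s': 'status'},
#       expression_attribute_values={':disc': {'S': 'Discontinued'}},
#       return_values='ALL_OLD')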
def delete_table(self, table_name):
"""
The DeleteTable operation deletes a table and all of its
items. After a DeleteTable request, the specified table is in
the `DELETING` state until DynamoDB completes the deletion. If
the table is in the `ACTIVE` state, you can delete it. If a
table is in `CREATING` or `UPDATING` states, then DynamoDB
returns a ResourceInUseException . If the specified table does
not exist, DynamoDB returns a ResourceNotFoundException . If
table is already in the `DELETING` state, no error is
returned.
DynamoDB might continue to accept data read and write
operations, such as GetItem and PutItem , on a table in the
`DELETING` state until the table deletion is complete.
When you delete a table, any indexes on that table are also
deleted.
Use the DescribeTable API to check the status of the table.
:type table_name: string
:param table_name: The name of the table to delete.
"""
params = {'TableName': table_name, }
return self.make_request(action='DeleteTable',
body=json.dumps(params))
def describe_table(self, table_name):
"""
Returns information about the table, including the current
status of the table, when it was created, the primary key
schema, and any indexes on the table.
If you issue a DescribeTable request immediately after a
CreateTable request, DynamoDB might return a
ResourceNotFoundException. This is because DescribeTable uses
an eventually consistent query, and the metadata for your
table might not be available at that moment. Wait for a few
seconds, and then try the DescribeTable request again.
:type table_name: string
:param table_name: The name of the table to describe.
"""
params = {'TableName': table_name, }
return self.make_request(action='DescribeTable',
body=json.dumps(params))
def get_item(self, table_name, key, attributes_to_get=None,
consistent_read=None, return_consumed_capacity=None,
projection_expression=None, expression_attribute_names=None):
"""
The GetItem operation returns a set of attributes for the item
with the given primary key. If there is no matching item,
GetItem does not return any data.
GetItem provides an eventually consistent read by default. If
your application requires a strongly consistent read, set
ConsistentRead to `True`. Although a strongly consistent read
might take more time than an eventually consistent read, it
always returns the last updated value.
:type table_name: string
:param table_name: The name of the table containing the requested item.
:type key: map
:param key: A map of attribute names to AttributeValue objects,
representing the primary key of the item to retrieve.
For the primary key, you must provide all of the attributes. For
example, with a hash type primary key, you only need to specify the
hash attribute. For a hash-and-range type primary key, you must
specify both the hash attribute and the range attribute.
:type attributes_to_get: list
:param attributes_to_get:
There is a newer parameter available. Use ProjectionExpression instead.
Note that if you use AttributesToGet and ProjectionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter allows you to retrieve lists or maps; however, it cannot
retrieve individual list or map elements.
The names of one or more attributes to retrieve. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
Note that AttributesToGet has no effect on provisioned throughput
consumption. DynamoDB determines capacity units consumed based on
item size, not on the amount of data that is returned to an
application.
:type consistent_read: boolean
:param consistent_read: A value that if set to `True`, then the
operation uses strongly consistent reads; otherwise, eventually
consistent reads are used.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
:type projection_expression: string
:param projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas.
If no attribute names are specified, then all attributes will be
returned. If any of the requested attributes are not found, they
will not appear in the result.
For more information on projection expressions, go to `Accessing Item
Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
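Example (an illustrative sketch, not part of the service documentation):
assuming `conn` is an instance of this connection class, a hypothetical
table named 'users' with a hash key attribute 'id', and that the decoded
JSON response is returned as a dict, a strongly consistent read could
look like this::

    result = conn.get_item(
        table_name='users',
        key={'id': {'S': 'user-123'}},
        consistent_read=True)
    # 'Item' is present only if a matching item was found.
    item = result.get('Item')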
"""
params = {'TableName': table_name, 'Key': key, }
if attributes_to_get is not None:
params['AttributesToGet'] = attributes_to_get
if consistent_read is not None:
params['ConsistentRead'] = consistent_read
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if projection_expression is not None:
params['ProjectionExpression'] = projection_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
return self.make_request(action='GetItem',
body=json.dumps(params))
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Returns an array of table names associated with the current
account and endpoint. The output from ListTables is paginated,
with each page returning a maximum of 100 table names.
:type exclusive_start_table_name: string
:param exclusive_start_table_name: The first table name that this
operation will evaluate. Use the value that was returned for
LastEvaluatedTableName in a previous operation, so that you can
obtain the next page of results.
:type limit: integer
:param limit: A maximum number of table names to return. If this
parameter is not specified, the limit is 100.
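Example (an illustrative sketch): assuming `conn` is an instance of this
connection class and that the decoded JSON response is returned as a
dict, a second page of table names can be requested by passing
LastEvaluatedTableName back in as exclusive_start_table_name::

    response = conn.list_tables(limit=10)
    table_names = response.get('TableNames', [])
    start_name = response.get('LastEvaluatedTableName')
    if start_name is not None:
        response = conn.list_tables(
            exclusive_start_table_name=start_name, limit=10)
        table_names.extend(response.get('TableNames', []))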
"""
params = {}
if exclusive_start_table_name is not None:
params['ExclusiveStartTableName'] = exclusive_start_table_name
if limit is not None:
params['Limit'] = limit
return self.make_request(action='ListTables',
body=json.dumps(params))
def put_item(self, table_name, item, expected=None, return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
conditional_operator=None, condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None):
"""
Creates a new item, or replaces an old item with a new item.
If an item that has the same primary key as the new item
already exists in the specified table, the new item completely
replaces the existing item. You can perform a conditional put
operation (add a new item if one with the specified primary
key doesn't exist), or replace an existing item if it has
certain attribute values.
In addition to putting an item, you can also return the item's
attribute values in the same operation, using the ReturnValues
parameter.
When you add an item, the primary key attribute(s) are the
only required attributes. Attribute values cannot be null.
String and Binary type attributes must have lengths greater
than zero. Set type attributes cannot be empty. Requests with
empty values will be rejected with a ValidationException
exception.
You can request that PutItem return either a copy of the
original item (before the update) or a copy of the updated
item (after the update). For more information, see the
ReturnValues description below.
To prevent a new item from replacing an existing item, use a
conditional put operation with ComparisonOperator set to
`NULL` for the primary key attribute, or attributes.
For more information about using this API, see `Working with
Items`_ in the Amazon DynamoDB Developer Guide .
:type table_name: string
:param table_name: The name of the table to contain the item.
:type item: map
:param item: A map of attribute name/value pairs, one for each
attribute. Only the primary key attributes are required; you can
optionally provide other attribute name-value pairs for the item.
You must provide all of the attributes for the primary key. For
example, with a hash type primary key, you only need to specify the
hash attribute. For a hash-and-range type primary key, you must
specify both the hash attribute and the range attribute.
If you specify any attributes that are part of an index key, then the
data types for those attributes must match those of the schema in
the table's attribute definition.
For more information about primary keys, see `Primary Key`_ in the
Amazon DynamoDB Developer Guide .
Each element in the Item map is an AttributeValue object.
:type expected: map
:param expected:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use Expected and ConditionExpression at the same
time, DynamoDB will return a ValidationException exception.
This parameter does not support lists or maps.
A map of attribute/condition pairs. Expected provides a conditional
block for the PutItem operation.
Each element of Expected consists of an attribute name, a comparison
operator, and one or more values. DynamoDB compares the attribute
with the value(s) you supplied, using the comparison operator. For
each Expected element, the result of the evaluation is either true
or false.
If you specify more than one element in the Expected map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
ConditionalOperator parameter to OR the conditions instead. If you
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
If the Expected map evaluates to true, then the conditional operation
succeeds; otherwise, it fails.
Expected contains the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
`a` is greater than `A`, and `a` is greater than `B`. For a list of
code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions.
+ ComparisonOperator - A comparator for evaluating attributes in the
AttributeValueList . When performing the comparison, DynamoDB uses
strongly consistent reads. The following comparison operators are
available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL |
CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following
are descriptions of each comparison operator.
+ `EQ` : Equal. `EQ` is supported for all datatypes, including lists
and maps. AttributeValueList can contain only one AttributeValue
element of type String, Number, Binary, String Set, Number Set, or
Binary Set. If an item contains an AttributeValue element of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`. > <li>
+ `NE` : Not equal. `NE` is supported for all datatypes, including
lists and maps. AttributeValueList can contain only one
AttributeValue of type String, Number, Binary, String Set, Number
Set, or Binary Set. If an item contains an AttributeValue of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`. > <li>
+ `LE` : Less than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `LT` : Less than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set type).
If an item contains an AttributeValue element of a different type
than the one specified in the request, the value does not match.
For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GE` : Greater than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GT` : Greater than. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
existence of an attribute, not its data type. If the data type of
attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
result is a Boolean true . This result is because the attribute "
`a`" exists; its data type is not relevant to the `NOT_NULL`
comparison operator.
+ `NULL` : The attribute does not exist. `NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
nonexistence of an attribute, not its data type. If the data type
of attribute " `a`" is null, and you evaluate it using `NULL`, the
result is a Boolean false . This is because the attribute " `a`"
exists; its data type is not relevant to the `NULL` comparison
operator.
+ `CONTAINS` : Checks for a subsequence, or value in a set.
AttributeValueList can contain only one AttributeValue element of
type String, Number, or Binary (not a set type). If the target
attribute of the comparison is of type String, then the operator
checks for a substring match. If the target attribute of the
comparison is of type Binary, then the operator looks for a
subsequence of the target that matches the input. If the target
attribute of the comparison is a set (" `SS`", " `NS`", or "
`BS`"), then the operator evaluates to true if it finds an exact
match with any member of the set. CONTAINS is supported for lists:
When evaluating " `a CONTAINS b`", " `a`" can be a list; however, "
`b`" cannot be a set, a map, or a list.
+ `NOT_CONTAINS` : Checks for absence of a subsequence, or absence of a
value in a set. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If the target attribute of the comparison is a String, then
the operator checks for the absence of a substring match. If the
target attribute of the comparison is Binary, then the operator
checks for the absence of a subsequence of the target that matches
the input. If the target attribute of the comparison is a set ("
`SS`", " `NS`", or " `BS`"), then the operator evaluates to true if
it does not find an exact match with any member of the set.
NOT_CONTAINS is supported for lists: When evaluating " `a NOT
CONTAINS b`", " `a`" can be a list; however, " `b`" cannot be a
set, a map, or a list.
+ `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
only one AttributeValue of type String or Binary (not a Number or a
set type). The target attribute of the comparison must be of type
String or Binary (not a Number or a set type).
+ `IN` : Checks for matching elements within two sets.
AttributeValueList can contain one or more AttributeValue elements
of type String, Number, or Binary (not a set type). These
attributes are compared against an existing set type attribute of
an item. If any elements of the input set are present in the item
attribute, the expression evaluates to true.
+ `BETWEEN` : Greater than or equal to the first value, and less than
or equal to the second value. AttributeValueList must contain two
AttributeValue elements of the same type, either String, Number, or
Binary (not a set type). A target attribute matches if the target
value is greater than, or equal to, the first element and less
than, or equal to, the second element. If an item contains an
AttributeValue element of a different type than the one specified
in the request, the value does not match. For example, `{"S":"6"}`
does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
to `{"NS":["6", "2", "1"]}`
For usage examples of AttributeValueList and ComparisonOperator , see
`Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
Guide .
For backward compatibility with previous DynamoDB releases, the
following parameters can be used instead of AttributeValueList and
ComparisonOperator :
+ Value - A value for DynamoDB to compare with an attribute.
+ Exists - A Boolean value that causes DynamoDB to evaluate the value
before attempting the conditional operation:
+ If Exists is `True`, DynamoDB will check to see if that attribute
value already exists in the table. If it is found, then the
condition evaluates to true; otherwise the condition evaluates to
false.
+ If Exists is `False`, DynamoDB assumes that the attribute value does
not exist in the table. If in fact the value does not exist, then
the assumption is valid and the condition evaluates to true. If the
value is found, despite the assumption that it does not exist, the
condition evaluates to false.
Note that the default value for Exists is `True`.
The Value and Exists parameters are incompatible with
AttributeValueList and ComparisonOperator . Note that if you use
both sets of parameters at once, DynamoDB will return a
ValidationException exception.
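For example (an illustrative sketch using a hypothetical 'status'
attribute), a conditional put that succeeds only if the item has no
'status' attribute, or one that requires 'status' to equal "Available",
could pass one of the following::

    expected={'status': {'Exists': False}}

    expected={'status': {
        'ComparisonOperator': 'EQ',
        'AttributeValueList': [{'S': 'Available'}]}}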
:type return_values: string
:param return_values:
Use ReturnValues if you want to get the item attributes as they
appeared before they were updated with the PutItem request. For
PutItem , the valid values are:
+ `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
then nothing is returned. (This setting is the default for
ReturnValues .)
+ `ALL_OLD` - If PutItem overwrote an attribute name-value pair, then
the content of the old item is returned.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: A value that if set to `SIZE`,
the response includes statistics about item collections, if any,
that were modified during the operation. If set to `NONE` (the
default), no statistics are returned.
:type conditional_operator: string
:param conditional_operator:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use ConditionalOperator and ConditionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the Expected map:
+ `AND` - If all of the conditions evaluate to true, then the entire
map evaluates to true.
+ `OR` - If at least one of the conditions evaluates to true, then the
entire map evaluates to true.
If you omit ConditionalOperator , then `AND` is the default.
The operation will succeed only if the entire map evaluates to true.
:type condition_expression: string
:param condition_expression: A condition that must be satisfied in
order for a conditional PutItem operation to succeed.
An expression can contain any of the following:
+ Boolean functions: `attribute_exists | attribute_not_exists |
contains | begins_with` These function names are case-sensitive.
+ Comparison operators: ` = | <> | < | > | <=
| >= | BETWEEN | IN`
+ Logical operators: `AND | OR | NOT`
For more information on condition expressions, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_values: map
:param expression_attribute_values: One or more values that can be
substituted in an expression.
Use the **:** (colon) character in an expression to dereference an
attribute value. For example, suppose that you wanted to check
whether the value of the ProductStatus attribute was one of the
following:
`Available | Backordered | Discontinued`
You would first need to specify ExpressionAttributeValues as follows:
`{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
":disc":{"S":"Discontinued"} }`
You could then use these values in an expression, such as this:
`ProductStatus IN (:avail, :back, :disc)`
For more information on expression attribute values, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
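Example (an illustrative sketch): assuming `conn` is an instance of this
connection class and a hypothetical table named 'users' with a hash key
attribute 'id', a conditional put using the newer expression parameters
could look like this::

    conn.put_item(
        table_name='users',
        item={
            'id': {'S': 'user-123'},
            'ProductStatus': {'S': 'Available'},
        },
        condition_expression='attribute_not_exists(id)')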
"""
params = {'TableName': table_name, 'Item': item, }
if expected is not None:
params['Expected'] = expected
if return_values is not None:
params['ReturnValues'] = return_values
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
if conditional_operator is not None:
params['ConditionalOperator'] = conditional_operator
if condition_expression is not None:
params['ConditionExpression'] = condition_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values is not None:
params['ExpressionAttributeValues'] = expression_attribute_values
return self.make_request(action='PutItem',
body=json.dumps(params))
def query(self, table_name, key_conditions, index_name=None, select=None,
attributes_to_get=None, limit=None, consistent_read=None,
query_filter=None, conditional_operator=None,
scan_index_forward=None, exclusive_start_key=None,
return_consumed_capacity=None, projection_expression=None,
filter_expression=None, expression_attribute_names=None,
expression_attribute_values=None):
"""
A Query operation directly accesses items from a table using
the table primary key, or from an index using the index key.
You must provide a specific hash key value. You can narrow the
scope of the query by using comparison operators on the range
key value, or on the index key. You can use the
ScanIndexForward parameter to get results in forward or
reverse order, by range key or by index key.
Queries that do not return results consume the minimum number
of read capacity units for that type of read operation.
If the total number of items meeting the query criteria
exceeds the result set size limit of 1 MB, the query stops and
results are returned to the user with LastEvaluatedKey to
continue the query in a subsequent operation. Unlike a Scan
operation, a Query operation never returns both an empty
result set and a LastEvaluatedKey . The LastEvaluatedKey is
only provided if the results exceed 1 MB, or if you have used
Limit .
You can query a table, a local secondary index, or a global
secondary index. For a query on a table or on a local
secondary index, you can set ConsistentRead to true and obtain
a strongly consistent result. Global secondary indexes support
eventually consistent reads only, so do not specify
ConsistentRead when querying a global secondary index.
:type table_name: string
:param table_name: The name of the table containing the requested
items.
:type index_name: string
:param index_name: The name of an index to query. This index can be any
local secondary index or global secondary index on the table.
:type select: string
:param select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, the count
of matching items, or in the case of an index, some or all of the
attributes projected into the index.
+ `ALL_ATTRIBUTES` - Returns all of the item attributes from the
specified table or index. If you query a local secondary index,
then for each matching item in the index DynamoDB will fetch the
entire item from the parent table. If the index is configured to
project all item attributes, then all of the data can be obtained
from the local secondary index, and no fetching is required.
+ `ALL_PROJECTED_ATTRIBUTES` - Allowed only when querying an index.
Retrieves all attributes that have been projected into the index.
If the index is configured to project all attributes, this return
value is equivalent to specifying `ALL_ATTRIBUTES`.
+ `COUNT` - Returns the number of matching items, rather than the
matching items themselves.
+ `SPECIFIC_ATTRIBUTES` - Returns only the attributes listed in
AttributesToGet . This return value is equivalent to specifying
AttributesToGet without specifying any value for Select . If you
query a local secondary index and request only attributes that are
projected into that index, the operation will read only the index
and not the table. If any of the requested attributes are not
projected into the local secondary index, DynamoDB will fetch each
of these attributes from the parent table. This extra fetching
incurs additional throughput cost and latency. If you query a
global secondary index, you can only request attributes that are
projected into the index. Global secondary index queries cannot
fetch attributes from the parent table.
If neither Select nor AttributesToGet are specified, DynamoDB defaults
to `ALL_ATTRIBUTES` when accessing a table, and
`ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use
both Select and AttributesToGet together in a single request,
unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage
is equivalent to specifying AttributesToGet without any value for
Select .)
:type attributes_to_get: list
:param attributes_to_get:
There is a newer parameter available. Use ProjectionExpression instead.
Note that if you use AttributesToGet and ProjectionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter allows you to retrieve lists or maps; however, it cannot
retrieve individual list or map elements.
The names of one or more attributes to retrieve. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
Note that AttributesToGet has no effect on provisioned throughput
consumption. DynamoDB determines capacity units consumed based on
item size, not on the amount of data that is returned to an
application.
You cannot use both AttributesToGet and Select together in a Query
request, unless the value for Select is `SPECIFIC_ATTRIBUTES`.
(This usage is equivalent to specifying AttributesToGet without any
value for Select .)
If you query a local secondary index and request only attributes that
are projected into that index, the operation will read only the
index and not the table. If any of the requested attributes are not
projected into the local secondary index, DynamoDB will fetch each
of these attributes from the parent table. This extra fetching
incurs additional throughput cost and latency.
If you query a global secondary index, you can only request attributes
that are projected into the index. Global secondary index queries
cannot fetch attributes from the parent table.
:type limit: integer
:param limit: The maximum number of items to evaluate (not necessarily
the number of matching items). If DynamoDB processes the number of
items up to the limit while processing the results, it stops the
operation and returns the matching values up to that point, and a
key in LastEvaluatedKey to apply in a subsequent operation, so that
you can pick up where you left off. Also, if the processed data set
size exceeds 1 MB before DynamoDB reaches this limit, it stops the
operation and returns the matching values up to the limit, and a
key in LastEvaluatedKey to apply in a subsequent operation to
continue the operation. For more information, see `Query and Scan`_
in the Amazon DynamoDB Developer Guide .
:type consistent_read: boolean
:param consistent_read: A value that if set to `True`, then the
operation uses strongly consistent reads; otherwise, eventually
consistent reads are used.
Strongly consistent reads are not supported on global secondary
indexes. If you query a global secondary index with ConsistentRead
set to `True`, you will receive an error message.
:type key_conditions: map
:param key_conditions: The selection criteria for the query. For a
query on a table, you can have conditions only on the table primary
key attributes. You must specify the hash key attribute name and
value as an `EQ` condition. You can optionally specify a second
condition, referring to the range key attribute. If you do not
specify a range key condition, all items under the hash key will be
fetched and processed. Any filters will be applied after this.
For a query on an index, you can have conditions only on the index key
attributes. You must specify the index hash attribute name and
value as an EQ condition. You can optionally specify a second
condition, referring to the index key range attribute.
Each KeyConditions element consists of an attribute name to compare,
along with the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
`a` is greater than `A`, and `a` is greater than `B`. For a list of
code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions.
+ ComparisonOperator - A comparator for evaluating attributes, for
example, equals, greater than, less than, and so on. For
KeyConditions , only the following comparison operators are
supported: `EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN` The
following are descriptions of these comparison operators.
+ `EQ` : Equal. AttributeValueList can contain only one AttributeValue
of type String, Number, or Binary (not a set type). If an item
contains an AttributeValue element of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
equal `{"NS":["6", "2", "1"]}`.
+ `LE` : Less than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `LT` : Less than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set type).
If an item contains an AttributeValue element of a different type
than the one specified in the request, the value does not match.
For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GE` : Greater than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GT` : Greater than. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
only one AttributeValue of type String or Binary (not a Number or a
set type). The target attribute of the comparison must be of type
String or Binary (not a Number or a set type).
+ `BETWEEN` : Greater than or equal to the first value, and less than
or equal to the second value. AttributeValueList must contain two
AttributeValue elements of the same type, either String, Number, or
Binary (not a set type). A target attribute matches if the target
value is greater than, or equal to, the first element and less
than, or equal to, the second element. If an item contains an
AttributeValue element of a different type than the one specified
in the request, the value does not match. For example, `{"S":"6"}`
does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
to `{"NS":["6", "2", "1"]}`
For usage examples of AttributeValueList and ComparisonOperator , see
`Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
Guide .
:type query_filter: map
:param query_filter:
There is a newer parameter available. Use FilterExpression instead.
Note that if you use QueryFilter and FilterExpression at the same
time, DynamoDB will return a ValidationException exception.
This parameter does not support lists or maps.
A condition that evaluates the query results after the items are read
and returns only the desired values.
Query filters are applied after the items are read, so they do not
limit the capacity used.
If you specify more than one condition in the QueryFilter map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
ConditionalOperator parameter to OR the conditions instead. If you
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
QueryFilter does not allow key attributes. You cannot define a filter
condition on a hash key or range key.
Each QueryFilter element consists of an attribute name to compare,
along with the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
operator specified in ComparisonOperator . For type Number, value
comparisons are numeric. String value comparisons for greater than,
equals, or less than are based on ASCII character code values. For
example, `a` is greater than `A`, and `a` is greater than `B`. For
a list of code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions. For information on specifying data
types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB
Developer Guide .
+ ComparisonOperator - A comparator for evaluating attributes. For
example, equals, greater than, less than, etc. The following
comparison operators are available: `EQ | NE | LE | LT | GE | GT |
NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN |
BETWEEN` For complete descriptions of all comparison operators, see
`API_Condition.html`_.
:type conditional_operator: string
:param conditional_operator:
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the QueryFilter map:
+ `AND` - If all of the conditions evaluate to true, then the entire
map evaluates to true.
+ `OR` - If at least one of the conditions evaluates to true, then the
entire map evaluates to true.
If you omit ConditionalOperator , then `AND` is the default.
The operation will succeed only if the entire map evaluates to true.
:type scan_index_forward: boolean
:param scan_index_forward: A value that specifies ascending (true) or
descending (false) traversal of the index. DynamoDB returns results
reflecting the requested order determined by the range key. If the
data type is Number, the results are returned in numeric order. For
type String, the results are returned in order of ASCII character
code values. For type Binary, DynamoDB treats each byte of the
binary data as unsigned when it compares binary values.
If ScanIndexForward is not specified, the results are returned in
ascending order.
:type exclusive_start_key: map
:param exclusive_start_key: The primary key of the first item that this
operation will evaluate. Use the value that was returned for
LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
:type projection_expression: string
:param projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas.
If no attribute names are specified, then all attributes will be
returned. If any of the requested attributes are not found, they
will not appear in the result.
For more information on projection expressions, go to `Accessing Item
Attributes`_ in the Amazon DynamoDB Developer Guide .
:type filter_expression: string
:param filter_expression: A condition that evaluates the query results
after the items are read and returns only the desired values.
The condition you specify is applied to the items queried; any items
that do not match the expression are not returned.
Filter expressions are applied after the items are read, so they do not
limit the capacity used.
A FilterExpression has the same syntax as a ConditionExpression . For
more information on expression syntax, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_values: map
:param expression_attribute_values: One or more values that can be
substituted in an expression.
Use the **:** (colon) character in an expression to dereference an
attribute value. For example, suppose that you wanted to check
whether the value of the ProductStatus attribute was one of the
following:
`Available | Backordered | Discontinued`
You would first need to specify ExpressionAttributeValues as follows:
`{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
":disc":{"S":"Discontinued"} }`
You could then use these values in an expression, such as this:
`ProductStatus IN (:avail, :back, :disc)`
For more information on expression attribute values, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
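Example (an illustrative sketch): assuming `conn` is an instance of this
connection class and a hypothetical table named 'users' whose hash key
attribute is 'id', a query on a single hash key value could look like
this::

    result = conn.query(
        table_name='users',
        key_conditions={
            'id': {
                'ComparisonOperator': 'EQ',
                'AttributeValueList': [{'S': 'user-123'}],
            },
        },
        limit=25)
    items = result.get('Items', [])
    # Pass LastEvaluatedKey back in as exclusive_start_key to fetch
    # the next page, if present.
    last_key = result.get('LastEvaluatedKey')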
"""
params = {
'TableName': table_name,
'KeyConditions': key_conditions,
}
if index_name is not None:
params['IndexName'] = index_name
if select is not None:
params['Select'] = select
if attributes_to_get is not None:
params['AttributesToGet'] = attributes_to_get
if limit is not None:
params['Limit'] = limit
if consistent_read is not None:
params['ConsistentRead'] = consistent_read
if query_filter is not None:
params['QueryFilter'] = query_filter
if conditional_operator is not None:
params['ConditionalOperator'] = conditional_operator
if scan_index_forward is not None:
params['ScanIndexForward'] = scan_index_forward
if exclusive_start_key is not None:
params['ExclusiveStartKey'] = exclusive_start_key
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if projection_expression is not None:
params['ProjectionExpression'] = projection_expression
if filter_expression is not None:
params['FilterExpression'] = filter_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values is not None:
params['ExpressionAttributeValues'] = expression_attribute_values
return self.make_request(action='Query',
body=json.dumps(params))
def scan(self, table_name, attributes_to_get=None, limit=None,
select=None, scan_filter=None, conditional_operator=None,
exclusive_start_key=None, return_consumed_capacity=None,
total_segments=None, segment=None, projection_expression=None,
filter_expression=None, expression_attribute_names=None,
expression_attribute_values=None):
"""
The Scan operation returns one or more items and item
attributes by accessing every item in the table. To have
DynamoDB return fewer items, you can provide a ScanFilter
operation.
If the total number of scanned items exceeds the maximum data
set size limit of 1 MB, the scan stops and results are
returned to the user with a LastEvaluatedKey value to continue
the scan in a subsequent operation. The results also include
the number of items exceeding the limit. A scan can result in
no table data meeting the filter criteria.
The result set is eventually consistent.
By default, Scan operations proceed sequentially; however, for
faster performance on large tables, applications can request a
parallel Scan operation by specifying the Segment and
TotalSegments parameters. For more information, see `Parallel
Scan`_ in the Amazon DynamoDB Developer Guide .
:type table_name: string
:param table_name: The name of the table containing the requested
items.
:type attributes_to_get: list
:param attributes_to_get:
There is a newer parameter available. Use ProjectionExpression instead.
Note that if you use AttributesToGet and ProjectionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter allows you to retrieve lists or maps; however, it cannot
retrieve individual list or map elements.
The names of one or more attributes to retrieve. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
Note that AttributesToGet has no effect on provisioned throughput
consumption. DynamoDB determines capacity units consumed based on
item size, not on the amount of data that is returned to an
application.
:type limit: integer
:param limit: The maximum number of items to evaluate (not necessarily
the number of matching items). If DynamoDB processes the number of
items up to the limit while processing the results, it stops the
operation and returns the matching values up to that point, and a
key in LastEvaluatedKey to apply in a subsequent operation, so that
you can pick up where you left off. Also, if the processed data set
size exceeds 1 MB before DynamoDB reaches this limit, it stops the
operation and returns the matching values up to the limit, and a
key in LastEvaluatedKey to apply in a subsequent operation to
continue the operation. For more information, see `Query and Scan`_
in the Amazon DynamoDB Developer Guide .
:type select: string
:param select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, or the
count of matching items.
+ `ALL_ATTRIBUTES` - Returns all of the item attributes.
+ `COUNT` - Returns the number of matching items, rather than the
matching items themselves.
+ `SPECIFIC_ATTRIBUTES` - Returns only the attributes listed in
AttributesToGet . This return value is equivalent to specifying
AttributesToGet without specifying any value for Select .
If neither Select nor AttributesToGet are specified, DynamoDB defaults
to `ALL_ATTRIBUTES`. You cannot use both AttributesToGet and Select
together in a single request, unless the value for Select is
`SPECIFIC_ATTRIBUTES`. (This usage is equivalent to specifying
AttributesToGet without any value for Select .)
:type scan_filter: map
:param scan_filter:
There is a newer parameter available. Use FilterExpression instead.
Note that if you use ScanFilter and FilterExpression at the same
time, DynamoDB will return a ValidationException exception.
This parameter does not support lists or maps.
A condition that evaluates the scan results and returns only the
desired values.
If you specify more than one condition in the ScanFilter map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
ConditionalOperator parameter to OR the conditions instead. If you
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
Each ScanFilter element consists of an attribute name to compare, along
with the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
operator specified in ComparisonOperator . For type Number, value
comparisons are numeric. String value comparisons for greater than,
equals, or less than are based on ASCII character code values. For
example, `a` is greater than `A`, and `a` is greater than `B`. For
a list of code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions. For information on specifying data
types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB
Developer Guide .
+ ComparisonOperator - A comparator for evaluating attributes. For
example, equals, greater than, less than, etc. The following
comparison operators are available: `EQ | NE | LE | LT | GE | GT |
NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN |
BETWEEN` For complete descriptions of all comparison operators, see
`Condition`_.
:type conditional_operator: string
:param conditional_operator:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use ConditionalOperator and ConditionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the ScanFilter map:
+ `AND` - If all of the conditions evaluate to true, then the entire
map evaluates to true.
+ `OR` - If at least one of the conditions evaluates to true, then the
entire map evaluates to true.
If you omit ConditionalOperator , then `AND` is the default.
The operation will succeed only if the entire map evaluates to true.
:type exclusive_start_key: map
:param exclusive_start_key: The primary key of the first item that this
operation will evaluate. Use the value that was returned for
LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
In a parallel scan, a Scan request that includes ExclusiveStartKey must
specify the same segment whose previous Scan returned the
corresponding value of LastEvaluatedKey .
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
:type total_segments: integer
:param total_segments: For a parallel Scan request, TotalSegments
represents the total number of segments into which the Scan
operation will be divided. The value of TotalSegments corresponds
to the number of application workers that will perform the parallel
scan. For example, if you want to scan a table using four
application threads, specify a TotalSegments value of 4.
The value for TotalSegments must be greater than or equal to 1, and
less than or equal to 1000000. If you specify a TotalSegments value
of 1, the Scan operation will be sequential rather than parallel.
If you specify TotalSegments , you must also specify Segment .
:type segment: integer
:param segment: For a parallel Scan request, Segment identifies an
individual segment to be scanned by an application worker.
Segment IDs are zero-based, so the first segment is always 0. For
example, if you want to scan a table using four application
threads, the first thread specifies a Segment value of 0, the
second thread specifies 1, and so on.
The value of LastEvaluatedKey returned from a parallel Scan request
must be used as ExclusiveStartKey with the same segment ID in a
subsequent Scan operation.
The value for Segment must be greater than or equal to 0, and less than
the value provided for TotalSegments .
If you specify Segment , you must also specify TotalSegments .
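For example (an illustrative sketch using a hypothetical table named
'users'), a scan split across four parallel workers would issue one
request per worker, such as::

    # Worker number 2 of 4 (segments are zero-based).
    result = conn.scan(
        table_name='users',
        total_segments=4,
        segment=2)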
:type projection_expression: string
:param projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas.
If no attribute names are specified, then all attributes will be
returned. If any of the requested attributes are not found, they
will not appear in the result.
For more information on projection expressions, go to `Accessing Item
Attributes`_ in the Amazon DynamoDB Developer Guide .
:type filter_expression: string
:param filter_expression: A condition that evaluates the scan results
and returns only the desired values.
The condition you specify is applied to the items scanned; any items
that do not match the expression are not returned.
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_values: map
:param expression_attribute_values: One or more values that can be
substituted in an expression.
Use the **:** (colon) character in an expression to dereference an
attribute value. For example, suppose that you wanted to check
whether the value of the ProductStatus attribute was one of the
following:
`Available | Backordered | Discontinued`
You would first need to specify ExpressionAttributeValues as follows:
`{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
":disc":{"S":"Discontinued"} }`
You could then use these values in an expression, such as this:
`ProductStatus IN (:avail, :back, :disc)`
For more information on expression attribute values, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
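Example (an illustrative sketch): assuming `conn` is an instance of this
connection class and a hypothetical table named 'users', a filtered scan
using the newer expression parameters could look like this::

    result = conn.scan(
        table_name='users',
        filter_expression='ProductStatus = :avail',
        expression_attribute_values={':avail': {'S': 'Available'}},
        limit=100)
    items = result.get('Items', [])
    last_key = result.get('LastEvaluatedKey')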
"""
params = {'TableName': table_name, }
if attributes_to_get is not None:
params['AttributesToGet'] = attributes_to_get
if limit is not None:
params['Limit'] = limit
if select is not None:
params['Select'] = select
if scan_filter is not None:
params['ScanFilter'] = scan_filter
if conditional_operator is not None:
params['ConditionalOperator'] = conditional_operator
if exclusive_start_key is not None:
params['ExclusiveStartKey'] = exclusive_start_key
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if total_segments is not None:
params['TotalSegments'] = total_segments
if segment is not None:
params['Segment'] = segment
if projection_expression is not None:
params['ProjectionExpression'] = projection_expression
if filter_expression is not None:
params['FilterExpression'] = filter_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values is not None:
params['ExpressionAttributeValues'] = expression_attribute_values
return self.make_request(action='Scan',
body=json.dumps(params))
def update_item(self, table_name, key, attribute_updates=None,
expected=None, conditional_operator=None,
return_values=None, return_consumed_capacity=None,
return_item_collection_metrics=None,
update_expression=None, condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None):
"""
Edits an existing item's attributes, or adds a new item to the
table if it does not already exist. You can put, delete, or
add attribute values. You can also perform a conditional
update (insert a new attribute name-value pair if it doesn't
exist, or replace an existing name-value pair if it has
certain expected attribute values).
You can also return the item's attribute values in the same
UpdateItem operation using the ReturnValues parameter.
:type table_name: string
:param table_name: The name of the table containing the item to update.
:type key: map
:param key: The primary key of the item to be updated. Each element
consists of an attribute name and a value for that attribute.
For the primary key, you must provide all of the attributes. For
example, with a hash type primary key, you only need to specify the
hash attribute. For a hash-and-range type primary key, you must
specify both the hash attribute and the range attribute.
:type attribute_updates: map
:param attribute_updates:
There is a newer parameter available. Use UpdateExpression instead.
Note that if you use AttributeUpdates and UpdateExpression at the
same time, DynamoDB will return a ValidationException exception.
This parameter can be used for modifying top-level attributes; however,
it does not support individual list or map elements.
The names of attributes to be modified, the action to perform on each,
and the new value for each. If you are updating an attribute that
is an index key attribute for any indexes on that table, the
attribute type must match the index key type defined in the
AttributesDefinition of the table description. You can use
UpdateItem to update any nonkey attributes.
Attribute values cannot be null. String and Binary type attributes must
have lengths greater than zero. Set type attributes must not be
empty. Requests with empty values will be rejected with a
ValidationException exception.
Each AttributeUpdates element consists of an attribute name to modify,
along with the following:
+ Value - The new value, if applicable, for this attribute.
+ Action - A value that specifies how to perform the update. This
action is only valid for an existing attribute whose data type is
Number or is a set; do not use `ADD` for other data types. If an
item with the specified primary key is found in the table, the
following values perform the following actions:
+ `PUT` - Adds the specified attribute to the item. If the attribute
already exists, it is replaced by the new value.
+ `DELETE` - Removes the attribute and its value, if no value is
specified for `DELETE`. The data type of the specified value must
match the existing value's data type. If a set of values is
specified, then those values are subtracted from the old set. For
example, if the attribute value was the set `[a,b,c]` and the
`DELETE` action specifies `[a,c]`, then the final attribute value
is `[b]`. Specifying an empty set is an error.
+ `ADD` - Adds the specified value to the item, if the attribute does
not already exist. If the attribute does exist, then the behavior
of `ADD` depends on the data type of the attribute:
+ If the existing attribute is a number, and if Value is also a number,
then Value is mathematically added to the existing attribute. If
Value is a negative number, then it is subtracted from the existing
attribute. If you use `ADD` to increment or decrement a number
value for an item that doesn't exist before the update, DynamoDB
uses 0 as the initial value. Similarly, if you use `ADD` for an
existing item to increment or decrement an attribute value that
doesn't exist before the update, DynamoDB uses `0` as the initial
value. For example, suppose that the item you want to update
doesn't have an attribute named itemcount , but you decide to `ADD`
the number `3` to this attribute anyway. DynamoDB will create the
itemcount attribute, set its initial value to `0`, and finally add
`3` to it. The result will be a new itemcount attribute, with a
value of `3`.
+ If the existing data type is a set, and if Value is also a set, then
Value is appended to the existing set. For example, if the
attribute value is the set `[1,2]`, and the `ADD` action specified
`[3]`, then the final attribute value is `[1,2,3]`. An error occurs
if an `ADD` action is specified for a set attribute and the
attribute type specified does not match the existing set type. Both
sets must have the same primitive data type. For example, if the
existing data type is a set of strings, Value must also be a set of
strings.
If no item with the specified key is found in the table, the following
values perform the following actions:
+ `PUT` - Causes DynamoDB to create a new item with the specified
primary key, and then adds the attribute.
+ `DELETE` - Nothing happens, because attributes cannot be deleted from
a nonexistent item. The operation succeeds, but DynamoDB does not
create a new item.
+ `ADD` - Causes DynamoDB to create an item with the supplied primary
key and number (or set of numbers) for the attribute value. The
only data types allowed are Number and Number Set.
If you specify any attributes that are part of an index key, then the
data types for those attributes must match those of the schema in
the table's attribute definition.
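For example (an illustrative sketch using hypothetical attributes), an
AttributeUpdates map that replaces a 'ProductStatus' value and adds the
number 3 to a numeric 'itemcount' attribute could look like this::

    attribute_updates={
        'ProductStatus': {
            'Action': 'PUT',
            'Value': {'S': 'Backordered'}},
        'itemcount': {
            'Action': 'ADD',
            'Value': {'N': '3'}},
    }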
:type expected: map
:param expected:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use Expected and ConditionExpression at the same
time, DynamoDB will return a ValidationException exception.
This parameter does not support lists or maps.
A map of attribute/condition pairs. Expected provides a conditional
block for the UpdateItem operation.
Each element of Expected consists of an attribute name, a comparison
operator, and one or more values. DynamoDB compares the attribute
with the value(s) you supplied, using the comparison operator. For
each Expected element, the result of the evaluation is either true
or false.
If you specify more than one element in the Expected map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
ConditionalOperator parameter to OR the conditions instead. If you
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
If the Expected map evaluates to true, then the conditional operation
succeeds; otherwise, it fails.
Expected contains the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
`a` is greater than `A`, and `a` is greater than `B`. For a list of
code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions.
+ ComparisonOperator - A comparator for evaluating attributes in the
AttributeValueList . When performing the comparison, DynamoDB uses
strongly consistent reads. The following comparison operators are
available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL |
CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following
are descriptions of each comparison operator.
+ `EQ` : Equal. `EQ` is supported for all datatypes, including lists
and maps. AttributeValueList can contain only one AttributeValue
element of type String, Number, Binary, String Set, Number Set, or
Binary Set. If an item contains an AttributeValue element of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`.
+ `NE` : Not equal. `NE` is supported for all datatypes, including
lists and maps. AttributeValueList can contain only one
AttributeValue of type String, Number, Binary, String Set, Number
Set, or Binary Set. If an item contains an AttributeValue of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`.
+ `LE` : Less than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ `LT` : Less than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set type).
If an item contains an AttributeValue element of a different type
than the one specified in the request, the value does not match.
For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ `GE` : Greater than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ `GT` : Greater than. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
existence of an attribute, not its data type. If the data type of
attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
result is a Boolean true . This result is because the attribute "
`a`" exists; its data type is not relevant to the `NOT_NULL`
comparison operator.
+ `NULL` : The attribute does not exist. `NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
nonexistence of an attribute, not its data type. If the data type
of attribute " `a`" is null, and you evaluate it using `NULL`, the
result is a Boolean false . This is because the attribute " `a`"
exists; its data type is not relevant to the `NULL` comparison
operator.
+ `CONTAINS` : Checks for a subsequence, or value in a set.
AttributeValueList can contain only one AttributeValue element of
type String, Number, or Binary (not a set type). If the target
attribute of the comparison is of type String, then the operator
checks for a substring match. If the target attribute of the
comparison is of type Binary, then the operator looks for a
subsequence of the target that matches the input. If the target
attribute of the comparison is a set (" `SS`", " `NS`", or "
`BS`"), then the operator evaluates to true if it finds an exact
match with any member of the set. CONTAINS is supported for lists:
When evaluating " `a CONTAINS b`", " `a`" can be a list; however, "
`b`" cannot be a set, a map, or a list.
+ `NOT_CONTAINS` : Checks for absence of a subsequence, or absence of a
value in a set. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If the target attribute of the comparison is a String, then
the operator checks for the absence of a substring match. If the
target attribute of the comparison is Binary, then the operator
checks for the absence of a subsequence of the target that matches
the input. If the target attribute of the comparison is a set ("
`SS`", " `NS`", or " `BS`"), then the operator evaluates to true if
it does not find an exact match with any member of the set.
NOT_CONTAINS is supported for lists: When evaluating " `a NOT
CONTAINS b`", " `a`" can be a list; however, " `b`" cannot be a
set, a map, or a list.
+ `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
only one AttributeValue of type String or Binary (not a Number or a
set type). The target attribute of the comparison must be of type
String or Binary (not a Number or a set type).
+ `IN` : Checks for matching elements within two sets.
AttributeValueList can contain one or more AttributeValue elements
of type String, Number, or Binary (not a set type). These
attributes are compared against an existing set type attribute of
an item. If any elements of the input set are present in the item
attribute, the expression evaluates to true.
+ `BETWEEN` : Greater than or equal to the first value, and less than
or equal to the second value. AttributeValueList must contain two
AttributeValue elements of the same type, either String, Number, or
Binary (not a set type). A target attribute matches if the target
value is greater than, or equal to, the first element and less
than, or equal to, the second element. If an item contains an
AttributeValue element of a different type than the one specified
in the request, the value does not match. For example, `{"S":"6"}`
does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
to `{"NS":["6", "2", "1"]}`
For usage examples of AttributeValueList and ComparisonOperator , see
`Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
Guide .
For backward compatibility with previous DynamoDB releases, the
following parameters can be used instead of AttributeValueList and
ComparisonOperator :
+ Value - A value for DynamoDB to compare with an attribute.
+ Exists - A Boolean value that causes DynamoDB to evaluate the value
before attempting the conditional operation:
+ If Exists is `True`, DynamoDB will check to see if that attribute
value already exists in the table. If it is found, then the
condition evaluates to true; otherwise the condition evaluates to
false.
+ If Exists is `False`, DynamoDB assumes that the attribute value does
not exist in the table. If in fact the value does not exist, then
the assumption is valid and the condition evaluates to true. If the
value is found, despite the assumption that it does not exist, the
condition evaluates to false.
Note that the default value for Exists is `True`.
The Value and Exists parameters are incompatible with
AttributeValueList and ComparisonOperator . Note that if you use
both sets of parameters at once, DynamoDB will return a
ValidationException exception.
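Purely as an illustration (the attribute name is hypothetical), an
Expected entry using AttributeValueList and ComparisonOperator could be
written as::

    {
        'Price': {
            'ComparisonOperator': 'LE',
            'AttributeValueList': [{'N': '100'}]
        }
    }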
:type conditional_operator: string
:param conditional_operator:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use ConditionalOperator and ConditionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the Expected map:
+ `AND` - If all of the conditions evaluate to true, then the entire
map evaluates to true.
+ `OR` - If at least one of the conditions evaluate to true, then the
entire map evaluates to true.
If you omit ConditionalOperator , then `AND` is the default.
The operation will succeed only if the entire map evaluates to true.
:type return_values: string
:param return_values:
Use ReturnValues if you want to get the item attributes as they
appeared either before or after they were updated. For UpdateItem ,
the valid values are:
+ `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
then nothing is returned. (This setting is the default for
ReturnValues .)
+ `ALL_OLD` - If UpdateItem overwrote an attribute name-value pair,
then the content of the old item is returned.
+ `UPDATED_OLD` - The old versions of only the updated attributes are
returned.
+ `ALL_NEW` - All of the attributes of the new version of the item are
returned.
+ `UPDATED_NEW` - The new versions of only the updated attributes are
returned.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: A value that if set to `SIZE`,
the response includes statistics about item collections, if any,
that were modified during the operation. If set to `NONE` (the
default), no statistics are
returned.
:type update_expression: string
:param update_expression: An expression that defines one or more
attributes to be updated, the action to be performed on them, and
new value(s) for them.
The following action values are available for UpdateExpression .
+ `SET` - Adds one or more attributes and values to an item. If any of
these attributes already exist, they are replaced by the new values.
You can also use `SET` to add or subtract from an attribute that is
of type Number. `SET` supports the following functions:
+ `if_not_exists (path, operand)` - if the item does not contain an
attribute at the specified path, then `if_not_exists` evaluates to
operand; otherwise, it evaluates to path. You can use this function
to avoid overwriting an attribute that may already be present in
the item.
+ `list_append (operand, operand)` - evaluates to a list with a new
element added to it. You can append the new element to the start or
the end of the list by reversing the order of the operands.
These function names are case-sensitive.
+ `REMOVE` - Removes one or more attributes from an item.
+ `ADD` - Adds the specified value to the item, if the attribute does
not already exist. If the attribute does exist, then the behavior
of `ADD` depends on the data type of the attribute:
+ If the existing attribute is a number, and if Value is also a number,
then Value is mathematically added to the existing attribute. If
Value is a negative number, then it is subtracted from the existing
attribute. If you use `ADD` to increment or decrement a number
value for an item that doesn't exist before the update, DynamoDB
uses `0` as the initial value. Similarly, if you use `ADD` for an
existing item to increment or decrement an attribute value that
doesn't exist before the update, DynamoDB uses `0` as the initial
value. For example, suppose that the item you want to update
doesn't have an attribute named itemcount , but you decide to `ADD`
the number `3` to this attribute anyway. DynamoDB will create the
itemcount attribute, set its initial value to `0`, and finally add
`3` to it. The result will be a new itemcount attribute in the
item, with a value of `3`.
+ If the existing data type is a set and if Value is also a set, then
Value is added to the existing set. For example, if the attribute
value is the set `[1,2]`, and the `ADD` action specified `[3]`,
then the final attribute value is `[1,2,3]`. An error occurs if an
`ADD` action is specified for a set attribute and the attribute
type specified does not match the existing set type. Both sets must
have the same primitive data type. For example, if the existing
data type is a set of strings, the Value must also be a set of
strings.
The `ADD` action only supports Number and set data types. In addition,
`ADD` can only be used on top-level attributes, not nested
attributes.
+ `DELETE` - Deletes an element from a set. If a set of values is
specified, then those values are subtracted from the old set. For
example, if the attribute value was the set `[a,b,c]` and the
`DELETE` action specifies `[a,c]`, then the final attribute value
is `[b]`. Specifying an empty set is an error. The `DELETE` action
only supports Number and set data types. In addition, `DELETE` can
only be used on top-level attributes, not nested attributes.
You can have many actions in a single expression, such as the
following: `SET a=:value1, b=:value2 DELETE :value3, :value4,
:value5`
For more information on update expressions, go to `Modifying Items and
Attributes`_ in the Amazon DynamoDB Developer Guide .
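As a hedged sketch only (the attribute names and value placeholders are
hypothetical), an UpdateExpression combining these actions and functions
might look like: `SET Replies = if_not_exists(Replies, :zero), Tags =
list_append(Tags, :newtags) REMOVE Draft`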
:type condition_expression: string
:param condition_expression: A condition that must be satisfied in
order for a conditional update to succeed.
An expression can contain any of the following:
+ Boolean functions: `attribute_exists | attribute_not_exists |
contains | begins_with` These function names are case-sensitive.
+ Comparison operators: ` = | <> | < | > | <=
| >= | BETWEEN | IN`
+ Logical operators: `AND | OR | NOT`
For more information on condition expressions, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_values: map
:param expression_attribute_values: One or more values that can be
substituted in an expression.
Use the **:** (colon) character in an expression to dereference an
attribute value. For example, suppose that you wanted to check
whether the value of the ProductStatus attribute was one of the
following:
`Available | Backordered | Discontinued`
You would first need to specify ExpressionAttributeValues as follows:
`{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
":disc":{"S":"Discontinued"} }`
You could then use these values in an expression, such as this:
`ProductStatus IN (:avail, :back, :disc)`
For more information on expression attribute values, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
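The following is a minimal usage sketch, not a definitive recipe: it
assumes a connected instance of this class named `conn`, and the table,
key, and attribute names are hypothetical::

    conn.update_item(
        table_name='Thread',
        key={'ForumName': {'S': 'Amazon DynamoDB'}},
        update_expression='SET LastPostedBy = :val1',
        condition_expression='LastPostedBy = :val2',
        expression_attribute_values={
            ':val1': {'S': 'alice@example.com'},
            ':val2': {'S': 'bob@example.com'},
        },
        return_values='ALL_NEW',
    )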
"""
params = {'TableName': table_name, 'Key': key, }
if attribute_updates is not None:
params['AttributeUpdates'] = attribute_updates
if expected is not None:
params['Expected'] = expected
if conditional_operator is not None:
params['ConditionalOperator'] = conditional_operator
if return_values is not None:
params['ReturnValues'] = return_values
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
if update_expression is not None:
params['UpdateExpression'] = update_expression
if condition_expression is not None:
params['ConditionExpression'] = condition_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values is not None:
params['ExpressionAttributeValues'] = expression_attribute_values
return self.make_request(action='UpdateItem',
body=json.dumps(params))
def update_table(self, table_name, provisioned_throughput=None,
global_secondary_index_updates=None,
attribute_definitions=None):
"""
Updates the provisioned throughput for the given table, or
manages the global secondary indexes on the table.
You can increase or decrease the table's provisioned
throughput values within the maximums and minimums listed in
the `Limits`_ section in the Amazon DynamoDB Developer Guide .
In addition, you can use UpdateTable to add, modify or delete
global secondary indexes on the table. For more information,
see `Managing Global Secondary Indexes`_ in the Amazon
DynamoDB Developer Guide .
The table must be in the `ACTIVE` state for UpdateTable to
succeed. UpdateTable is an asynchronous operation; while
executing the operation, the table is in the `UPDATING` state.
While the table is in the `UPDATING` state, the table still
has the provisioned throughput from before the call. The
table's new provisioned throughput settings go into effect
when the table returns to the `ACTIVE` state; at that point,
the UpdateTable operation is complete.
:type attribute_definitions: list
:param attribute_definitions: An array of attributes that describe the
key schema for the table and indexes. If you are adding a new
global secondary index to the table, AttributeDefinitions must
include the key element(s) of the new index.
:type table_name: string
:param table_name: The name of the table to be updated.
:type provisioned_throughput: dict
:param provisioned_throughput: Represents the provisioned throughput
settings for a specified table or index. The settings can be
modified using the UpdateTable operation.
For current minimum and maximum provisioned throughput values, see
`Limits`_ in the Amazon DynamoDB Developer Guide .
:type global_secondary_index_updates: list
:param global_secondary_index_updates:
An array of one or more global secondary indexes for the table. For
each index in the array, you can specify one action:
+ Create - add a new global secondary index to the table.
+ Update - modify the provisioned throughput settings of an existing
global secondary index.
+ Delete - remove a global secondary index from the table.
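As a hedged sketch of the expected shapes (the table and index names are
hypothetical, and this assumes a connected instance of this class named
`conn`)::

    conn.update_table(
        table_name='Thread',
        provisioned_throughput={
            'ReadCapacityUnits': 10,
            'WriteCapacityUnits': 5,
        },
        global_secondary_index_updates=[
            {'Update': {
                'IndexName': 'LastPostIndex',
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 5,
                    'WriteCapacityUnits': 5,
                },
            }},
        ],
    )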
"""
params = {'TableName': table_name, }
if attribute_definitions is not None:
params['AttributeDefinitions'] = attribute_definitions
if provisioned_throughput is not None:
params['ProvisionedThroughput'] = provisioned_throughput
if global_secondary_index_updates is not None:
params['GlobalSecondaryIndexUpdates'] = global_secondary_index_updates
return self.make_request(action='UpdateTable',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.host,
'Content-Type': 'application/x-amz-json-1.0',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body, host=self.host)
response = self._mexe(http_request, sender=None,
override_num_retries=self.NumberRetries,
retry_handler=self._retry_handler)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
def _retry_handler(self, response, i, next_sleep):
status = None
boto.log.debug("Saw HTTP status: %s" % response.status)
if response.status == 400:
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
data = json.loads(response_body)
if 'ProvisionedThroughputExceededException' in data.get('__type'):
self.throughput_exceeded_events += 1
msg = "%s, retry attempt %s" % (
'ProvisionedThroughputExceededException',
i
)
next_sleep = self._truncated_exponential_time(i)
i += 1
status = (msg, i, next_sleep)
if i == self.NumberRetries:
# If this was our last retry attempt, raise
# a specific error saying that the throughput
# was exceeded.
raise exceptions.ProvisionedThroughputExceededException(
response.status, response.reason, data)
elif 'ConditionalCheckFailedException' in data.get('__type'):
raise exceptions.ConditionalCheckFailedException(
response.status, response.reason, data)
elif 'ValidationException' in data.get('__type'):
raise exceptions.ValidationException(
response.status, response.reason, data)
else:
raise self.ResponseError(response.status, response.reason,
data)
expected_crc32 = response.getheader('x-amz-crc32')
if self._validate_checksums and expected_crc32 is not None:
boto.log.debug('Validating crc32 checksum for body: %s',
response.read())
actual_crc32 = crc32(response.read()) & 0xffffffff
expected_crc32 = int(expected_crc32)
if actual_crc32 != expected_crc32:
msg = ("The calculated checksum %s did not match the expected "
"checksum %s" % (actual_crc32, expected_crc32))
status = (msg, i + 1, self._truncated_exponential_time(i))
return status
def _truncated_exponential_time(self, i):
if i == 0:
next_sleep = 0
else:
next_sleep = min(0.05 * (2 ** i),
boto.config.get('Boto', 'max_retry_delay', 60))
return next_sleep
|
mit
|
LarryHillyer/PoolHost
|
PoolHost/env/Lib/site-packages/django/core/serializers/pyyaml.py
|
439
|
2843
|
"""
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
import collections
import decimal
import sys
from io import StringIO
import yaml
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.db import models
from django.utils import six
# Use the C (faster) implementation if possible
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
class DjangoSafeDumper(SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
def represent_ordered_dict(self, data):
return self.represent_mapping('tag:yaml.org,2002:map', data.items())
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
if isinstance(stream_or_string, six.string_types):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
try:
for obj in PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
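# A minimal usage sketch, not part of the original module: these classes are
# normally reached through the django.core.serializers registry rather than
# being imported directly, and `Author` here is a hypothetical model.
#
#     from django.core import serializers
#     yaml_data = serializers.serialize('yaml', Author.objects.all())
#     restored = [d.object for d in serializers.deserialize('yaml', yaml_data)]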
|
gpl-3.0
|
israeleriston/scientific-week
|
backend/venv/lib/python3.5/site-packages/setuptools/namespaces.py
|
196
|
3199
|
import os
from distutils import log
import itertools
from setuptools.extern.six.moves import map
flatten = itertools.chain.from_iterable
class Installer:
nspkg_ext = '-nspkg.pth'
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp:
return
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
self.outputs.append(filename)
log.info("Installing %s", filename)
lines = map(self._gen_nspkg_line, nsp)
if self.dry_run:
# always generate the lines, even in dry run
list(lines)
return
with open(filename, 'wt') as f:
f.writelines(lines)
def uninstall_namespaces(self):
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
if not os.path.exists(filename):
return
log.info("Removing %s", filename)
os.remove(filename)
def _get_target(self):
return self.target
_nspkg_tmpl = (
"import sys, types, os",
"has_mfs = sys.version_info > (3, 5)",
"p = os.path.join(%(root)s, *%(pth)r)",
"importlib = has_mfs and __import__('importlib.util')",
"has_mfs and __import__('importlib.machinery')",
"m = has_mfs and "
"sys.modules.setdefault(%(pkg)r, "
"importlib.util.module_from_spec("
"importlib.machinery.PathFinder.find_spec(%(pkg)r, "
"[os.path.dirname(p)])))",
"m = m or "
"sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
"mp = (m or []) and m.__dict__.setdefault('__path__',[])",
"(p not in mp) and mp.append(p)",
)
"lines for the namespace installer"
_nspkg_tmpl_multi = (
'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
)
"additional line(s) when a parent package is indicated"
def _get_root(self):
return "sys._getframe(1).f_locals['sitedir']"
def _gen_nspkg_line(self, pkg):
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
root = self._get_root()
tmpl_lines = self._nspkg_tmpl
parent, sep, child = pkg.rpartition('.')
if parent:
tmpl_lines += self._nspkg_tmpl_multi
return ';'.join(tmpl_lines) % locals() + '\n'
def _get_all_ns_packages(self):
"""Return sorted list of all package namespaces"""
pkgs = self.distribution.namespace_packages or []
return sorted(flatten(map(self._pkg_names, pkgs)))
@staticmethod
def _pkg_names(pkg):
"""
Given a namespace package, yield the components of that
package.
>>> names = Installer._pkg_names('a.b.c')
>>> set(names) == set(['a', 'a.b', 'a.b.c'])
True
"""
parts = pkg.split('.')
while parts:
yield '.'.join(parts)
parts.pop()
class DevelopInstaller(Installer):
def _get_root(self):
return repr(str(self.egg_path))
def _get_target(self):
return self.egg_link
|
mit
|
conan-io/conan
|
conans/client/graph/build_mode.py
|
1
|
4085
|
import fnmatch
from conans.errors import ConanException
class BuildMode(object):
""" build_mode => ["*"] if user wrote "--build"
=> ["hello*", "bye*"] if user wrote "--build hello --build bye"
=> ["hello/0.1@foo/bar"] if user wrote "--build hello/0.1@foo/bar"
=> False if user wrote "never"
=> True if user wrote "missing"
=> "outdated" if user wrote "--build outdated"
=> ["!foo"] means exclude when building all from sources
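Illustrative sketch (package names are hypothetical): `--build "*"
--build "!zlib*"` yields patterns ["*"] with "zlib*" excluded, so every
reference is built from source except those matching "zlib*".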
"""
def __init__(self, params, output):
self._out = output
self.outdated = False
self.missing = False
self.never = False
self.cascade = False
self.patterns = []
self._unused_patterns = []
self._excluded_patterns = []
self.all = False
if params is None:
return
assert isinstance(params, list)
if len(params) == 0:
self.all = True
else:
for param in params:
if param == "outdated":
self.outdated = True
elif param == "missing":
self.missing = True
elif param == "never":
self.never = True
elif param == "cascade":
self.cascade = True
else:
# Remove the @ at the end, to match for "conan install pkg/0.1@ --build=pkg/0.1@"
clean_pattern = param[:-1] if param.endswith("@") else param
clean_pattern = clean_pattern.replace("@#", "#")
if clean_pattern and clean_pattern[0] == "!":
self._excluded_patterns.append(clean_pattern[1:])
else:
self.patterns.append(clean_pattern)
if self.never and (self.outdated or self.missing or self.patterns or self.cascade):
raise ConanException("--build=never not compatible with other options")
self._unused_patterns = list(self.patterns) + self._excluded_patterns
def forced(self, conan_file, ref, with_deps_to_build=False):
def pattern_match(pattern_):
return (fnmatch.fnmatchcase(ref.name, pattern_) or
fnmatch.fnmatchcase(repr(ref.copy_clear_rev()), pattern_) or
fnmatch.fnmatchcase(repr(ref), pattern_))
for pattern in self._excluded_patterns:
if pattern_match(pattern):
try:
self._unused_patterns.remove(pattern)
except ValueError:
pass
conan_file.output.info("Excluded build from source")
return False
if conan_file.build_policy == "never": # this package has been export-pkg
return False
if self.never:
return False
if self.all:
return True
if conan_file.build_policy_always:
conan_file.output.info("Building package from source as defined by "
"build_policy='always'")
return True
if self.cascade and with_deps_to_build:
return True
# Patterns to match, if package matches pattern, build is forced
for pattern in self.patterns:
if pattern_match(pattern):
try:
self._unused_patterns.remove(pattern)
except ValueError:
pass
return True
return False
def allowed(self, conan_file):
if self.missing or self.outdated:
return True
if conan_file.build_policy_missing:
conan_file.output.info("Building package from source as defined by "
"build_policy='missing'")
return True
return False
def report_matches(self):
for pattern in self._unused_patterns:
self._out.error("No package matching '%s' pattern found." % pattern)
|
mit
|
qrkourier/ansible
|
lib/ansible/modules/cloud/amazon/elb_target_group_facts.py
|
8
|
9841
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: elb_target_group_facts
short_description: Gather facts about ELB target groups in AWS
description:
- Gather facts about ELB target groups in AWS
version_added: "2.4"
author: Rob White (@wimnat)
options:
load_balancer_arn:
description:
- The Amazon Resource Name (ARN) of the load balancer.
required: false
target_group_arns:
description:
- The Amazon Resource Names (ARN) of the target groups.
required: false
names:
description:
- The names of the target groups.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all target groups
- elb_target_group_facts:
# Gather facts about the target group attached to a particular ELB
- elb_target_group_facts:
load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
# Gather facts about target groups named 'tg1' and 'tg2'
- elb_target_group_facts:
names:
- tg1
- tg2
'''
RETURN = '''
target_groups:
description: a list of target groups
returned: always
type: complex
contains:
deregistration_delay_timeout_seconds:
description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
returned: always
type: int
sample: 300
health_check_interval_seconds:
description: The approximate amount of time, in seconds, between health checks of an individual target.
returned: always
type: int
sample: 30
health_check_path:
description: The destination for the health check request.
returned: always
type: string
sample: /index.html
health_check_port:
description: The port to use to connect with the target.
returned: always
type: string
sample: traffic-port
health_check_protocol:
description: The protocol to use to connect with the target.
returned: always
type: string
sample: HTTP
health_check_timeout_seconds:
description: The amount of time, in seconds, during which no response means a failed health check.
returned: always
type: int
sample: 5
healthy_threshold_count:
description: The number of consecutive health check successes required before considering an unhealthy target healthy.
returned: always
type: int
sample: 5
load_balancer_arns:
description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
returned: always
type: list
sample: []
matcher:
description: The HTTP codes to use when checking for a successful response from a target.
returned: always
type: dict
sample: {
"http_code": "200"
}
port:
description: The port on which the targets are listening.
returned: always
type: int
sample: 80
protocol:
description: The protocol to use for routing traffic to the targets.
returned: always
type: string
sample: HTTP
stickiness_enabled:
description: Indicates whether sticky sessions are enabled.
returned: always
type: bool
sample: true
stickiness_lb_cookie_duration_seconds:
description: The time period, in seconds, during which requests from a client should be routed to the same target.
returned: always
type: int
sample: 86400
stickiness_type:
description: The type of sticky sessions.
returned: always
type: string
sample: lb_cookie
tags:
description: The tags attached to the target group.
returned: always
type: dict
sample: "{
'Tag': 'Example'
}"
target_group_arn:
description: The Amazon Resource Name (ARN) of the target group.
returned: always
type: string
sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
target_group_name:
description: The name of the target group.
returned: always
type: string
sample: mytargetgroup
unhealthy_threshold_count:
description: The number of consecutive health check failures required before considering the target unhealthy.
returned: always
type: int
sample: 2
vpc_id:
description: The ID of the VPC for the targets.
returned: always
type: string
sample: vpc-0123456
'''
import traceback
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def get_target_group_attributes(connection, module, target_group_arn):
try:
target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
# Replace '.' with '_' in attribute key names to make it more Ansibley
for k, v in target_group_attributes.items():
target_group_attributes[k.replace('.', '_')] = v
del target_group_attributes[k]
return target_group_attributes
def get_target_group_tags(connection, module, target_group_arn):
try:
return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def list_target_groups(connection, module):
load_balancer_arn = module.params.get("load_balancer_arn")
target_group_arns = module.params.get("target_group_arns")
names = module.params.get("names")
try:
target_group_paginator = connection.get_paginator('describe_target_groups')
if not load_balancer_arn and not target_group_arns and not names:
target_groups = target_group_paginator.paginate().build_full_result()
if load_balancer_arn:
target_groups = target_group_paginator.paginate(LoadBalancerArn=load_balancer_arn).build_full_result()
if target_group_arns:
target_groups = target_group_paginator.paginate(TargetGroupArns=target_group_arns).build_full_result()
if names:
target_groups = target_group_paginator.paginate(Names=names).build_full_result()
except ClientError as e:
if e.response['Error']['Code'] == 'TargetGroupNotFound':
module.exit_json(target_groups=[])
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except NoCredentialsError as e:
module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
# Get the attributes and tags for each target group
for target_group in target_groups['TargetGroups']:
target_group.update(get_target_group_attributes(connection, module, target_group['TargetGroupArn']))
# Turn the boto3 result in to ansible_friendly_snaked_names
snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']]
# Get tags for each target group
for snaked_target_group in snaked_target_groups:
snaked_target_group['tags'] = get_target_group_tags(connection, module, snaked_target_group['target_group_arn'])
module.exit_json(target_groups=snaked_target_groups)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
load_balancer_arn=dict(type='str'),
target_group_arns=dict(type='list'),
names=dict(type='list')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=['load_balancer_arn', 'target_group_arns', 'names'],
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_target_groups(connection, module)
if __name__ == '__main__':
main()
|
gpl-3.0
|
HXLStandard/hxl-proxy
|
tests/test_dao.py
|
1
|
3313
|
"""
Unit tests for hxl-proxy dao module
David Megginson
February 2016
License: Public Domain
"""
import unittest, os
from hxl_proxy import app, dao
from . import base
class AbstractDAOTest(base.AbstractDBTest):
"""Abstract base class for DAO tests."""
def setUp(self):
super().setUp()
def tearDown(self):
super().tearDown()
def assertEquiv(self, model, actual):
"""Test equivalence where everything in model must be the same in actual
(but actual can have extra values)."""
for key in model:
self.assertEqual(model.get(key), actual.get(key), key)
class TestUser(AbstractDAOTest):
"""Test user DAO functionality"""
NEW_USER = {
'user_id': 'user3',
'email': '[email protected]',
'name': 'User Three',
'name_given': 'User',
'name_family': 'Three'
}
def test_create(self):
dao.users.create(self.NEW_USER)
result = dao.users.read(self.NEW_USER['user_id'])
self.assertEquiv(self.NEW_USER, result)
assert result.get('last_login') is not None
def test_read(self):
user = {
'user_id': 'user1',
'email': '[email protected]',
'name': 'User One',
'name_given': 'User',
'name_family': 'One'
}
self.assertEquiv(user, dao.users.read('user1'))
def test_update(self):
user = dict(self.NEW_USER)
user['user_id'] = 'user1'
dao.users.update(user)
self.assertEquiv(user, dao.users.read(user['user_id']))
def test_delete(self):
dao.users.create(self.NEW_USER)
assert dao.users.read(self.NEW_USER['user_id']) is not None
dao.users.delete(self.NEW_USER['user_id'])
assert dao.users.read(self.NEW_USER['user_id']) is None
class TestRecipe(AbstractDAOTest):
NEW_RECIPE = {
'recipe_id': 'XXXXX',
'passhash': '5f4dcc3b5aa765d61d8327deb882cf99',
'name': 'Recipe X',
'description': 'New test recipe',
'cloneable': 1,
'stub': 'recipex',
'args': {}
}
def test_create(self):
dao.recipes.create(self.NEW_RECIPE)
result = dao.recipes.read(self.NEW_RECIPE['recipe_id'])
self.assertEquiv(self.NEW_RECIPE, result)
assert result['date_created']
self.assertEqual(result['date_created'], result['date_modified'])
def test_read(self):
recipe = {
'recipe_id': 'AAAAA',
'passhash': '5f4dcc3b5aa765d61d8327deb882cf99',
'name': 'Recipe #1',
'description': 'First test recipe',
'cloneable': 1,
'stub': 'recipe1',
'args': {'url':'http://example.org/basic-dataset.csv'}
}
self.assertEquiv(recipe, dao.recipes.read(recipe['recipe_id']))
def test_update(self):
recipe = dict(self.NEW_RECIPE)
recipe['recipe_id'] = 'AAAAA'
dao.recipes.update(recipe)
result = dao.recipes.read('AAAAA')
self.assertEquiv(recipe, result)
self.assertNotEqual(result['date_created'], result['date_modified'])
def test_delete(self):
assert dao.recipes.read('AAAAA') is not None
dao.recipes.delete('AAAAA')
assert dao.recipes.read('AAAAA') is None
|
unlicense
|
grlee77/scipy
|
benchmarks/benchmarks/signal_filtering.py
|
7
|
3025
|
import numpy as np
import timeit
from concurrent.futures import ThreadPoolExecutor, wait
from .common import Benchmark, safe_import
with safe_import():
from scipy.signal import (lfilter, firwin, decimate, butter, sosfilt,
medfilt2d)
class Decimate(Benchmark):
param_names = ['q', 'ftype', 'zero_phase']
params = [
[2, 10, 30],
['iir', 'fir'],
[True, False]
]
def setup(self, q, ftype, zero_phase):
np.random.seed(123456)
sample_rate = 10000.
t = np.arange(int(1e6), dtype=np.float64) / sample_rate
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*4e3*t)
def time_decimate(self, q, ftype, zero_phase):
decimate(self.sig, q, ftype=ftype, zero_phase=zero_phase)
class Lfilter(Benchmark):
param_names = ['n_samples', 'numtaps']
params = [
[1e3, 50e3, 1e6],
[9, 23, 51]
]
def setup(self, n_samples, numtaps):
np.random.seed(125678)
sample_rate = 25000.
t = np.arange(n_samples, dtype=np.float64) / sample_rate
nyq_rate = sample_rate / 2.
cutoff_hz = 3000.0
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*11e3*t)
self.coeff = firwin(numtaps, cutoff_hz/nyq_rate)
def time_lfilter(self, n_samples, numtaps):
lfilter(self.coeff, 1.0, self.sig)
class ParallelSosfilt(Benchmark):
timeout = 100
timer = timeit.default_timer
param_names = ['n_samples', 'threads']
params = [
[1e3, 10e3],
[1, 2, 4]
]
def setup(self, n_samples, threads):
self.filt = butter(8, 8e-6, "lowpass", output="sos")
self.data = np.arange(int(n_samples) * 3000).reshape(int(n_samples), 3000)
self.chunks = np.array_split(self.data, threads)
def time_sosfilt(self, n_samples, threads):
with ThreadPoolExecutor(max_workers=threads) as pool:
futures = []
for i in range(threads):
futures.append(pool.submit(sosfilt, self.filt, self.chunks[i]))
wait(futures)
class Sosfilt(Benchmark):
param_names = ['n_samples', 'order']
params = [
[1000, 1000000],
[6, 20]
]
def setup(self, n_samples, order):
self.sos = butter(order, [0.1575, 0.1625], 'band', output='sos')
self.y = np.random.RandomState(0).randn(n_samples)
def time_sosfilt_basic(self, n_samples, order):
sosfilt(self.sos, self.y)
class MedFilt2D(Benchmark):
param_names = ['threads']
params = [[1, 2, 4]]
def setup(self, threads):
np.random.seed(8176)
self.chunks = np.array_split(np.random.randn(250, 349), threads)
def _medfilt2d(self, threads):
with ThreadPoolExecutor(max_workers=threads) as pool:
wait({pool.submit(medfilt2d, chunk, 5) for chunk in self.chunks})
def time_medfilt2d(self, threads):
self._medfilt2d(threads)
def peakmem_medfilt2d(self, threads):
self._medfilt2d(threads)
|
bsd-3-clause
|
jusdng/odoo
|
addons/account/wizard/account_invoice_state.py
|
340
|
2875
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_invoice_confirm(osv.osv_memory):
"""
This wizard will confirm all the selected draft invoices
"""
_name = "account.invoice.confirm"
_description = "Confirm the selected invoices"
def invoice_confirm(self, cr, uid, ids, context=None):
if context is None:
context = {}
active_ids = context.get('active_ids', []) or []
proxy = self.pool['account.invoice']
for record in proxy.browse(cr, uid, active_ids, context=context):
if record.state not in ('draft', 'proforma', 'proforma2'):
raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be confirmed as they are not in 'Draft' or 'Pro-Forma' state."))
record.signal_workflow('invoice_open')
return {'type': 'ir.actions.act_window_close'}
class account_invoice_cancel(osv.osv_memory):
"""
This wizard will cancel all the selected invoices.
If the 'Allow Cancelling Entries' option is not enabled in the journal, a warning message will be raised.
"""
_name = "account.invoice.cancel"
_description = "Cancel the Selected Invoices"
def invoice_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
proxy = self.pool['account.invoice']
active_ids = context.get('active_ids', []) or []
for record in proxy.browse(cr, uid, active_ids, context=context):
if record.state in ('cancel','paid'):
raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be cancelled as they are already in 'Cancelled' or 'Done' state."))
record.signal_workflow('invoice_cancel')
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
angelapper/edx-platform
|
lms/djangoapps/student_account/test/test_views.py
|
2
|
37721
|
# -*- coding: utf-8 -*-
""" Tests for student account views. """
import logging
import re
from unittest import skipUnless
from urllib import urlencode
import ddt
import mock
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.messages.middleware import MessageMiddleware
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase
from django.test.utils import override_settings
from edx_oauth2_provider.tests.factories import AccessTokenFactory, ClientFactory, RefreshTokenFactory
from edx_rest_api_client import exceptions
from nose.plugins.attrib import attr
from oauth2_provider.models import AccessToken as dot_access_token
from oauth2_provider.models import RefreshToken as dot_refresh_token
from provider.oauth2.models import AccessToken as dop_access_token
from provider.oauth2.models import RefreshToken as dop_refresh_token
from testfixtures import LogCapture
from commerce.models import CommerceConfiguration
from commerce.tests import factories
from commerce.tests.mocks import mock_get_orders
from course_modes.models import CourseMode
from http.cookies import SimpleCookie
from openedx.core.djangoapps.oauth_dispatch.tests import factories as dot_factories
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangoapps.site_configuration.tests.mixins import SiteMixin
from openedx.core.djangoapps.theming.tests.test_util import with_comprehensive_theme_context
from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
from student_account.views import account_settings_context, get_user_orders
from third_party_auth.tests.testutil import ThirdPartyAuthTestMixin, simulate_running_pipeline
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
LOGGER_NAME = 'audit'
User = get_user_model() # pylint:disable=invalid-name
@ddt.ddt
class StudentAccountUpdateTest(CacheIsolationTestCase, UrlResetMixin):
""" Tests for the student account views that update the user's account information. """
USERNAME = u"heisenberg"
ALTERNATE_USERNAME = u"walt"
OLD_PASSWORD = u"ḅḷüëṡḳÿ"
NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴"
OLD_EMAIL = u"[email protected]"
NEW_EMAIL = u"[email protected]"
INVALID_ATTEMPTS = 100
INVALID_KEY = u"123abc"
URLCONF_MODULES = ['student_accounts.urls']
ENABLED_CACHES = ['default']
def setUp(self):
super(StudentAccountUpdateTest, self).setUp()
# Create/activate a new account
activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
activate_account(activation_key)
# Login
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertTrue(result)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
def test_password_change(self):
# Request a password change while logged in, simulating
# use of the password reset link from the account page
response = self._change_password()
self.assertEqual(response.status_code, 200)
# Check that an email was sent
self.assertEqual(len(mail.outbox), 1)
# Retrieve the activation link from the email body
email_body = mail.outbox[0].body
result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
self.assertIsNot(result, None)
activation_link = result.group('url')
# Visit the activation link
response = self.client.get(activation_link)
self.assertEqual(response.status_code, 200)
# Submit a new password and follow the redirect to the success page
response = self.client.post(
activation_link,
# These keys are from the form on the current password reset confirmation page.
{'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Your password has been reset.")
# Log the user out to clear session data
self.client.logout()
# Verify that the new password can be used to log in
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
# Try reusing the activation link to change the password again
# Visit the activation link again.
response = self.client.get(activation_link)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "This password reset link is invalid. It may have been used already.")
self.client.logout()
# Verify that the old password cannot be used to log in
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertFalse(result)
# Verify that the new password continues to be valid
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
@ddt.data(True, False)
def test_password_change_logged_out(self, send_email):
# Log the user out
self.client.logout()
# Request a password change while logged out, simulating
# use of the password reset link from the login page
if send_email:
response = self._change_password(email=self.OLD_EMAIL)
self.assertEqual(response.status_code, 200)
else:
# Don't send an email in the POST data, simulating
# its (potentially accidental) omission in the POST
# data sent from the login page
response = self._change_password()
self.assertEqual(response.status_code, 400)
def test_access_token_invalidation_logged_out(self):
self.client.logout()
user = User.objects.get(email=self.OLD_EMAIL)
self._create_dop_tokens(user)
self._create_dot_tokens(user)
response = self._change_password(email=self.OLD_EMAIL)
self.assertEqual(response.status_code, 200)
self.assert_access_token_destroyed(user)
def test_access_token_invalidation_logged_in(self):
user = User.objects.get(email=self.OLD_EMAIL)
self._create_dop_tokens(user)
self._create_dot_tokens(user)
response = self._change_password()
self.assertEqual(response.status_code, 200)
self.assert_access_token_destroyed(user)
def test_password_change_inactive_user(self):
# Log out the user created during test setup
self.client.logout()
# Create a second user, but do not activate it
create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
# Send the view the email address tied to the inactive user
response = self._change_password(email=self.NEW_EMAIL)
# Expect that the activation email is still sent,
# since the user may have lost the original activation email.
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
def test_password_change_no_user(self):
# Log out the user created during test setup
self.client.logout()
with LogCapture(LOGGER_NAME, level=logging.INFO) as logger:
# Send the view an email address not tied to any user
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 200)
logger.check((LOGGER_NAME, 'INFO', 'Invalid password reset attempt'))
def test_password_change_rate_limited(self):
# Log out the user created during test setup, to prevent the view from
# selecting the logged-in user's email address over the email provided
# in the POST data
self.client.logout()
# Make many consecutive bad requests in an attempt to trigger the rate limiter
for __ in xrange(self.INVALID_ATTEMPTS):
self._change_password(email=self.NEW_EMAIL)
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 403)
@ddt.data(
('post', 'password_change_request', []),
)
@ddt.unpack
def test_require_http_method(self, correct_method, url_name, args):
wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
url = reverse(url_name, args=args)
for method in wrong_methods:
response = getattr(self.client, method)(url)
self.assertEqual(response.status_code, 405)
def _change_password(self, email=None):
"""Request to change the user's password. """
data = {}
if email:
data['email'] = email
return self.client.post(path=reverse('password_change_request'), data=data)
def _create_dop_tokens(self, user=None):
"""Create dop access token for given user if user provided else for default user."""
if not user:
user = User.objects.get(email=self.OLD_EMAIL)
client = ClientFactory()
access_token = AccessTokenFactory(user=user, client=client)
RefreshTokenFactory(user=user, client=client, access_token=access_token)
def _create_dot_tokens(self, user=None):
"""Create dot access token for given user if user provided else for default user."""
if not user:
user = User.objects.get(email=self.OLD_EMAIL)
application = dot_factories.ApplicationFactory(user=user)
access_token = dot_factories.AccessTokenFactory(user=user, application=application)
dot_factories.RefreshTokenFactory(user=user, application=application, access_token=access_token)
def assert_access_token_destroyed(self, user):
"""Assert all access tokens are destroyed."""
self.assertFalse(dot_access_token.objects.filter(user=user).exists())
self.assertFalse(dot_refresh_token.objects.filter(user=user).exists())
self.assertFalse(dop_access_token.objects.filter(user=user).exists())
self.assertFalse(dop_refresh_token.objects.filter(user=user).exists())
@attr(shard=3)
@ddt.ddt
class StudentAccountLoginAndRegistrationTest(ThirdPartyAuthTestMixin, UrlResetMixin, ModuleStoreTestCase):
""" Tests for the student account login and registration views. """
USERNAME = "bob"
EMAIL = "[email protected]"
PASSWORD = "password"
URLCONF_MODULES = ['openedx.core.djangoapps.embargo']
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(StudentAccountLoginAndRegistrationTest, self).setUp()
# Several third party auth providers are created for these tests:
self.google_provider = self.configure_google_provider(enabled=True, visible=True)
self.configure_facebook_provider(enabled=True, visible=True)
self.configure_dummy_provider(
visible=True,
enabled=True,
icon_class='',
icon_image=SimpleUploadedFile('icon.svg', '<svg><rect width="50" height="100"/></svg>'),
)
self.hidden_enabled_provider = self.configure_linkedin_provider(
visible=False,
enabled=True,
)
self.hidden_disabled_provider = self.configure_azure_ad_provider()
@ddt.data(
("signin_user", "login"),
("register_user", "register"),
)
@ddt.unpack
def test_login_and_registration_form(self, url_name, initial_mode):
response = self.client.get(reverse(url_name))
expected_data = '"initial_mode": "{mode}"'.format(mode=initial_mode)
self.assertContains(response, expected_data)
@ddt.data("signin_user", "register_user")
def test_login_and_registration_form_already_authenticated(self, url_name):
# Create/activate a new account and log in
activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
activate_account(activation_key)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result)
# Verify that we're redirected to the dashboard
response = self.client.get(reverse(url_name))
self.assertRedirects(response, reverse("dashboard"))
@ddt.data(
(None, "signin_user"),
(None, "register_user"),
("edx.org", "signin_user"),
("edx.org", "register_user"),
)
@ddt.unpack
def test_login_and_registration_form_signin_not_preserves_params(self, theme, url_name):
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
]
# The response should not have a "Sign In" button with the URL
# that preserves the querystring params
with with_comprehensive_theme_context(theme):
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
expected_url = '/login?{}'.format(self._finish_auth_url_param(params + [('next', '/dashboard')]))
self.assertNotContains(response, expected_url)
# Add additional parameters:
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
('course_mode', CourseMode.DEFAULT_MODE_SLUG),
('email_opt_in', 'true'),
('next', '/custom/final/destination')
]
# Verify that these parameters are also not preserved
with with_comprehensive_theme_context(theme):
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
expected_url = '/login?{}'.format(self._finish_auth_url_param(params))
self.assertNotContains(response, expected_url)
@mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False})
@ddt.data("signin_user", "register_user")
def test_third_party_auth_disabled(self, url_name):
response = self.client.get(reverse(url_name))
self._assert_third_party_auth_data(response, None, None, [], None)
@mock.patch('student_account.views.enterprise_customer_for_request')
@ddt.data(
("signin_user", None, None, None),
("register_user", None, None, None),
("signin_user", "google-oauth2", "Google", None),
("register_user", "google-oauth2", "Google", None),
("signin_user", "facebook", "Facebook", None),
("register_user", "facebook", "Facebook", None),
("signin_user", "dummy", "Dummy", None),
("register_user", "dummy", "Dummy", None),
(
"signin_user",
"google-oauth2",
"Google",
{
'name': 'FakeName',
'logo': 'https://host.com/logo.jpg',
'welcome_msg': 'No message'
}
)
)
@ddt.unpack
def test_third_party_auth(
self,
url_name,
current_backend,
current_provider,
expected_enterprise_customer_mock_attrs,
enterprise_customer_mock
):
params = [
('course_id', 'course-v1:Org+Course+Run'),
('enrollment_action', 'enroll'),
('course_mode', CourseMode.DEFAULT_MODE_SLUG),
('email_opt_in', 'true'),
('next', '/custom/final/destination'),
]
if expected_enterprise_customer_mock_attrs:
expected_ec = mock.MagicMock(
branding_configuration=mock.MagicMock(
logo=mock.MagicMock(
url=expected_enterprise_customer_mock_attrs['logo']
),
welcome_message=expected_enterprise_customer_mock_attrs['welcome_msg']
)
)
expected_ec.name = expected_enterprise_customer_mock_attrs['name']
else:
expected_ec = None
enterprise_customer_mock.return_value = expected_ec
# Simulate a running pipeline
if current_backend is not None:
pipeline_target = "student_account.views.third_party_auth.pipeline"
with simulate_running_pipeline(pipeline_target, current_backend):
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
# Do NOT simulate a running pipeline
else:
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
# This relies on the THIRD_PARTY_AUTH configuration in the test settings
expected_providers = [
{
"id": "oa2-dummy",
"name": "Dummy",
"iconClass": None,
"iconImage": settings.MEDIA_URL + "icon.svg",
"loginUrl": self._third_party_login_url("dummy", "login", params),
"registerUrl": self._third_party_login_url("dummy", "register", params)
},
{
"id": "oa2-facebook",
"name": "Facebook",
"iconClass": "fa-facebook",
"iconImage": None,
"loginUrl": self._third_party_login_url("facebook", "login", params),
"registerUrl": self._third_party_login_url("facebook", "register", params)
},
{
"id": "oa2-google-oauth2",
"name": "Google",
"iconClass": "fa-google-plus",
"iconImage": None,
"loginUrl": self._third_party_login_url("google-oauth2", "login", params),
"registerUrl": self._third_party_login_url("google-oauth2", "register", params)
},
]
self._assert_third_party_auth_data(
response,
current_backend,
current_provider,
expected_providers,
expected_ec
)
def test_hinted_login(self):
params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
response = self.client.get(reverse('signin_user'), params, HTTP_ACCEPT="text/html")
self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')
tpa_hint = self.hidden_enabled_provider.provider_id
params = [("next", "/courses/something/?tpa_hint={0}".format(tpa_hint))]
response = self.client.get(reverse('signin_user'), params, HTTP_ACCEPT="text/html")
self.assertContains(response, '"third_party_auth_hint": "{0}"'.format(tpa_hint))
tpa_hint = self.hidden_disabled_provider.provider_id
params = [("next", "/courses/something/?tpa_hint={0}".format(tpa_hint))]
response = self.client.get(reverse('signin_user'), params, HTTP_ACCEPT="text/html")
self.assertNotIn(tpa_hint, response.content)
@ddt.data(
('signin_user', 'login'),
('register_user', 'register'),
)
@ddt.unpack
def test_hinted_login_dialog_disabled(self, url_name, auth_entry):
"""Test that the dialog doesn't show up for hinted logins when disabled. """
self.google_provider.skip_hinted_login_dialog = True
self.google_provider.save()
params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
self.assertRedirects(
response,
'auth/login/google-oauth2/?auth_entry={}&next=%2Fcourses%2Fsomething%2F%3Ftpa_hint%3Doa2-google-oauth2'.format(auth_entry),
target_status_code=302
)
@override_settings(FEATURES=dict(settings.FEATURES, THIRD_PARTY_AUTH_HINT='oa2-google-oauth2'))
@ddt.data(
'signin_user',
'register_user',
)
def test_settings_tpa_hinted_login(self, url_name):
"""
Ensure that settings.FEATURES['THIRD_PARTY_AUTH_HINT'] can set third_party_auth_hint.
"""
params = [("next", "/courses/something/")]
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')
# THIRD_PARTY_AUTH_HINT can be overridden via the query string
tpa_hint = self.hidden_enabled_provider.provider_id
params = [("next", "/courses/something/?tpa_hint={0}".format(tpa_hint))]
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
self.assertContains(response, '"third_party_auth_hint": "{0}"'.format(tpa_hint))
# Even disabled providers in the query string will override THIRD_PARTY_AUTH_HINT
tpa_hint = self.hidden_disabled_provider.provider_id
params = [("next", "/courses/something/?tpa_hint={0}".format(tpa_hint))]
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
self.assertNotIn(tpa_hint, response.content)
@override_settings(FEATURES=dict(settings.FEATURES, THIRD_PARTY_AUTH_HINT='oa2-google-oauth2'))
@ddt.data(
('signin_user', 'login'),
('register_user', 'register'),
)
@ddt.unpack
def test_settings_tpa_hinted_login_dialog_disabled(self, url_name, auth_entry):
"""Test that the dialog doesn't show up for hinted logins when disabled via settings.THIRD_PARTY_AUTH_HINT. """
self.google_provider.skip_hinted_login_dialog = True
self.google_provider.save()
params = [("next", "/courses/something/")]
response = self.client.get(reverse(url_name), params, HTTP_ACCEPT="text/html")
self.assertRedirects(
response,
'auth/login/google-oauth2/?auth_entry={}&next=%2Fcourses%2Fsomething%2F%3Ftpa_hint%3Doa2-google-oauth2'.format(auth_entry),
target_status_code=302
)
@mock.patch('student_account.views.enterprise_customer_for_request')
@ddt.data(
('signin_user', False, None, None),
('register_user', False, None, None),
('signin_user', True, 'Fake EC', 'http://logo.com/logo.jpg'),
('register_user', True, 'Fake EC', 'http://logo.com/logo.jpg'),
('signin_user', True, 'Fake EC', None),
('register_user', True, 'Fake EC', None),
)
@ddt.unpack
def test_enterprise_register(self, url_name, ec_present, ec_name, logo_url, mock_get_ec):
"""
Verify that when an EnterpriseCustomer is received on the login and register views,
the appropriate sidebar is rendered.
"""
if ec_present:
mock_get_ec.return_value = {
'name': ec_name,
'branding_configuration': {'logo': logo_url}
}
else:
mock_get_ec.return_value = None
response = self.client.get(reverse(url_name), HTTP_ACCEPT="text/html")
enterprise_sidebar_div_id = u'enterprise-content-container'
if not ec_present:
self.assertNotContains(response, text=enterprise_sidebar_div_id)
else:
self.assertContains(response, text=enterprise_sidebar_div_id)
welcome_message = settings.ENTERPRISE_SPECIFIC_BRANDED_WELCOME_TEMPLATE
expected_message = welcome_message.format(
start_bold=u'<b>',
end_bold=u'</b>',
enterprise_name=ec_name,
platform_name=settings.PLATFORM_NAME
)
self.assertContains(response, expected_message)
if logo_url:
self.assertContains(response, logo_url)
def test_enterprise_cookie_delete(self):
"""
Test that enterprise cookies are deleted in login/registration views.
Cookies must be deleted in login/registration views so that *default* login/registration branding
is displayed to subsequent requests from non-enterprise customers.
"""
cookies = SimpleCookie()
cookies[settings.ENTERPRISE_CUSTOMER_COOKIE_NAME] = 'test-enterprise-customer'
response = self.client.get(reverse('signin_user'), HTTP_ACCEPT="text/html", cookies=cookies)
self.assertIn(settings.ENTERPRISE_CUSTOMER_COOKIE_NAME, response.cookies) # pylint:disable=no-member
enterprise_cookie = response.cookies[settings.ENTERPRISE_CUSTOMER_COOKIE_NAME] # pylint:disable=no-member
self.assertEqual(enterprise_cookie['domain'], settings.BASE_COOKIE_DOMAIN)
self.assertEqual(enterprise_cookie.value, '')
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_microsite_uses_old_login_page(self):
# Retrieve the login page from a microsite domain
# and verify that we're served the old page.
resp = self.client.get(
reverse("signin_user"),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertContains(resp, "Log into your Test Site Account")
self.assertContains(resp, "login-form")
def test_microsite_uses_old_register_page(self):
# Retrieve the register page from a microsite domain
# and verify that we're served the old page.
resp = self.client.get(
reverse("register_user"),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertContains(resp, "Register for Test Site")
self.assertContains(resp, "register-form")
def test_login_registration_xframe_protected(self):
resp = self.client.get(
reverse("register_user"),
{},
HTTP_REFERER="http://localhost/iframe"
)
self.assertEqual(resp['X-Frame-Options'], 'DENY')
self.configure_lti_provider(name='Test', lti_hostname='localhost', lti_consumer_key='test_key', enabled=True)
resp = self.client.get(
reverse("register_user"),
HTTP_REFERER="http://localhost/iframe"
)
self.assertEqual(resp['X-Frame-Options'], 'ALLOW')
def _assert_third_party_auth_data(self, response, current_backend, current_provider, providers, expected_ec):
"""Verify that third party auth info is rendered correctly in a DOM data attribute. """
finish_auth_url = None
if current_backend:
finish_auth_url = reverse("social:complete", kwargs={"backend": current_backend}) + "?"
auth_info = {
"currentProvider": current_provider,
"providers": providers,
"secondaryProviders": [],
"finishAuthUrl": finish_auth_url,
"errorMessage": None,
"registerFormSubmitButtonText": "Create Account",
}
if expected_ec is not None:
# If we set an EnterpriseCustomer, third-party auth providers ought to be hidden.
auth_info['providers'] = []
auth_info = dump_js_escaped_json(auth_info)
expected_data = '"third_party_auth": {auth_info}'.format(
auth_info=auth_info
)
self.assertContains(response, expected_data)
def _third_party_login_url(self, backend_name, auth_entry, login_params):
"""Construct the login URL to start third party authentication. """
return u"{url}?auth_entry={auth_entry}&{param_str}".format(
url=reverse("social:begin", kwargs={"backend": backend_name}),
auth_entry=auth_entry,
param_str=self._finish_auth_url_param(login_params),
)
def _finish_auth_url_param(self, params):
"""
Make the next=... URL parameter that indicates where the user should go next.
>>> _finish_auth_url_param([('next', '/dashboard')])
'/account/finish_auth?next=%2Fdashboard'
"""
return urlencode({
'next': '/account/finish_auth?{}'.format(urlencode(params))
})
def test_english_by_default(self):
response = self.client.get(reverse('signin_user'), [], HTTP_ACCEPT="text/html")
self.assertEqual(response['Content-Language'], 'en')
def test_unsupported_language(self):
response = self.client.get(reverse('signin_user'), [], HTTP_ACCEPT="text/html", HTTP_ACCEPT_LANGUAGE="ts-zx")
self.assertEqual(response['Content-Language'], 'en')
def test_browser_language(self):
response = self.client.get(reverse('signin_user'), [], HTTP_ACCEPT="text/html", HTTP_ACCEPT_LANGUAGE="es")
self.assertEqual(response['Content-Language'], 'es-419')
def test_browser_language_dialect(self):
response = self.client.get(reverse('signin_user'), [], HTTP_ACCEPT="text/html", HTTP_ACCEPT_LANGUAGE="es-es")
self.assertEqual(response['Content-Language'], 'es-es')
class AccountSettingsViewTest(ThirdPartyAuthTestMixin, TestCase, ProgramsApiConfigMixin):
""" Tests for the account settings view. """
USERNAME = 'student'
PASSWORD = 'password'
FIELDS = [
'country',
'gender',
'language',
'level_of_education',
'password',
'year_of_birth',
'preferred_language',
'time_zone',
]
@mock.patch("django.conf.settings.MESSAGE_STORAGE", 'django.contrib.messages.storage.cookie.CookieStorage')
def setUp(self):
super(AccountSettingsViewTest, self).setUp()
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
CommerceConfiguration.objects.create(cache_ttl=10, enabled=True)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.request = HttpRequest()
self.request.user = self.user
# For these tests, two third party auth providers are enabled by default:
self.configure_google_provider(enabled=True, visible=True)
self.configure_facebook_provider(enabled=True, visible=True)
# Python-social saves auth failure notifications in Django messages.
# See pipeline.get_duplicate_provider() for details.
self.request.COOKIES = {}
MessageMiddleware().process_request(self.request)
messages.error(self.request, 'Facebook is already in use.', extra_tags='Auth facebook')
def test_context(self):
context = account_settings_context(self.request)
user_accounts_api_url = reverse("accounts_api", kwargs={'username': self.user.username})
self.assertEqual(context['user_accounts_api_url'], user_accounts_api_url)
user_preferences_api_url = reverse('preferences_api', kwargs={'username': self.user.username})
self.assertEqual(context['user_preferences_api_url'], user_preferences_api_url)
for attribute in self.FIELDS:
self.assertIn(attribute, context['fields'])
self.assertEqual(
context['user_accounts_api_url'], reverse("accounts_api", kwargs={'username': self.user.username})
)
self.assertEqual(
context['user_preferences_api_url'], reverse('preferences_api', kwargs={'username': self.user.username})
)
self.assertEqual(context['duplicate_provider'], 'facebook')
self.assertEqual(context['auth']['providers'][0]['name'], 'Facebook')
self.assertEqual(context['auth']['providers'][1]['name'], 'Google')
def test_view(self):
"""
Test that all fields are visible
"""
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
for attribute in self.FIELDS:
self.assertIn(attribute, response.content)
def test_header_with_programs_listing_enabled(self):
"""
Verify that the tabs header is shown when the program listing is enabled.
"""
self.create_programs_config()
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
self.assertContains(response, '<li class="tab-nav-item">')
def test_header_with_programs_listing_disabled(self):
"""
Verify that the nav header is shown when the program listing is disabled.
"""
self.create_programs_config(enabled=False)
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
self.assertContains(response, '<li class="item nav-global-01">')
def test_commerce_order_detail(self):
"""
Verify that get_user_orders returns the correct order data.
"""
with mock_get_orders():
order_detail = get_user_orders(self.user)
for i, order in enumerate(mock_get_orders.default_response['results']):
expected = {
'number': order['number'],
'price': order['total_excl_tax'],
'order_date': 'Jan 01, 2016',
'receipt_url': '/checkout/receipt/?order_number=' + order['number'],
'lines': order['lines'],
}
self.assertEqual(order_detail[i], expected)
def test_commerce_order_detail_exception(self):
with mock_get_orders(exception=exceptions.HttpNotFoundError):
order_detail = get_user_orders(self.user)
self.assertEqual(order_detail, [])
def test_incomplete_order_detail(self):
response = {
'results': [
factories.OrderFactory(
status='Incomplete',
lines=[
factories.OrderLineFactory(
product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory()])
)
]
)
]
}
with mock_get_orders(response=response):
order_detail = get_user_orders(self.user)
self.assertEqual(order_detail, [])
def test_order_history_with_no_product(self):
response = {
'results': [
factories.OrderFactory(
lines=[
factories.OrderLineFactory(
product=None
),
factories.OrderLineFactory(
product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
name='certificate_type',
value='verified'
)])
)
]
)
]
}
with mock_get_orders(response=response):
order_detail = get_user_orders(self.user)
self.assertEqual(len(order_detail), 1)
@override_settings(SITE_NAME=settings.MICROSITE_LOGISTRATION_HOSTNAME)
class MicrositeLogistrationTests(TestCase):
"""
Test to validate that microsites can display the logistration page
"""
def test_login_page(self):
"""
Make sure that we get the expected logistration page on our specialized
microsite
"""
resp = self.client.get(
reverse('signin_user'),
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertIn('<div id="login-and-registration-container"', resp.content)
def test_registration_page(self):
"""
Make sure that we get the expected logistration page on our specialized
microsite
"""
resp = self.client.get(
reverse('register_user'),
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertIn('<div id="login-and-registration-container"', resp.content)
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_no_override(self):
"""
Make sure we get the old style login/registration if we don't override
"""
resp = self.client.get(
reverse('signin_user'),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<div id="login-and-registration-container"', resp.content)
resp = self.client.get(
reverse('register_user'),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<div id="login-and-registration-container"', resp.content)
class AccountCreationTestCaseWithSiteOverrides(SiteMixin, TestCase):
"""
Test cases for the ALLOW_PUBLIC_ACCOUNT_CREATION feature flag, which,
when turned off, disables the account creation options in the LMS
"""
def setUp(self):
"""Set up the tests"""
super(AccountCreationTestCaseWithSiteOverrides, self).setUp()
# Set the feature flag ALLOW_PUBLIC_ACCOUNT_CREATION to False
self.site_configuration_values = {
'ALLOW_PUBLIC_ACCOUNT_CREATION': False
}
self.site_domain = 'testserver1.com'
self.set_up_site(self.site_domain, self.site_configuration_values)
def test_register_option_login_page(self):
"""
Navigate to the login page and check the Register option is hidden when
ALLOW_PUBLIC_ACCOUNT_CREATION flag is turned off
"""
response = self.client.get(reverse('signin_user'))
self.assertNotIn('<a class="btn-neutral" href="/register?next=%2Fdashboard">Register</a>',
response.content)
|
agpl-3.0
|
Orav/kbengine
|
kbe/res/scripts/common/Lib/test/test_frame.py
|
2
|
4693
|
import gc
import sys
import types
import unittest
import weakref
from test import support
class ClearTest(unittest.TestCase):
"""
Tests for frame.clear().
"""
def inner(self, x=5, **kwargs):
1/0
def outer(self, **kwargs):
try:
self.inner(**kwargs)
except ZeroDivisionError as e:
exc = e
return exc
def clear_traceback_frames(self, tb):
"""
Clear all frames in a traceback.
"""
while tb is not None:
tb.tb_frame.clear()
tb = tb.tb_next
def test_clear_locals(self):
class C:
pass
c = C()
wr = weakref.ref(c)
exc = self.outer(c=c)
del c
support.gc_collect()
# A reference to c is held through the frames
self.assertIsNot(None, wr())
self.clear_traceback_frames(exc.__traceback__)
support.gc_collect()
# The reference was released by .clear()
self.assertIs(None, wr())
def test_clear_generator(self):
endly = False
def g():
nonlocal endly
try:
yield
inner()
finally:
endly = True
gen = g()
next(gen)
self.assertFalse(endly)
# Clearing the frame closes the generator
gen.gi_frame.clear()
self.assertTrue(endly)
def test_clear_executing(self):
# Attempting to clear an executing frame is forbidden.
try:
1/0
except ZeroDivisionError as e:
f = e.__traceback__.tb_frame
with self.assertRaises(RuntimeError):
f.clear()
with self.assertRaises(RuntimeError):
f.f_back.clear()
def test_clear_executing_generator(self):
# Attempting to clear an executing generator frame is forbidden.
endly = False
def g():
nonlocal endly
try:
1/0
except ZeroDivisionError as e:
f = e.__traceback__.tb_frame
with self.assertRaises(RuntimeError):
f.clear()
with self.assertRaises(RuntimeError):
f.f_back.clear()
yield f
finally:
endly = True
gen = g()
f = next(gen)
self.assertFalse(endly)
# Clearing the frame closes the generator
f.clear()
self.assertTrue(endly)
@support.cpython_only
def test_clear_refcycles(self):
# .clear() doesn't leave any refcycle behind
with support.disable_gc():
class C:
pass
c = C()
wr = weakref.ref(c)
exc = self.outer(c=c)
del c
self.assertIsNot(None, wr())
self.clear_traceback_frames(exc.__traceback__)
self.assertIs(None, wr())
class FrameLocalsTest(unittest.TestCase):
"""
Tests for the .f_locals attribute.
"""
def make_frames(self):
def outer():
x = 5
y = 6
def inner():
z = x + 2
1/0
t = 9
return inner()
try:
outer()
except ZeroDivisionError as e:
tb = e.__traceback__
frames = []
while tb:
frames.append(tb.tb_frame)
tb = tb.tb_next
return frames
def test_locals(self):
f, outer, inner = self.make_frames()
outer_locals = outer.f_locals
self.assertIsInstance(outer_locals.pop('inner'), types.FunctionType)
self.assertEqual(outer_locals, {'x': 5, 'y': 6})
inner_locals = inner.f_locals
self.assertEqual(inner_locals, {'x': 5, 'z': 7})
def test_clear_locals(self):
# Test f_locals after clear() (issue #21897)
f, outer, inner = self.make_frames()
outer.clear()
inner.clear()
self.assertEqual(outer.f_locals, {})
self.assertEqual(inner.f_locals, {})
def test_locals_clear_locals(self):
# Test f_locals before and after clear() (to exercise caching)
f, outer, inner = self.make_frames()
outer.f_locals
inner.f_locals
outer.clear()
inner.clear()
self.assertEqual(outer.f_locals, {})
self.assertEqual(inner.f_locals, {})
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
lgpl-3.0
|
Ubuntu-Solutions-Engineering/glance-simplestreams-sync-charm
|
hooks/charmhelpers/contrib/storage/linux/loopback.py
|
8
|
1698
|
import os
import re
from subprocess import (
check_call,
check_output,
)
##################################################
# loopback device helpers.
##################################################
def loopback_devices():
'''
Parse through 'losetup -a' output to determine currently mapped
loopback devices. Output is expected to look like:
/dev/loop0: [0807]:961814 (/tmp/my.img)
:returns: dict: a dict mapping {loopback_dev: backing_file}
'''
loopbacks = {}
cmd = ['losetup', '-a']
devs = [d.strip().split(' ') for d in
check_output(cmd).splitlines() if d != '']
for dev, _, f in devs:
loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
return loopbacks
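# A minimal sketch of the parsing above applied to the documented sample line
# (illustrative only; device and backing-file names are made up):
#     line = '/dev/loop0: [0807]:961814 (/tmp/my.img)'
#     dev, _, f = line.strip().split(' ')
#     dev.replace(':', '')                      # '/dev/loop0'
#     re.search('\((\S+)\)', f).groups()[0]     # '/tmp/my.img'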
def create_loopback(file_path):
'''
Create a loopback device for a given backing file.
:returns: str: Full path to new loopback device (eg, /dev/loop0)
'''
file_path = os.path.abspath(file_path)
check_call(['losetup', '--find', file_path])
for d, f in loopback_devices().iteritems():
if f == file_path:
return d
def ensure_loopback_device(path, size):
'''
Ensure a loopback device exists for a given backing file path and size.
If a loopback device is not already mapped to the file, a new one will be created.
TODO: Confirm size of found loopback device.
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
'''
for d, f in loopback_devices().iteritems():
if f == path:
return d
if not os.path.exists(path):
cmd = ['truncate', '--size', size, path]
check_call(cmd)
return create_loopback(path)
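# Illustrative usage sketch (the returned device name depends on the host and
# is only an assumption here):
#     dev = ensure_loopback_device('/var/lib/images/test.img', '10G')
#     # dev -> e.g. '/dev/loop0'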
|
agpl-3.0
|
indie1982/osmc-fixes
|
package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0a2.py
|
253
|
4503
|
data = (
'kax', # 0x00
'ka', # 0x01
'kap', # 0x02
'kuox', # 0x03
'kuo', # 0x04
'kuop', # 0x05
'kot', # 0x06
'kox', # 0x07
'ko', # 0x08
'kop', # 0x09
'ket', # 0x0a
'kex', # 0x0b
'ke', # 0x0c
'kep', # 0x0d
'kut', # 0x0e
'kux', # 0x0f
'ku', # 0x10
'kup', # 0x11
'kurx', # 0x12
'kur', # 0x13
'ggit', # 0x14
'ggix', # 0x15
'ggi', # 0x16
'ggiex', # 0x17
'ggie', # 0x18
'ggiep', # 0x19
'ggat', # 0x1a
'ggax', # 0x1b
'gga', # 0x1c
'ggap', # 0x1d
'gguot', # 0x1e
'gguox', # 0x1f
'gguo', # 0x20
'gguop', # 0x21
'ggot', # 0x22
'ggox', # 0x23
'ggo', # 0x24
'ggop', # 0x25
'gget', # 0x26
'ggex', # 0x27
'gge', # 0x28
'ggep', # 0x29
'ggut', # 0x2a
'ggux', # 0x2b
'ggu', # 0x2c
'ggup', # 0x2d
'ggurx', # 0x2e
'ggur', # 0x2f
'mgiex', # 0x30
'mgie', # 0x31
'mgat', # 0x32
'mgax', # 0x33
'mga', # 0x34
'mgap', # 0x35
'mguox', # 0x36
'mguo', # 0x37
'mguop', # 0x38
'mgot', # 0x39
'mgox', # 0x3a
'mgo', # 0x3b
'mgop', # 0x3c
'mgex', # 0x3d
'mge', # 0x3e
'mgep', # 0x3f
'mgut', # 0x40
'mgux', # 0x41
'mgu', # 0x42
'mgup', # 0x43
'mgurx', # 0x44
'mgur', # 0x45
'hxit', # 0x46
'hxix', # 0x47
'hxi', # 0x48
'hxip', # 0x49
'hxiet', # 0x4a
'hxiex', # 0x4b
'hxie', # 0x4c
'hxiep', # 0x4d
'hxat', # 0x4e
'hxax', # 0x4f
'hxa', # 0x50
'hxap', # 0x51
'hxuot', # 0x52
'hxuox', # 0x53
'hxuo', # 0x54
'hxuop', # 0x55
'hxot', # 0x56
'hxox', # 0x57
'hxo', # 0x58
'hxop', # 0x59
'hxex', # 0x5a
'hxe', # 0x5b
'hxep', # 0x5c
'ngiex', # 0x5d
'ngie', # 0x5e
'ngiep', # 0x5f
'ngat', # 0x60
'ngax', # 0x61
'nga', # 0x62
'ngap', # 0x63
'nguot', # 0x64
'nguox', # 0x65
'nguo', # 0x66
'ngot', # 0x67
'ngox', # 0x68
'ngo', # 0x69
'ngop', # 0x6a
'ngex', # 0x6b
'nge', # 0x6c
'ngep', # 0x6d
'hit', # 0x6e
'hiex', # 0x6f
'hie', # 0x70
'hat', # 0x71
'hax', # 0x72
'ha', # 0x73
'hap', # 0x74
'huot', # 0x75
'huox', # 0x76
'huo', # 0x77
'huop', # 0x78
'hot', # 0x79
'hox', # 0x7a
'ho', # 0x7b
'hop', # 0x7c
'hex', # 0x7d
'he', # 0x7e
'hep', # 0x7f
'wat', # 0x80
'wax', # 0x81
'wa', # 0x82
'wap', # 0x83
'wuox', # 0x84
'wuo', # 0x85
'wuop', # 0x86
'wox', # 0x87
'wo', # 0x88
'wop', # 0x89
'wex', # 0x8a
'we', # 0x8b
'wep', # 0x8c
'zit', # 0x8d
'zix', # 0x8e
'zi', # 0x8f
'zip', # 0x90
'ziex', # 0x91
'zie', # 0x92
'ziep', # 0x93
'zat', # 0x94
'zax', # 0x95
'za', # 0x96
'zap', # 0x97
'zuox', # 0x98
'zuo', # 0x99
'zuop', # 0x9a
'zot', # 0x9b
'zox', # 0x9c
'zo', # 0x9d
'zop', # 0x9e
'zex', # 0x9f
'ze', # 0xa0
'zep', # 0xa1
'zut', # 0xa2
'zux', # 0xa3
'zu', # 0xa4
'zup', # 0xa5
'zurx', # 0xa6
'zur', # 0xa7
'zyt', # 0xa8
'zyx', # 0xa9
'zy', # 0xaa
'zyp', # 0xab
'zyrx', # 0xac
'zyr', # 0xad
'cit', # 0xae
'cix', # 0xaf
'ci', # 0xb0
'cip', # 0xb1
'ciet', # 0xb2
'ciex', # 0xb3
'cie', # 0xb4
'ciep', # 0xb5
'cat', # 0xb6
'cax', # 0xb7
'ca', # 0xb8
'cap', # 0xb9
'cuox', # 0xba
'cuo', # 0xbb
'cuop', # 0xbc
'cot', # 0xbd
'cox', # 0xbe
'co', # 0xbf
'cop', # 0xc0
'cex', # 0xc1
'ce', # 0xc2
'cep', # 0xc3
'cut', # 0xc4
'cux', # 0xc5
'cu', # 0xc6
'cup', # 0xc7
'curx', # 0xc8
'cur', # 0xc9
'cyt', # 0xca
'cyx', # 0xcb
'cy', # 0xcc
'cyp', # 0xcd
'cyrx', # 0xce
'cyr', # 0xcf
'zzit', # 0xd0
'zzix', # 0xd1
'zzi', # 0xd2
'zzip', # 0xd3
'zziet', # 0xd4
'zziex', # 0xd5
'zzie', # 0xd6
'zziep', # 0xd7
'zzat', # 0xd8
'zzax', # 0xd9
'zza', # 0xda
'zzap', # 0xdb
'zzox', # 0xdc
'zzo', # 0xdd
'zzop', # 0xde
'zzex', # 0xdf
'zze', # 0xe0
'zzep', # 0xe1
'zzux', # 0xe2
'zzu', # 0xe3
'zzup', # 0xe4
'zzurx', # 0xe5
'zzur', # 0xe6
'zzyt', # 0xe7
'zzyx', # 0xe8
'zzy', # 0xe9
'zzyp', # 0xea
'zzyrx', # 0xeb
'zzyr', # 0xec
'nzit', # 0xed
'nzix', # 0xee
'nzi', # 0xef
'nzip', # 0xf0
'nziex', # 0xf1
'nzie', # 0xf2
'nziep', # 0xf3
'nzat', # 0xf4
'nzax', # 0xf5
'nza', # 0xf6
'nzap', # 0xf7
'nzuox', # 0xf8
'nzuo', # 0xf9
'nzox', # 0xfa
'nzop', # 0xfb
'nzex', # 0xfc
'nze', # 0xfd
'nzux', # 0xfe
'nzu', # 0xff
)
|
gpl-2.0
|
laszlocsomor/tensorflow
|
tensorflow/contrib/tpu/python/tpu/util.py
|
48
|
1148
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Utilities for the functionalities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
def check_positive_integer(value, name):
"""Checks whether `value` is a positive integer."""
if not isinstance(value, six.integer_types):
raise TypeError('{} must be int, got {}'.format(name, type(value)))
if value <= 0:
raise ValueError('{} must be positive, got {}'.format(name, value))
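# Example usage (illustrative; the parameter name is made up):
#     check_positive_integer(8, 'num_shards')    # passes silently
#     check_positive_integer(0, 'num_shards')    # raises ValueError
#     check_positive_integer(1.5, 'num_shards')  # raises TypeError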
|
apache-2.0
|
kargakis/test-infra
|
gubernator/pb_glance.py
|
22
|
2862
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A minimal protobuf2 parser that extracts just enough information
to be useful.
"""
import cStringIO as StringIO
def parse_protobuf(data, schema=None):
"""
Do a simple parse of a protobuf2 given minimal type information.
Args:
data: a string containing the encoded protocol buffer.
schema: a dict containing information about each field number.
The keys are field numbers, and the values represent:
- str: the name of the field
- dict: schema to recursively decode an embedded message.
May contain a 'name' key to name the field.
Returns:
dict: mapping from fields to values. The fields may be strings instead of
numbers if the schema named them, and the value will *always* be
a list of the values observed for that key.
"""
if schema is None:
schema = {}
buf = StringIO.StringIO(data)
def read_varint():
out = 0
shift = 0
c = 0x80
while c & 0x80:
c = ord(buf.read(1))
out = out | ((c & 0x7f) << shift)
shift += 7
return out
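# Each varint byte carries 7 payload bits (little-endian); the high bit marks
# continuation, e.g. the bytes 0x96 0x01 decode to 0x16 | (0x01 << 7) = 150.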
values = {}
while buf.tell() < len(data):
key = read_varint()
wire_type = key & 0b111
field_number = key >> 3
field_name = field_number
if wire_type == 0:
value = read_varint()
elif wire_type == 1: # 64-bit
value = buf.read(8)
elif wire_type == 2: # length-delim
length = read_varint()
value = buf.read(length)
if isinstance(schema.get(field_number), basestring):
field_name = schema[field_number]
elif field_number in schema:
# yes, I'm using dynamic features of a dynamic language.
# pylint: disable=redefined-variable-type
value = parse_protobuf(value, schema[field_number])
field_name = schema[field_number].get('name', field_name)
elif wire_type == 5: # 32-bit
value = buf.read(4)
else:
raise ValueError('unhandled wire type %d' % wire_type)
values.setdefault(field_name, []).append(value)
return values
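# Minimal usage sketch (hand-encoded sample messages; the schema field names
# are made up for illustration):
#     parse_protobuf('\x08\x96\x01', {1: 'count'})   # field 1, varint 150
#     # => {'count': [150]}
#     parse_protobuf('\x12\x03abc', {2: 'name'})     # field 2, length-delimited
#     # => {'name': ['abc']}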
|
apache-2.0
|
Woile/commitizen
|
commitizen/commands/commit.py
|
1
|
2734
|
import contextlib
import os
import tempfile
import questionary
from commitizen import factory, git, out
from commitizen.config import BaseConfig
from commitizen.cz.exceptions import CzException
from commitizen.exceptions import (
CommitError,
CustomError,
DryRunExit,
NoAnswersError,
NoCommitBackupError,
NotAGitProjectError,
NothingToCommitError,
)
class Commit:
"""Show prompt for the user to create a guided commit."""
def __init__(self, config: BaseConfig, arguments: dict):
if not git.is_git_project():
raise NotAGitProjectError()
self.config: BaseConfig = config
self.cz = factory.commiter_factory(self.config)
self.arguments = arguments
self.temp_file: str = os.path.join(
tempfile.gettempdir(),
"cz.commit{user}.backup".format(user=os.environ.get("USER", "")),
)
def read_backup_message(self) -> str:
# Check the commit backup file exists
if not os.path.isfile(self.temp_file):
raise NoCommitBackupError()
# Read commit message from backup
with open(self.temp_file, "r") as f:
return f.read().strip()
def prompt_commit_questions(self) -> str:
# Prompt user for the commit message
cz = self.cz
questions = cz.questions()
try:
answers = questionary.prompt(questions, style=cz.style)
except ValueError as err:
root_err = err.__context__
if isinstance(root_err, CzException):
raise CustomError(root_err.__str__())
raise err
if not answers:
raise NoAnswersError()
return cz.message(answers)
def __call__(self):
dry_run: bool = self.arguments.get("dry_run")
if git.is_staging_clean() and not dry_run:
raise NothingToCommitError("No files added to staging!")
retry: bool = self.arguments.get("retry")
if retry:
m = self.read_backup_message()
else:
m = self.prompt_commit_questions()
out.info(f"\n{m}\n")
if dry_run:
raise DryRunExit()
c = git.commit(m)
if c.return_code != 0:
out.error(c.err)
# Create commit backup
with open(self.temp_file, "w") as f:
f.write(m)
raise CommitError()
if "nothing added" in c.out or "no changes added to commit" in c.out:
out.error(c.out)
else:
with contextlib.suppress(FileNotFoundError):
os.remove(self.temp_file)
out.write(c.err)
out.write(c.out)
out.success("Commit successful!")
|
mit
|
CUCWD/edx-platform
|
lms/djangoapps/verify_student/tests/test_ssencrypt.py
|
19
|
4149
|
"""
Tests of the encryption and decryption utilities in the ssencrypt module.
"""
import base64
from lms.djangoapps.verify_student.ssencrypt import (
aes_decrypt,
aes_encrypt,
decode_and_decrypt,
encrypt_and_encode,
rsa_decrypt,
rsa_encrypt
)
AES_KEY_BYTES = b'32fe72aaf2abb44de9e161131b5435c8d37cbdb6f5df242ae860b283115f2dae'.decode('hex')
# Make up some garbage keys for testing purposes.
PUB_KEY_BYTES = b"""-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1hLVjP0oV0Uy/+jQ+Upz
c+eYc4Pyflb/WpfgYATggkoQdnsdplmvPtQr85+utgqKPxOh+PvYGW8QNUzjLIu4
5/GlmvBa82i1jRMgEAxGI95bz7j9DtH+7mnj+06zR5xHwT49jK0zMs5MjMaz5WRq
BUNkz7dxWzDrYJZQx230sPp6upy1Y5H5O8SnJVdghsh8sNciS4Bo4ZONQ3giBwxz
h5svjspz1MIsOoShjbAdfG+4VX7sVwYlw2rnQeRsMH5/xpnNeqtScyOMoz0N9UDG
dtRMNGa2MihAg7zh7/zckbUrtf+o5wQtlCJL1Kdj4EjshqYvCxzWnSM+MaYAjb3M
EQIDAQAB
-----END PUBLIC KEY-----"""
PRIV_KEY_BYTES = b"""-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA1hLVjP0oV0Uy/+jQ+Upzc+eYc4Pyflb/WpfgYATggkoQdnsd
plmvPtQr85+utgqKPxOh+PvYGW8QNUzjLIu45/GlmvBa82i1jRMgEAxGI95bz7j9
DtH+7mnj+06zR5xHwT49jK0zMs5MjMaz5WRqBUNkz7dxWzDrYJZQx230sPp6upy1
Y5H5O8SnJVdghsh8sNciS4Bo4ZONQ3giBwxzh5svjspz1MIsOoShjbAdfG+4VX7s
VwYlw2rnQeRsMH5/xpnNeqtScyOMoz0N9UDGdtRMNGa2MihAg7zh7/zckbUrtf+o
5wQtlCJL1Kdj4EjshqYvCxzWnSM+MaYAjb3MEQIDAQABAoIBAQCviuA87fdfoOoS
OerrEacc20QDLaby/QoGUtZ2RmmHzY40af7FQ3PWFIw6Ca5trrTwxnuivXnWWWG0
I2mCRM0Kvfgr1n7ubOW7WnyHTFlT3mnxK2Ov/HmNLZ36nO2cgkXA6/Xy3rBGMC9L
nUE1kSLzT/Fh965ntfS9zmVNNBhb6no0rVkGx5nK3vTI6kUmaa0m+E7KL/HweO4c
JodhN8CX4gpxSrkuwJ7IHEPYspqc0jInMYKLmD3d2g3BiOctjzFmaj3lV5AUlujW
z7/LVe5WAEaaxjwaMvwqrJLv9ogxWU3etJf22+Yy7r5gbPtqpqJrCZ5+WpGnUHws
3mMGP2QBAoGBAOc3pzLFgGUREVPSFQlJ06QFtfKYqg9fFHJCgWu/2B2aVZc2aO/t
Zhuoz+AgOdzsw+CWv7K0FH9sUkffk2VKPzwwwufLK3avD9gI0bhmBAYvdhS6A3nO
YM3W+lvmaJtFL00K6kdd+CzgRnBS9cZ70WbcbtqjdXI6+mV1WdGUTLhBAoGBAO0E
xhD4z+GjubSgfHYEZPgRJPqyUIfDH+5UmFGpr6zlvNN/depaGxsbhW8t/V6xkxsG
MCgic7GLMihEiUMx1+/snVs5bBUx7OT9API0d+vStHCFlTTe6aTdmiduFD4PbDsq
6E4DElVRqZhpIYusdDh7Z3fO2hm5ad4FfMlx65/RAoGAPYEfV7ETs06z9kEG2X6q
7pGaUZrsecRH8xDfzmKswUshg2S0y0WyCJ+CFFNeMPdGL4LKIWYnobGVvYqqcaIr
af5qijAQMrTkmQnXh56TaXXMijzk2czdEUQjOrjykIL5zxudMDi94GoUMqLOv+qF
zD/MuRoMDsPDgaOSrd4t/kECgYEAzwBNT8NOIz3P0Z4cNSJPYIvwpPaY+IkE2SyO
vzuYj0Mx7/Ew9ZTueXVGyzv6PfqOhJqZ8mNscZIlIyAAVWwxsHwRTfvPlo882xzP
97i1R4OFTYSNNFi+69sSZ/9utGjZ2K73pjJuj487tD2VK5xZAH9edTd2KeNSP7LB
MlpJNBECgYAmIswPdldm+G8SJd5j9O2fcDVTURjKAoSXCv2j4gEZzzfudpLWNHYu
l8N6+LEIVTMAytPk+/bImHvGHKZkCz5rEMSuYJWOmqKI92rUtI6fz5DUb3XSbrwT
3W+sdGFUK3GH1NAX71VxbAlFVLUetcMwai1+wXmGkRw6A7YezVFnhw==
-----END RSA PRIVATE KEY-----"""
def test_aes():
def assert_roundtrip(data):
"""
Verify that the original data is retrieved after encrypting and
decrypting, and again when also using base64 encoding/decoding.
"""
assert aes_decrypt(aes_encrypt(data, AES_KEY_BYTES), AES_KEY_BYTES) == data
assert decode_and_decrypt(encrypt_and_encode(data, AES_KEY_BYTES), AES_KEY_BYTES) == data
assert_roundtrip(b"Hello World!")
assert_roundtrip(b"1234567890123456") # AES block size, padding corner case
# Longer string
assert_roundtrip(b"12345678901234561234567890123456123456789012345601")
assert_roundtrip(b"")
assert_roundtrip(b"\xe9\xe1a\x13\x1bT5\xc8") # Random, non-ASCII text
def test_rsa():
_assert_rsa(AES_KEY_BYTES, PUB_KEY_BYTES, PRIV_KEY_BYTES)
def test_rsa_unicode_data():
data = u'12345678901234567890123456789012'
_assert_rsa(data, PUB_KEY_BYTES, PRIV_KEY_BYTES)
def test_rsa_unicode_keys():
_assert_rsa(AES_KEY_BYTES, PUB_KEY_BYTES.decode('utf-8'), PRIV_KEY_BYTES.decode('utf-8'))
def _assert_rsa(data, public_key, private_key):
"""
Assert that the original provided data is retrieved after RSA encryption
and decryption using the given keys.
"""
encrypted_data = rsa_encrypt(data, public_key)
assert rsa_decrypt(encrypted_data, private_key) == data
# Even though our test data is only 32 bytes, RSA encryption will make it 256
# bytes, and base64 encoding will blow that up to 344
assert len(base64.urlsafe_b64encode(encrypted_data)) == 344
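# (256 ciphertext bytes -> ceil(256 / 3) * 4 = 86 * 4 = 344 base64 characters, padding included)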
|
agpl-3.0
|
dongsenfo/pymatgen
|
pymatgen/core/ion.py
|
2
|
6398
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module containing class to create an ion
"""
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.0"
__maintainer__ = "Sai Jayaraman"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Dec 10, 2012"
import re
import numpy as np
from pymatgen.core.composition import Composition
from monty.json import MSONable
from pymatgen.util.string import formula_double_format
class Ion(Composition, MSONable):
"""
Basic ion object. It is just a Composition object with an additional
variable to store charge.
The net charge can either be represented as Mn++, or Mn+2, or Mn[2+].
Note the order of the sign and magnitude in each representation.
"""
def __init__(self, composition, charge=0.0, properties=None):
"""
Flexible Ion construction, similar to Composition.
For more information, please see pymatgen.core.Composition
"""
super().__init__(composition)
self._charge = charge
@classmethod
def from_formula(cls, formula):
charge = 0.0
f = formula
m = re.search(r"\[([^\[\]]+)\]", f)
if m:
m_chg = re.search(r"([\.\d]*)([+-])", m.group(1))
if m_chg:
if m_chg.group(1) != "":
charge += float(m_chg.group(1)) * \
(float(m_chg.group(2) + "1"))
else:
charge += float(m_chg.group(2) + "1")
f = f.replace(m.group(), "", 1)
m = re.search(r"\(aq\)", f)
if m:
f = f.replace(m.group(), "", 1)
for m_chg in re.finditer(r"([+-])([\.\d]*)", f):
sign = m_chg.group(1)
sgn = float(str(sign + "1"))
if m_chg.group(2).strip() != "":
charge += float(m_chg.group(2)) * sgn
else:
charge += sgn
f = f.replace(m_chg.group(), "", 1)
composition = Composition(f)
return cls(composition, charge)
@property
def formula(self):
"""
Returns a formula string, with elements sorted by electronegativity,
e.g., Li4 Fe4 P4 O16.
"""
formula = super().formula
chg_str = ""
if self.charge > 0:
chg_str = " +" + formula_double_format(self.charge, False)
elif self._charge < 0:
chg_str = " " + formula_double_format(self.charge, False)
return formula + chg_str
@property
def anonymized_formula(self):
"""
An anonymized formula. Appends charge to the end
of anonymized composition
"""
anon_formula = super().anonymized_formula
chg = self._charge
chg_str = ""
if chg > 0:
chg_str += ("{}{}".format('+', str(int(chg))))
elif chg < 0:
chg_str += ("{}{}".format('-', str(int(np.abs(chg)))))
return anon_formula + chg_str
@property
def reduced_formula(self):
"""
Returns a reduced formula string with appended charge.
"""
reduced_formula = super().reduced_formula
charge = self._charge / self.get_reduced_composition_and_factor()[1]
if charge > 0:
if abs(charge) == 1:
chg_str = "[+]"
else:
chg_str = "[" + formula_double_format(charge, False) + "+]"
elif charge < 0:
if abs(charge) == 1:
chg_str = "[-]"
else:
chg_str = "[{}-]".format(formula_double_format(abs(charge),
False))
else:
chg_str = "(aq)"
return reduced_formula + chg_str
@property
def alphabetical_formula(self):
"""
Returns an alphabetical formula string with appended charge.
"""
alph_formula = super().alphabetical_formula
chg_str = ""
if self.charge > 0:
chg_str = " +" + formula_double_format(self.charge, False)
elif self.charge < 0:
chg_str = " " + formula_double_format(self.charge, False)
return alph_formula + chg_str
@property
def charge(self):
"""
Charge of the ion
"""
return self._charge
def as_dict(self):
"""
Returns:
dict with composition, as well as charge
"""
d = super().as_dict()
d['charge'] = self.charge
return d
@classmethod
def from_dict(cls, d):
"""
Generates an ion object from a dict created by as_dict().
Args:
d:
{symbol: amount} dict.
"""
charge = d.pop('charge')
composition = Composition(d)
return Ion(composition, charge)
@property
def to_reduced_dict(self):
"""
Returns:
dict with element symbol and reduced amount e.g.,
{"Fe": 2.0, "O":3.0}.
"""
d = self.composition.to_reduced_dict
d['charge'] = self.charge
return d
@property
def composition(self):
return Composition(self._data)
def __eq__(self, other):
if self.composition != other.composition:
return False
if self.charge != other.charge:
return False
return True
def __add__(self, other):
"""
Addition of two ions.
"""
new_composition = self.composition + other.composition
new_charge = self.charge + other.charge
return Ion(new_composition, new_charge)
def __sub__(self, other):
"""
Subtraction of two ions
"""
new_composition = self.composition - other.composition
new_charge = self.charge - other.charge
return Ion(new_composition, new_charge)
def __mul__(self, other):
"""
Multiplication of an Ion with a factor
"""
new_composition = self.composition * other
new_charge = self.charge * other
return Ion(new_composition, new_charge)
def __hash__(self):
return hash((self.composition, self.charge))
def __str__(self):
return self.formula
def __repr__(self):
return "Ion: " + self.formula
|
mit
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/pandas/tests/series/test_indexing.py
|
3
|
88164
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
import pandas._libs.index as _index
from pandas.core.dtypes.common import is_integer, is_scalar
from pandas import (Index, Series, DataFrame, isnull,
date_range, NaT, MultiIndex,
Timestamp, DatetimeIndex, Timedelta)
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
from pandas._libs import tslib, lib
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import (slow,
assert_series_equal,
assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData):
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
assert result == 'Missing'
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
assert result == 3
result = vc.get(True, default='Missing')
assert result == 'Missing'
def test_get_nan(self):
# GH 8569
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default='Missing') == 'Missing'
# ensure that fixing the above hasn't broken get
# with multiple elements
idx = [20, 30]
assert_series_equal(s.get(idx),
Series([np.nan] * 2, index=idx))
idx = [np.nan, np.nan]
assert_series_equal(s.get(idx),
Series([np.nan] * 2, index=idx))
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
pytest.raises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
pytest.raises(IndexError, s.__getitem__, -11)
pytest.raises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_iloc(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.loc[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
assert (s[1:3] == 0).all()
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iloc_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
assert s.iloc[2] == 2
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
assert (s == 0).all()
s[:-12] = 5
assert (s == 0).all()
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
tm.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
assert s.index.name == 'index_name'
assert s.dtype == np.int64
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
pytest.raises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
pytest.raises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
pytest.raises(Exception, s.__getitem__, omask)
pytest.raises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
pytest.raises(Exception, ts.__getitem__, mask_shifted)
pytest.raises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
pytest.raises(Exception, ts.loc.__getitem__, mask_shifted)
pytest.raises(Exception, ts.loc.__setitem__, mask_shifted, 1)
# ts.loc[mask_shifted]
# ts.loc[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
assert (s[:4] == 0).all()
assert not (s[4:] == 0).any()
def test_getitem_setitem_datetime_tz_pytz(self):
tm._skip_if_no_pytz()
from pytz import timezone as tz
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = tz('US/Central').localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil(self):
tm._skip_if_no_dateutil()
from dateutil.tz import tzutc
from pandas._libs.tslib import _dateutil_gettz as gettz
tz = lambda x: tzutc() if x == 'UTC' else gettz(
x) # handle special case for utc in dateutil
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H',
tz='America/New_York')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetimeindex(self):
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# repeat all the above with naive datetimes
result = ts[datetime(1990, 1, 1, 4)]
expected = ts[4]
assert result == expected
result = ts.copy()
result[datetime(1990, 1, 1, 4)] = 0
result[datetime(1990, 1, 1, 4)] = ts[4]
assert_series_equal(result, ts)
result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]
assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
assert_series_equal(result, ts)
def test_getitem_setitem_periodindex(self):
from pandas import period_range
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
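# scalar lookup on a float64 Series should return a numpy float64 scalar, not a bare Python float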
value = self.ts[5]
assert isinstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
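# a unique label in an unsorted index with duplicates elsewhere should still return a scalar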
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
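# a duplicated label should return every matching entry as a Series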
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
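# indexing a Series with a boolean DataFrame is ambiguous and should raise TypeError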
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.loc[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
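# float slice bounds are not valid positions on this index and should raise TypeError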
pytest.raises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
pytest.raises(TypeError, f)
pytest.raises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
pytest.raises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
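# set_value should update the Series in place and return the same object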
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
assert res is self.ts
assert self.ts[idx] == 0
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
assert res is s
assert res.index[-1] == 'foobar'
assert res['foobar'] == 0
s = self.series.copy()
s.loc['foobar'] = 0
assert s.index[-1] == 'foobar'
assert s['foobar'] == 0
def test_setslice(self):
sl = self.ts[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
pytest.raises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
pytest.raises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.loc[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
assert result == expected
result = s.iloc[0]
assert result == expected
result = s['a']
assert result == expected
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.loc[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.loc[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.loc[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.loc[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
pytest.raises(Exception, s.__setitem__, inds_notfound, 0)
pytest.raises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
assert result == expected
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
assert result == expected
s2 = s.copy()
s2['a'] = expected
result = s2['a']
assert result == expected
def test_loc_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.loc[inds], self.series.reindex(inds))
assert_series_equal(self.series.iloc[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.loc[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.loc[mask], self.series[mask])
# ask for index value
assert self.ts.loc[d1] == self.ts[d1]
assert self.ts.loc[d2] == self.ts[d2]
def test_loc_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
pytest.raises(KeyError, ts2.loc.__getitem__, slice(d1, d2))
pytest.raises(KeyError, ts2.loc.__setitem__, slice(d1, d2), 0)
def test_loc_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).all()
# so is this
cp = s.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = s.iloc[2:6]
result2 = s.loc[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
pytest.raises(KeyError, s2.loc.__getitem__, slice(3, 11))
pytest.raises(KeyError, s2.loc.__setitem__, slice(3, 11), 0)
def test_loc_getitem_iterator(self):
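# .loc should accept an iterator of labels, not only list-likes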
idx = iter(self.series.index[:10])
result = self.series.loc[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz),
pd.Timestamp('2011-01-01 00:00-05:00', tz=tz),
pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
pytest.raises(ValueError, s.where, 1)
pytest.raises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# these must raise: the replacement values cannot be held without changing the dtype's itemsize
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
pytest.raises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
pytest.raises(ValueError, f)
def f():
s[mask] = [0] * 5
pytest.raises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isnull(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_array_like(self):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
klasses = [list, tuple, np.array, Series]
for klass in klasses:
result = s.where(klass(cond))
assert_series_equal(result, expected)
def test_where_invalid_input(self):
# see gh-15414: only boolean arrays accepted
s = Series([1, 2, 3])
msg = "Boolean array expected for the condition"
conds = [
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"),
pd.NaT, Timestamp("2017-01-02")]
]
for cond in conds:
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
msg = "Array conditional must be same shape as self"
with tm.assert_raises_regex(ValueError, msg):
s.where([True])
def test_where_ndframe_align(self):
msg = "Array conditional must be same shape as self"
s = Series([1, 2, 3])
cond = [True]
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
expected = Series([1, np.nan, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
cond = np.array([False, True, False, True])
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
expected = Series([np.nan, 2, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
pytest.raises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
pytest.raises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
pytest.raises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
pytest.raises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
pytest.raises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
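# where(..., inplace=True) should mutate the caller and match the non-inplace result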
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
# GH 15701
timestamps = ['2016-12-31 12:00:04+00:00',
'2016-12-31 12:00:04.010000+00:00']
s = Series([pd.Timestamp(t) for t in timestamps])
rs = s.where(Series([False, True]))
expected = Series([pd.NaT, s[1]])
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
pytest.raises(ValueError, s.mask, 1)
pytest.raises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.loc[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.iloc[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.loc[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.loc[d1] = 4
self.series.loc[d2] = 6
assert self.series[d1] == 4
assert self.series[d2] == 6
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, ['X', 'Y', 'Z'])
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.loc[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.loc[inds] = 5
pytest.raises(Exception, self.series.loc.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
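# boolean indexers align on the index, so a reordered mask behaves like the ordered one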
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# the bool Series gets coerced to float once NaN is assigned
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
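# out-of-range positions raise IndexError; missing labels raise KeyError, before and after sorting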
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
pytest.raises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
pytest.raises(KeyError, s.__getitem__, 5)
pytest.raises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
pytest.raises(KeyError, s.__getitem__, 5)
pytest.raises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
pytest.raises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
assert s[stamp] == 0
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
pytest.raises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
assert s[stamp] == 0
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
assert df['bb'].iloc[0] == 0.15
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
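# fancy indexing returns a copy, so mutating the result must not touch the original Series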
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
assert not np.isnan(self.ts[10])
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
pytest.raises(ValueError, s.drop, 'bc')
pytest.raises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.iloc[1:]
assert_series_equal(result, expected)
# bad axis
pytest.raises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
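# align both Series and compare against an explicit reindex onto the joined index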
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
assert (aa.reindex(diff_a) == fill).all()
if len(diff_b) > 0:
assert (ab.reindex(diff_b) == fill).all()
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
assert aa.name == 'ts'
assert ea.name == 'ts'
assert ab.name == 'ts'
assert eb.name == 'ts'
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
assert not (a[:5] == 5).any()
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
assert (a[:5] == 5).all()
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
assert not (b[:3] == 5).any()
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
assert (b[:2] == 5).all()
def test_align_same_index(self):
a, b = self.ts.align(self.ts, copy=False)
assert a.index is self.ts.index
assert b.index is self.ts.index
a, b = self.ts.align(self.ts, copy=True)
assert a.index is not self.ts.index
assert b.index is not self.ts.index
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
assert np.may_share_memory(self.series.index, identity.index)
except AttributeError:
pass
assert identity.index.is_(self.series.index)
assert identity.index.identical(self.series.index)
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
assert val == self.series[idx]
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
assert val == self.ts[idx]
stuffSeries = self.ts.reindex(subIndex)
assert np.isnan(stuffSeries).all()
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
assert val == self.ts[idx]
# return a copy the same index here
result = self.ts.reindex()
assert not (result is self.ts)
def test_reindex_nan(self):
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
assert np.issubdtype(result.dtype, np.dtype('M8[ns]'))
mask = result.isnull()
assert mask[-5:].all()
assert not mask[:-5].any()
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_reindex_corner(self):
# reindexing an empty Series with a fill method should not raise
self.empty.reindex(self.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
pytest.raises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# reindex introduces NaNs before the ffill, so the result is upcast to float
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=lrange(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest(self):
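# method='nearest' picks the closest label; with a tolerance, distant targets become NaN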
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
ts = self.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(self.ts.index)
# if NaNs introduced
assert reindexed_int.dtype == np.float_
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
assert reindexed_int.dtype == np.int_
def test_reindex_bool(self):
# a Series with a dtype other than float, int, string, or object (bool here)
ts = self.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(self.ts.index)
# if NaNs introduced
assert reindexed_bool.dtype == np.object_
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
assert reindexed_bool.dtype == np.bool_
def test_reindex_bool_pad(self):
# fail
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
assert isnull(filled_bool[:5]).all()
def test_reindex_like(self):
other = self.ts[::2]
assert_series_equal(self.ts.reindex(other.index),
self.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value(self):
# -----------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
assert issubclass(result.dtype.type, np.integer)
assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
# ------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_select(self):
n = len(self.ts)
result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
expected = self.ts.reindex(self.ts.index[n // 2:])
assert_series_equal(result, expected)
result = self.ts.select(lambda x: x.weekday() == 2)
expected = self.ts[self.ts.index.weekday == 2]
assert_series_equal(result, expected)
def test_cast_on_putmask(self):
# GH 2746
# need to upcast
s = Series([1, 2], index=[1, 2], dtype='int64')
s[[True, False]] = Series([0], index=[1], dtype='int64')
expected = Series([0, 2], index=[1, 2], dtype='int64')
assert_series_equal(s, expected)
def test_type_promote_putmask(self):
# GH8387: test that changing types does not break alignment
ts = Series(np.random.randn(100), index=np.arange(100, 0, -1)).round(5)
left, mask = ts.copy(), ts > 0
right = ts[mask].copy().map(str)
left[mask] = right
assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))
s = Series([0, 1, 2, 0])
mask = s > 0
s2 = s[mask].map(str)
s[mask] = s2
assert_series_equal(s, Series([0, '1', '2', 0]))
s = Series([0, 'foo', 'bar', 0])
mask = Series([False, True, True, False])
s2 = s[mask]
s[mask] = s2
assert_series_equal(s, Series([0, 'foo', 'bar', 0]))
def test_head_tail(self):
assert_series_equal(self.series.head(), self.series[:5])
assert_series_equal(self.series.head(0), self.series[0:0])
assert_series_equal(self.series.tail(), self.series[-5:])
assert_series_equal(self.series.tail(0), self.series[0:0])
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.loc['foo']
assert result.name == s.name
assert result2.name == s.name
def test_setitem_scalar_into_readonly_backing_data(self):
# GH14359: test that you cannot mutate a read only buffer
array = np.zeros(5)
array.flags.writeable = False # make the array immutable
series = Series(array)
for n in range(len(series)):
with pytest.raises(ValueError):
series[n] = 1
assert array[n] == 0
def test_setitem_slice_into_readonly_backing_data(self):
# GH14359: test that you cannot mutate a read only buffer
array = np.zeros(5)
array.flags.writeable = False # make the array immutable
series = Series(array)
with pytest.raises(ValueError):
series[1:3] = 1
assert not array.any()
class TestTimeSeriesDuplicates(object):
def setup_method(self, method):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
assert isinstance(self.dups, Series)
assert isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
assert not self.dups.index.is_unique
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
assert uniques.dtype == 'M8[ns]' # sanity
tm.assert_index_equal(uniques, expected)
assert self.dups.index.nunique() == 4
# #2563
assert isinstance(uniques, DatetimeIndex)
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, name='foo')
expected = expected.tz_localize('US/Eastern')
assert result.tz is not None
assert result.name == 'foo'
tm.assert_index_equal(result, expected)
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [tslib.iNaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
arr = [Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t)
for t in range(20)] + [NaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
assert d in ix
def test_duplicate_dates_indexing(self):
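# a date that appears multiple times returns all matching rows; a unique date returns a scalar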
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
pytest.raises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000, 1, 6)] = 0
assert ts[datetime(2000, 1, 6)] == 0
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
assert timestamp in df.index
# it works!
df.loc[timestamp]
assert len(df.loc[[timestamp]]) > 0
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(np.random.rand(len(rng)), index=rng)
ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])
for t in ts.index:
# TODO: unused?
s = str(t) # noqa
expected = ts[t]
result = ts2[t]
assert expected == result
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result, expected)
compare(slice('2011-01-01', '2011-01-15'))
compare(slice('2010-12-30', '2011-01-15'))
compare(slice('2011-01-01', '2011-01-16'))
# partial ranges
compare(slice('2011-01-01', '2011-01-6'))
compare(slice('2011-01-06', '2011-01-8'))
compare(slice('2011-01-06', '2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result, expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
assert t.year == 2005
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)), index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
expected.name = 'A'
df = DataFrame(dict(A=ts))
result = df['2001']['A']
assert_series_equal(expected, result)
# setting
ts['2001'] = 1
expected = ts['2001']
expected.name = 'A'
df.loc['2001', 'A'] = 1
result = df['2001']['A']
assert_series_equal(expected, result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00',
freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected, ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59',
freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected, ts)
idx = [Timestamp('2013-05-31 00:00'),
Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected, ts)
# GH14826, indexing with a seconds resolution string / datetime object
df = DataFrame(np.random.rand(5, 5),
columns=['open', 'high', 'low', 'close', 'volume'],
index=date_range('2012-01-02 18:01:00',
periods=5, tz='US/Central', freq='s'))
expected = df.loc[[df.index[2]]]
# this is a single date, so will raise
pytest.raises(KeyError, df.__getitem__, '2012-01-02 18:01:02')
pytest.raises(KeyError, df.__getitem__, df.index[2])
class TestDatetimeIndexing(object):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setup_method(self, method):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_fancy_getitem(self):
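# positional, string-date, datetime and Timestamp lookups should all resolve to the same element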
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
assert s[48] == 48
assert s['1/2/2009'] == 48
assert s['2009-1-2'] == 48
assert s[datetime(2009, 1, 2)] == 48
assert s[lib.Timestamp(datetime(2009, 1, 2))] == 48
pytest.raises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
assert s[48] == -1
s['1/2/2009'] = -2
assert s[48] == -2
s['1/2/2009':'2009-06-05'] = -3
assert (s[48:54] == -3).all()
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
assert (res == exp).all()
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
assert (res == exp).all()
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
assert d2.dtypes[0] == np.dtype('M8[ns]')
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
assert df.index[0] == stamp
assert df.reset_index()['Date'][0] == stamp
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# assert s2.values.dtype == 'M8[ns]'
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.loc[datetime(1900, 1, 1):datetime(2100, 1, 1)]
def test_slicing_datetimes(self):
# GH 7523
# unique
df = DataFrame(np.arange(4., dtype='float64'),
index=[datetime(2001, 1, i, 10, 00)
for i in [1, 2, 3, 4]])
result = df.loc[datetime(2001, 1, 1, 10):]
assert_frame_equal(result, df)
result = df.loc[:datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11):]
expected = df.iloc[1:]
assert_frame_equal(result, expected)
result = df.loc['20010101 11':]
assert_frame_equal(result, expected)
# duplicates
df = pd.DataFrame(np.arange(5., dtype='float64'),
index=[datetime(2001, 1, i, 10, 00)
for i in [1, 2, 2, 3, 4]])
result = df.loc[datetime(2001, 1, 1, 10):]
assert_frame_equal(result, df)
result = df.loc[:datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11):]
expected = df.iloc[1:]
assert_frame_equal(result, expected)
result = df.loc['20010101 11':]
assert_frame_equal(result, expected)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
assert (~result).all()
tst = DataFrame({'date': dates})
result = tst.duplicated()
assert (~result).all()
class TestNatIndexing(object):
def setup_method(self, method):
self.series = Series(date_range('1/1/2000', periods=10))
# ---------------------------------------------------------------------
# NaT support
def test_set_none_nan(self):
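# assigning None or np.nan into a datetime64 Series should be stored as NaT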
self.series[3] = None
assert self.series[3] is NaT
self.series[3:5] = None
assert self.series[4] is NaT
self.series[5] = np.nan
assert self.series[5] is NaT
self.series[5:7] = np.nan
assert self.series[6] is NaT
def test_nat_operations(self):
# GH 8617
s = Series([0, pd.NaT], dtype='m8[ns]')
exp = s[0]
assert s.median() == exp
assert s.min() == exp
assert s.max() == exp
def test_round_nat(self):
# GH14940
s = Series([pd.NaT])
expected = Series(pd.NaT)
for method in ["round", "floor", "ceil"]:
round_method = getattr(s.dt, method)
for freq in ["s", "5s", "min", "5min", "h", "5h"]:
assert_series_equal(round_method(freq), expected)
|
mit
|
yousafsyed/casperjs
|
bin/Lib/test/test_threading.py
|
72
|
36686
|
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module, cpython_only
from test.script_helper import assert_python_ok
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
from test.script_helper import assert_python_ok, assert_python_failure
import subprocess
from test import lock_tests
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
    def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
            # testsuite from hanging forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
    def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
        # When the thread ends, the tstate lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertTrue(t._tstate_lock is None)
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
        # function is set up.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "genereator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects; they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
if __name__ == "__main__":
unittest.main()
|
mit
|
Bioinformatics-Support-Unit/python-scripts
|
zic1/zic1_correlation.py
|
1
|
2362
|
import sys
from scipy.stats.stats import pearsonr
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.mlab as mlab
def main(f):
fh = open(f, 'r')
zic1_expression = []
other_expression = {}
for line in fh:
tokens = line.split('\t')
if line.startswith('206373_at'):
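            # NOTE (added comment): the (i - 1) % 4 == 0 filter below presumably keeps
            # every fourth column (indices 1, 5, 9, ...), which is where the expression
            # values appear to live in the tab-separated input file.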
i = 0
while i < len(tokens):
if (i-1) % 4 == 0:
zic1_expression.append(tokens[i])
i+=1
zic1_expression = map(float, zic1_expression)
elif line.startswith('Scan'):
pass
else:
other_expression[tokens[0]] = []
i = 0
while i < len(tokens):
if (i-1) % 4 == 0:
other_expression[tokens[0]].append(tokens[i])
i+=1
other_expression[tokens[0]] = map(float, other_expression[tokens[0]])
plotting(zic1_expression, other_expression)
def plotting(zic1,comparators):
"""docstring for plotting"""
from mapping import probe_map
for key in comparators.keys():
corr = pearsonr(zic1, comparators[key])
#the string of correlation stats
s = 'R = '+str(corr[0])+'\nP = '+str(corr[1])
# Create a figure with size 6 x 6 inches.
fig = Figure(figsize=(6,6))
# Create a canvas and add the figure to it.
canvas = FigureCanvas(fig)
# Create a subplot.
ax = fig.add_subplot(111)
# Set the title.
ax.set_title(s,fontsize=10)
# Set the X Axis label.
ax.set_xlabel('Samples',fontsize=8)
# Set the Y Axis label.
ax.set_ylabel('Normalized Expression',fontsize=8)
# Display Grid.
ax.grid(True,linestyle='-',color='0.75')
# Generate the Scatter Plot.
ax.plot(range(1,25), zic1, 'go-', label=probe_map['206373_at'])
ax.plot(range(1,25), comparators[key], 'r^-', label=probe_map[key])
# add the legend
ax.legend()
#ax.text(0.1,max(zic1),s)
# Save the generated Scatter Plot to a PNG file.
canvas.print_figure('correlations/'+key+'.png',dpi=500)
if __name__ == '__main__':
    if len(sys.argv) > 1:
main(sys.argv[1])
else:
main('processed_filtered.txt')
|
mit
|
Evervolv/android_external_chromium_org
|
ui/resources/PRESUBMIT.py
|
121
|
1397
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium UI resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
resources = input_api.PresubmitLocalPath()
# List of paths with their associated scale factor. This is used to verify
# that the images modified in one are the correct scale of the other.
path_scales = [
[(100, 'default_100_percent/'), (200, 'default_200_percent/')],
]
import sys
old_path = sys.path
try:
sys.path = [resources] + old_path
from resource_check import resource_scale_factors
for paths in path_scales:
results.extend(resource_scale_factors.ResourceScaleFactors(
input_api, output_api, paths).RunChecks())
finally:
sys.path = old_path
return results
|
bsd-3-clause
|
proudlygeek/proudlygeek-blog
|
jinja2/testsuite/__init__.py
|
114
|
2758
|
# -*- coding: utf-8 -*-
"""
jinja2.testsuite
~~~~~~~~~~~~~~~~
    All the unittests of Jinja2.  These tests can be executed by
    running run-tests.py, optionally using multiple Python versions
    at the same time.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import unittest
from traceback import format_exception
from jinja2 import loaders
here = os.path.dirname(os.path.abspath(__file__))
dict_loader = loaders.DictLoader({
'justdict.html': 'FOO'
})
package_loader = loaders.PackageLoader('jinja2.testsuite.res', 'templates')
filesystem_loader = loaders.FileSystemLoader(here + '/res/templates')
function_loader = loaders.FunctionLoader({'justfunction.html': 'FOO'}.get)
choice_loader = loaders.ChoiceLoader([dict_loader, package_loader])
prefix_loader = loaders.PrefixLoader({
'a': filesystem_loader,
'b': dict_loader
})
class JinjaTestCase(unittest.TestCase):
### use only these methods for testing. If you need standard
### unittest method, wrap them!
def setup(self):
pass
def teardown(self):
pass
def setUp(self):
self.setup()
def tearDown(self):
self.teardown()
def assert_equal(self, a, b):
return self.assertEqual(a, b)
def assert_raises(self, *args, **kwargs):
return self.assertRaises(*args, **kwargs)
def assert_traceback_matches(self, callback, expected_tb):
try:
callback()
except Exception, e:
tb = format_exception(*sys.exc_info())
if re.search(expected_tb.strip(), ''.join(tb)) is None:
raise self.fail('Traceback did not match:\n\n%s\nexpected:\n%s'
% (''.join(tb), expected_tb))
else:
self.fail('Expected exception')
def suite():
from jinja2.testsuite import ext, filters, tests, core_tags, \
loader, inheritance, imports, lexnparse, security, api, \
regression, debug, utils, doctests
suite = unittest.TestSuite()
suite.addTest(ext.suite())
suite.addTest(filters.suite())
suite.addTest(tests.suite())
suite.addTest(core_tags.suite())
suite.addTest(loader.suite())
suite.addTest(inheritance.suite())
suite.addTest(imports.suite())
suite.addTest(lexnparse.suite())
suite.addTest(security.suite())
suite.addTest(api.suite())
suite.addTest(regression.suite())
suite.addTest(debug.suite())
suite.addTest(utils.suite())
    # doctests will not run on Python 3 currently. There are too many
    # issues with that, so they are not tested on that platform.
if sys.version_info < (3, 0):
suite.addTest(doctests.suite())
return suite
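# Minimal usage sketch (added; not part of the original module): the
# aggregated suite can be run directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())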
|
mit
|
dgreisen/u2db
|
u1db/vectorclock.py
|
2
|
3167
|
# Copyright 2011 Canonical Ltd.
#
# This file is part of u1db.
#
# u1db is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# u1db is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with u1db. If not, see <http://www.gnu.org/licenses/>.
"""VectorClockRev helper class."""
class VectorClockRev(object):
"""Track vector clocks for multiple replica ids.
This allows simple comparison to determine if one VectorClockRev is
newer/older/in-conflict-with another VectorClockRev without having to
examine history. Every replica has a strictly increasing revision. When
creating a new revision, they include all revisions for all other replicas
which the new revision dominates, and increment their own revision to
something greater than the current value.
"""
def __init__(self, value):
self._values = self._expand(value)
def __repr__(self):
s = self.as_str()
return '%s(%s)' % (self.__class__.__name__, s)
def as_str(self):
s = '|'.join(['%s:%d' % (m, r) for m, r
in sorted(self._values.items())])
return s
def _expand(self, value):
result = {}
if value is None:
return result
for replica_info in value.split('|'):
replica_uid, counter = replica_info.split(':')
counter = int(counter)
result[replica_uid] = counter
return result
def is_newer(self, other):
"""Is this VectorClockRev strictly newer than other.
"""
if not self._values:
return False
if not other._values:
return True
this_is_newer = False
other_expand = dict(other._values)
for key, value in self._values.iteritems():
if key in other_expand:
other_value = other_expand.pop(key)
if other_value > value:
return False
elif other_value < value:
this_is_newer = True
else:
this_is_newer = True
if other_expand:
return False
return this_is_newer
def increment(self, replica_uid):
"""Increase the 'replica_uid' section of this vector clock.
        The clock is updated in place; use as_str() to obtain the new value.
"""
self._values[replica_uid] = self._values.get(replica_uid, 0) + 1
def maximize(self, other_vcr):
for replica_uid, counter in other_vcr._values.iteritems():
if replica_uid not in self._values:
self._values[replica_uid] = counter
else:
this_counter = self._values[replica_uid]
if this_counter < counter:
self._values[replica_uid] = counter
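# Illustrative usage sketch (added; not part of the original u1db module).
# Clock values use the 'replica:counter|replica:counter' string format that
# _expand() parses and as_str() produces; is_newer() implements dominance.
if __name__ == '__main__':
    a = VectorClockRev('alpha:1|beta:2')
    b = VectorClockRev('alpha:1|beta:1')
    assert a.is_newer(b) and not b.is_newer(a)
    b.increment('beta')   # b becomes alpha:1|beta:2, equal to a
    assert not a.is_newer(b) and not b.is_newer(a)
    b.maximize(a)
    b.increment('beta')   # b becomes alpha:1|beta:3 and now dominates a
    assert b.is_newer(a)
    print(b.as_str())     # alpha:1|beta:3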
|
gpl-3.0
|
pleaseproject/python-for-android
|
python3-alpha/python3-src/Lib/lib2to3/fixes/fix_exitfunc.py
|
140
|
2497
|
"""
Convert use of sys.exitfunc to use the atexit module.
"""
# Author: Benjamin Peterson
from lib2to3 import pytree, fixer_base
from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms
class FixExitfunc(fixer_base.BaseFix):
keep_line_order = True
BM_compatible = True
PATTERN = """
(
sys_import=import_name<'import'
('sys'
|
dotted_as_names< (any ',')* 'sys' (',' any)* >
)
>
|
expr_stmt<
power< 'sys' trailer< '.' 'exitfunc' > >
'=' func=any >
)
"""
def __init__(self, *args):
super(FixExitfunc, self).__init__(*args)
def start_tree(self, tree, filename):
super(FixExitfunc, self).start_tree(tree, filename)
self.sys_import = None
def transform(self, node, results):
        # First, find the sys import. We'll just hope it's in global scope.
if "sys_import" in results:
if self.sys_import is None:
self.sys_import = results["sys_import"]
return
func = results["func"].clone()
func.prefix = ""
register = pytree.Node(syms.power,
Attr(Name("atexit"), Name("register"))
)
call = Call(register, [func], node.prefix)
node.replace(call)
if self.sys_import is None:
# That's interesting.
self.warning(node, "Can't find sys import; Please add an atexit "
"import at the top of your file.")
return
# Now add an atexit import after the sys import.
names = self.sys_import.children[1]
if names.type == syms.dotted_as_names:
names.append_child(Comma())
names.append_child(Name("atexit", " "))
else:
containing_stmt = self.sys_import.parent
position = containing_stmt.children.index(self.sys_import)
stmt_container = containing_stmt.parent
new_import = pytree.Node(syms.import_name,
[Name("import"), Name("atexit", " ")]
)
new = pytree.Node(syms.simple_stmt, [new_import])
containing_stmt.insert_child(position + 1, Newline())
containing_stmt.insert_child(position + 2, new)
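# Illustrative before/after sketch (added; not part of the original fixer)
# for a module that assigns sys.exitfunc:
#
#   Before:                        After:
#       import sys                     import sys
#       sys.exitfunc = handler         import atexit
#                                      atexit.register(handler)
#
# With "import os, sys" the fixer instead extends that line to
# "import os, sys, atexit". If no sys import is found at all, the
# assignment is still rewritten and a warning asks the user to add the
# atexit import manually.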
|
apache-2.0
|
Azulinho/ansible
|
test/units/modules/network/netscaler/test_netscaler_gslb_service.py
|
39
|
27775
|
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
import sys
if sys.version_info[:2] != (2, 6):
import requests
class TestNetscalerGSLBSiteModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
m = MagicMock()
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice.gslbservice': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice_lbmonitor_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice_lbmonitor_binding.gslbservice_lbmonitor_binding': m,
# The following are needed because of monkey_patch_nitro_api()
'nssrc.com.citrix.netscaler.nitro.resource.base': m,
'nssrc.com.citrix.netscaler.nitro.resource.base.Json': m,
'nssrc.com.citrix.netscaler.nitro.resource.base.Json.Json': m,
'nssrc.com.citrix.netscaler.nitro.util': m,
'nssrc.com.citrix.netscaler.nitro.util.nitro_util': m,
'nssrc.com.citrix.netscaler.nitro.util.nitro_util.nitro_util': m,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
def setUp(self):
super(TestNetscalerGSLBSiteModule, self).setUp()
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
# Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
super(TestNetscalerGSLBSiteModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_gslb_service
self.module = netscaler_gslb_service
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_gslb_service.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_gslb_service.nitro_exception', MockException):
self.module = netscaler_gslb_service
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_gslb_service
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
monkey_patch_nitro_api=Mock(),
nitro_exception=MockException,
):
self.module = netscaler_gslb_service
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_ensure_feature_is_enabled_called(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
gslb_service_proxy_mock = Mock()
ensure_feature_is_enabled_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=Mock(return_value=client_mock),
gslb_service_exists=Mock(side_effect=[False, True]),
gslb_service_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=ensure_feature_is_enabled_mock,
monkey_patch_nitro_api=Mock(),
ConfigProxy=Mock(return_value=gslb_service_proxy_mock),
):
self.module = netscaler_gslb_service
self.exited()
ensure_feature_is_enabled_mock.assert_called_with(client_mock, 'GSLB')
def test_save_config_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_service_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
gslb_service_exists=Mock(side_effect=[False, True]),
gslb_service_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=Mock(return_value=gslb_service_proxy_mock),
):
self.module = netscaler_gslb_service
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_service_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
gslb_service_exists=Mock(side_effect=[True, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=Mock(return_value=gslb_service_proxy_mock),
):
self.module = netscaler_gslb_service
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_service_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
gslb_service_exists=Mock(side_effect=[False, True]),
gslb_service_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=Mock(return_value=gslb_service_proxy_mock),
):
self.module = netscaler_gslb_service
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_service_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
gslb_service_exists=Mock(side_effect=[True, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=Mock(return_value=gslb_service_proxy_mock),
):
self.module = netscaler_gslb_service
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_new_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_service_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_service_proxy_mock = Mock()
gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
gslb_service_exists=Mock(side_effect=[False, True]),
gslb_service_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_service
self.exited()
gslb_service_proxy_mock.assert_has_calls([call.add()])
def test_modified_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_service_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_service_proxy_mock = Mock()
gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_service_exists=Mock(side_effect=[True, True]),
gslb_service_identical=Mock(side_effect=[False, False, True]),
monitor_bindings_identical=Mock(side_effect=[True, True, True]),
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
nitro_exception=self.MockException,
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_service
self.exited()
gslb_service_proxy_mock.assert_has_calls([call.update()])
def test_absent_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_service_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_service_proxy_mock = Mock()
gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_service_exists=Mock(side_effect=[True, False]),
gslb_service_identical=Mock(side_effect=[False, True]),
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_service
self.exited()
gslb_service_proxy_mock.assert_has_calls([call.delete()])
def test_present_gslb_service_identical_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_service_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_service_proxy_mock = Mock()
gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_service_exists=Mock(side_effect=[True, True]),
gslb_service_identical=Mock(side_effect=[True, True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_service
self.exited()
gslb_service_proxy_mock.assert_not_called()
def test_absent_gslb_site_noop_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_service_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_service_proxy_mock = Mock()
gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_service_exists=Mock(side_effect=[False, False]),
gslb_service_identical=Mock(side_effect=[False, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_service
self.exited()
gslb_service_proxy_mock.assert_not_called()
def test_present_gslb_site_failed_update(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_service_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_service_proxy_mock = Mock()
gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_service_exists=Mock(side_effect=[True, True]),
gslb_service_identical=Mock(side_effect=[False, False, False]),
monitor_bindings_identical=Mock(side_effect=[True, True, True]),
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_service
result = self.failed()
self.assertEqual(result['msg'], 'GSLB service differs from configured')
self.assertTrue(result['failed'])
def test_present_gslb_site_failed_monitor_bindings_update(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_service_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_service_proxy_mock = Mock()
gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_service_exists=Mock(side_effect=[True, True]),
gslb_service_identical=Mock(side_effect=[False, False, True]),
monitor_bindings_identical=Mock(side_effect=[False, False, False]),
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_service
result = self.failed()
self.assertEqual(result['msg'], 'Monitor bindings differ from configured')
self.assertTrue(result['failed'])
def test_present_gslb_site_failed_create(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_service_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_service_proxy_mock = Mock()
gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_service_exists=Mock(side_effect=[False, False]),
gslb_service_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_service
result = self.failed()
self.assertEqual(result['msg'], 'GSLB service does not exist')
self.assertTrue(result['failed'])
def test_present_gslb_site_update_immutable_attribute(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_service_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_service_proxy_mock = Mock()
gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=['domain']),
gslb_service_exists=Mock(side_effect=[True, True]),
gslb_service_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_service
result = self.failed()
self.assertEqual(result['msg'], 'Cannot update immutable attributes [\'domain\']')
self.assertTrue(result['failed'])
def test_absent_gslb_site_failed_delete(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_service_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_service_proxy_mock = Mock()
gslb_service_proxy_mock.configure_mock(**glsb_service_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_service_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_service_exists=Mock(side_effect=[True, True]),
gslb_service_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_service
result = self.failed()
self.assertEqual(result['msg'], 'GSLB service still exists')
self.assertTrue(result['failed'])
def test_graceful_nitro_exception_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
gslb_service_exists=m,
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
nitro_exception=MockException
):
self.module = netscaler_gslb_service
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
            msg='Nitro exception not caught on operation present'
)
def test_graceful_nitro_exception_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_service
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_service',
gslb_service_exists=m,
ensure_feature_is_enabled=Mock(),
monkey_patch_nitro_api=Mock(),
nitro_exception=MockException
):
self.module = netscaler_gslb_service
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
|
gpl-3.0
|
heytcass/homeassistant-config
|
deps/fuzzywuzzy/process.py
|
1
|
11161
|
#!/usr/bin/env python
# encoding: utf-8
"""
process.py
Copyright (c) 2011 Adam Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from . import fuzz
from . import utils
import heapq
def extractWithoutOrder(query, choices, processor=None, scorer=None, score_cutoff=0):
"""Select the best match in a list or dictionary of choices.
Find best matches in a list or dictionary of choices, return a
    generator of tuples containing the match and its score. If a dictionary
is used, also returns the key for each match.
Arguments:
query: An object representing the thing we want to find.
choices: An iterable or dictionary-like object containing choices
to be matched against the query. Dictionary arguments of
{key: value} pairs will attempt to match the query against
each value.
processor: Optional function of the form f(a) -> b, where a is an
individual choice and b is the choice to be used in matching.
This can be used to match against, say, the first element of
a list:
lambda x: x[0]
Defaults to fuzzywuzzy.utils.full_process().
scorer: Optional function for scoring matches between the query and
an individual processed choice. This should be a function
of the form f(query, choice) -> int.
By default, fuzz.WRatio() is used and expects both query and
choice to be strings.
score_cutoff: Optional argument for score threshold. No matches with
a score less than this number will be returned. Defaults to 0.
Returns:
Generator of tuples containing the match and its score.
If a list is used for choices, then the result will be 2-tuples.
If a dictionary is used, then the result will be 3-tuples containing
    the key for each match.
For example, searching for 'bird' in the dictionary
{'bard': 'train', 'dog': 'man'}
may return
('train', 22, 'bard'), ('man', 0, 'dog')
"""
def no_process(x):
return x
if choices is None:
        return  # was 'raise StopIteration'; returning is the PEP 479-safe way to end a generator
# Catch generators without lengths
try:
if len(choices) == 0:
            return
except TypeError:
pass
# default: wratio
if not scorer:
scorer = fuzz.WRatio
        # fuzz.WRatio already processes the strings, so no extra processing step is needed
if not processor:
processor = no_process
# default, turn whatever the choice is into a workable string
if not processor:
processor = utils.full_process
try:
# See if choices is a dictionary-like object.
for key, choice in choices.items():
processed = processor(choice)
score = scorer(query, processed)
if score >= score_cutoff:
yield (choice, score, key)
except AttributeError:
# It's a list; just iterate over it.
for choice in choices:
processed = processor(choice)
score = scorer(query, processed)
if score >= score_cutoff:
yield (choice, score)
def extract(query, choices, processor=None, scorer=None, limit=5):
"""Select the best match in a list or dictionary of choices.
Find best matches in a list or dictionary of choices, return a
    list of tuples containing the match and its score. If a dictionary
is used, also returns the key for each match.
Arguments:
query: An object representing the thing we want to find.
choices: An iterable or dictionary-like object containing choices
to be matched against the query. Dictionary arguments of
{key: value} pairs will attempt to match the query against
each value.
processor: Optional function of the form f(a) -> b, where a is an
individual choice and b is the choice to be used in matching.
This can be used to match against, say, the first element of
a list:
lambda x: x[0]
Defaults to fuzzywuzzy.utils.full_process().
scorer: Optional function for scoring matches between the query and
an individual processed choice. This should be a function
of the form f(query, choice) -> int.
By default, fuzz.WRatio() is used and expects both query and
choice to be strings.
limit: Optional maximum for the number of elements returned. Defaults
to 5.
Returns:
List of tuples containing the match and its score.
If a list is used for choices, then the result will be 2-tuples.
If a dictionary is used, then the result will be 3-tuples containing
    the key for each match.
For example, searching for 'bird' in the dictionary
{'bard': 'train', 'dog': 'man'}
may return
[('train', 22, 'bard'), ('man', 0, 'dog')]
"""
sl = extractWithoutOrder(query, choices, processor, scorer)
return heapq.nlargest(limit, sl, key=lambda i: i[1]) if limit is not None else \
sorted(sl, key=lambda i: i[1], reverse=True)
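# Illustrative only: the call below mirrors the docstring example above; the
# exact scores depend on the scorer in use (fuzz.WRatio by default), so treat
# the numbers as placeholders rather than guaranteed output.
#
#   >>> extract('bird', {'bard': 'train', 'dog': 'man'}, limit=2)
#   [('train', 22, 'bard'), ('man', 0, 'dog')]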
def extractBests(query, choices, processor=None, scorer=None, score_cutoff=0, limit=5):
"""Get a list of the best matches to a collection of choices.
Convenience function for getting the choices with best scores.
Args:
query: A string to match against
choices: A list or dictionary of choices, suitable for use with
extract().
processor: Optional function for transforming choices before matching.
See extract().
scorer: Scoring function for extract().
score_cutoff: Optional argument for score threshold. No matches with
a score less than this number will be returned. Defaults to 0.
limit: Optional maximum for the number of elements returned. Defaults
to 5.
    Returns: A list of (match, score) tuples.
"""
best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
return heapq.nlargest(limit, best_list, key=lambda i: i[1]) if limit is not None else \
sorted(best_list, key=lambda i: i[1], reverse=True)
def extractOne(query, choices, processor=None, scorer=None, score_cutoff=0):
"""Find the single best match above a score in a list of choices.
This is a convenience method which returns the single best choice.
See extract() for the full arguments list.
Args:
query: A string to match against
choices: A list or dictionary of choices, suitable for use with
extract().
processor: Optional function for transforming choices before matching.
See extract().
scorer: Scoring function for extract().
score_cutoff: Optional argument for score threshold. If the best
match is found, but it is not greater than this number, then
return None anyway ("not a good enough match"). Defaults to 0.
Returns:
A tuple containing a single match and its score, if a match
was found that was above score_cutoff. Otherwise, returns None.
"""
best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
try:
return max(best_list, key=lambda i: i[1])
except ValueError:
return None
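# A minimal sketch of the cutoff behaviour (hypothetical strings; the score
# shown is approximate and depends on the scorer):
#
#   >>> extractOne('cowboys', ['Dallas Cowboys', 'New York Jets'], score_cutoff=80)
#   ('Dallas Cowboys', 90)
#   >>> extractOne('cowboys', ['New York Jets'], score_cutoff=80)  # nothing clears 80, so None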
def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
"""This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
    and remove duplicates. Specifically, it uses process.extract to identify duplicates that
score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list
since we assume this item contains the most entity information and returns that. It breaks string
length ties on an alphabetical sort.
Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the
returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less
sensitive.
Args:
contains_dupes: A list of strings that we would like to dedupe.
threshold: the numerical value (0,100) point at which we expect to find duplicates.
Defaults to 70 out of 100
scorer: Optional function for scoring matches between the query and
an individual processed choice. This should be a function
of the form f(query, choice) -> int.
By default, fuzz.token_set_ratio() is used and expects both query and
choice to be strings.
Returns:
A deduplicated list. For example:
In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
In: fuzzy_dedupe(contains_dupes)
Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']
"""
extractor = []
# iterate over items in *contains_dupes*
for item in contains_dupes:
# return all duplicate matches found
matches = extract(item, contains_dupes, limit=None, scorer=scorer)
# filter matches based on the threshold
filtered = [x for x in matches if x[1] > threshold]
        # if there is only 1 item in *filtered*, no duplicates were found, so append it to *extractor*
if len(filtered) == 1:
extractor.append(filtered[0][0])
else:
# alpha sort
filtered = sorted(filtered, key=lambda x: x[0])
# length sort
filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
# take first item as our 'canonical example'
extractor.append(filter_sort[0][0])
# uniquify *extractor* list
keys = {}
for e in extractor:
keys[e] = 1
extractor = keys.keys()
    # check that extractor differs from contains_dupes (i.e. duplicates were found)
# if not, then return the original list
if len(extractor) == len(contains_dupes):
return contains_dupes
else:
return extractor
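# Usage sketch reusing the docstring's own example list: the longest string in
# each duplicate cluster survives, with ties broken alphabetically.
#
#   >>> dedupe(['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins'])
#   ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']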
|
mit
|
ThornCreekChurch/e2mc
|
main.py
|
1
|
5923
|
## Copyright (C) 2017 ThornCreek Church
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>
##
## Contact:
## Jeremy Lyon <[email protected]>
## ThornCreek Church
## PO Box 1282, Eastlake, CO 80614
import elexio
import mailchimp
import log
import sys
debug = False
adds = []
deletes = []
def sync():
global adds
global deletes
egroups = elexio.get_groups()
mcgroups = mailchimp.get_groups()
# Iterate through all groups found in elexio. If any groups are missing
    # from MailChimp, add them. Iterate through each group's membership as
# well. If they are missing from the Mailchimp group, add them to an
# array that will be passed for a bulk update for addition.
for egroup in egroups:
if not mailchimp.group_exists(egroup['name']):
if(debug):
log.e2mclog("%s NOT found in MailChimp Groups" % egroup['name'])
group_id = mailchimp.create_group(egroup['name'])
if group_id > 0:
log.e2mclog("%s created in MailChimp Groups: id #%d" % (egroup['name'], group_id))
elif debug:
log.e2mclog("%s found in mcgroups" % egroup['name'])
else:
if(debug):
log.e2mclog("%s found in mcgroups" % egroup['name'])
group_id = mailchimp.get_group_id(egroup['name'])
eemails = elexio.get_group_email_list(egroup)
for eemail in eemails:
if not eemail == "":
if not mailchimp.email_exists(egroup['name'], eemail):
if(debug):
log.e2mclog("\t%s NOT found in group %s" % (eemail, egroup['name']))
if(mailchimp.check_user(eemail) == "subscribed"):
adds.append(eemail)
else:
if(debug):
log.e2mclog("\t%s found in group %s" % (eemail, egroup['name']))
# Now iterate through MailChimp e-mail addresses to see if any have
# been removed from Elexio.
memails = mailchimp.get_group_email_list(egroup['name'])
for memail in memails:
if not elexio.email_exists(egroup['name'], memail):
if(debug):
log.e2mclog("\t%s NOT found in Elexio group %s" % (memail, egroup['name']))
deletes.append(memail)
if(len(adds) > 0 or len(deletes) > 0):
log.e2mclog("Adds: %s" % adds)
log.e2mclog("Deletes: %s" % deletes)
response = mailchimp.update(group_id, adds, deletes)
log.e2mclog("Updates for Group %s" % egroup['name'])
log.e2mclog("Members Added:")
for ma in response['members_added']:
log.e2mclog("\t%s" % ma['email_address'])
log.e2mclog("Total Added: %d" % response['total_added'])
log.e2mclog("Members Deleted:")
for md in response['members_removed']:
log.e2mclog("\t%s" % md['email_address'])
log.e2mclog("Total Deleted: %d" % response['total_removed'])
log.e2mclog("Errors:")
for error in response['errors']:
log.e2mclog("%s:" % error['error'])
for ea in error['email_addresses']:
log.e2mclog("\t%s" % ea)
log.e2mclog("Total Errors: %d" % response['error_count'])
else:
log.e2mclog("No Adds or Deletes for Group %s" % egroup['name'])
adds = []
deletes = []
# Iterate through all groups found in MailChimp. If any groups are missing
# from Elexio, then the group will be deleted out of MailChimp. Iterate
# through each group's membership as well. If any are found in MailChimp
# that are not in Elexio, then add them to an array that will be passed
# for a bulk update for deletion.
# Uncomment this section to have Segments removed from MailChimp that are
# not an Elexio Group.
#for mcgroup in mcgroups:
# if not elexio.group_exists(mcgroup):
# if debug:
# log.e2mclog("MailChimp group %s NOT found in Elexio." % mcgroup['name'])
# log.e2mclog("MailChimp %s deleted. E-mails that were in %s:" % (mcgroup['name'], mcgroup['name']))
# for email in mailchimp.get_group_email_list(mcgroup['name']):
# log.e2mclog("\t%s" % email)
# mailchimp.delete_group(mcgroup)
# else:
# if debug:
# log.e2mclog("MailChimp group %s found in Elexio." % mcgroup['name'])
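# The adds/deletes lists built in sync() are effectively set differences between
# the Elexio and MailChimp membership of a group. A minimal sketch of the same
# idea with plain sets (hypothetical addresses, not this module's real objects):
#
#   elexio_emails = {'a@example.com', 'b@example.com'}
#   mailchimp_emails = {'b@example.com', 'c@example.com'}
#   adds = sorted(elexio_emails - mailchimp_emails)      # ['a@example.com']
#   deletes = sorted(mailchimp_emails - elexio_emails)   # ['c@example.com']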
log.open_log()
if len(sys.argv) > 1:
for arg in sys.argv:
if arg == '--debug':
debug = True
if(debug):
log.e2mclog("Debug mode enabled.")
#elexio.init()
mailchimp.init()
egroups = elexio.get_groups()
if(debug):
log.e2mclog("Elexio Group(s) identified...")
if(len(egroups) == 0):
log.e2mclog("None.")
else:
log.e2mclog(egroups)
mcgroups = mailchimp.get_groups()
if(debug):
log.e2mclog("MailChimp Group(s) identified...")
if(len(mcgroups)==0):
log.e2mclog("None.")
else:
log.e2mclog(mcgroups)
if(debug):
log.e2mclog("Syncing...")
sync()
if(debug):
log.e2mclog("Synchronization complete.")
log.close_log()
|
gpl-3.0
|
GodBlessPP/w16b_test
|
static/Brython3.1.1-20150328-091302/Lib/binascii.py
|
620
|
24585
|
"""A pure Python implementation of binascii.
Rather slow and buggy in corner cases.
PyPy provides an RPython version too.
"""
# borrowed from https://bitbucket.org/pypy/pypy/src/f2bf94943a41/lib_pypy/binascii.py
class Error(Exception):
pass
class Done(Exception):
pass
class Incomplete(Exception):
pass
def a2b_uu(s):
if not s:
return ''
length = (ord(s[0]) - 0x20) % 64
def quadruplets_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
except IndexError:
s += ' '
yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
return
s = s[4:]
try:
result = [''.join(
[chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
except ValueError:
raise Error('Illegal char')
result = ''.join(result)
trailingdata = result[length:]
if trailingdata.strip('\x00'):
raise Error('Trailing garbage')
result = result[:length]
if len(result) < length:
result += ((length - len(result)) * '\x00')
return bytes(result, __BRYTHON__.charset)
def b2a_uu(s):
length = len(s)
if length > 45:
raise Error('At most 45 bytes at once')
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
s += '\0\0'
yield ord(s[0]), ord(s[1]), ord(s[2])
return
s = s[3:]
result = [''.join(
[chr(0x20 + (( A >> 2 ) & 0x3F)),
chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)),
chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)),
chr(0x20 + (( C ) & 0x3F))])
for A, B, C in triples_gen(s)]
return chr(ord(' ') + (length & 0o77)) + ''.join(result) + '\n'
table_a2b_base64 = {
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5,
'G': 6,
'H': 7,
'I': 8,
'J': 9,
'K': 10,
'L': 11,
'M': 12,
'N': 13,
'O': 14,
'P': 15,
'Q': 16,
'R': 17,
'S': 18,
'T': 19,
'U': 20,
'V': 21,
'W': 22,
'X': 23,
'Y': 24,
'Z': 25,
'a': 26,
'b': 27,
'c': 28,
'd': 29,
'e': 30,
'f': 31,
'g': 32,
'h': 33,
'i': 34,
'j': 35,
'k': 36,
'l': 37,
'm': 38,
'n': 39,
'o': 40,
'p': 41,
'q': 42,
'r': 43,
's': 44,
't': 45,
'u': 46,
'v': 47,
'w': 48,
'x': 49,
'y': 50,
'z': 51,
'0': 52,
'1': 53,
'2': 54,
'3': 55,
'4': 56,
'5': 57,
'6': 58,
'7': 59,
'8': 60,
'9': 61,
'+': 62,
'/': 63,
'=': 0,
}
def a2b_base64(s):
if not isinstance(s, (str, bytes)):
raise TypeError("expected string, got %r" % (s,))
s = s.rstrip()
# clean out all invalid characters, this also strips the final '=' padding
# check for correct padding
def next_valid_char(s, pos):
for i in range(pos + 1, len(s)):
c = s[i]
if c < '\x7f':
try:
table_a2b_base64[c]
return c
except KeyError:
pass
return None
quad_pos = 0
leftbits = 0
leftchar = 0
res = []
for i, c in enumerate(s):
if isinstance(c, int):
c = chr(c)
if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
continue
if c == '=':
if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
continue
else:
leftbits = 0
break
try:
next_c = table_a2b_base64[c]
except KeyError:
continue
quad_pos = (quad_pos + 1) & 0x03
leftchar = (leftchar << 6) | next_c
leftbits += 6
if leftbits >= 8:
leftbits -= 8
res.append((leftchar >> leftbits & 0xff))
leftchar &= ((1 << leftbits) - 1)
if leftbits != 0:
raise Error('Incorrect padding')
return bytes(''.join([chr(i) for i in res]),__BRYTHON__.charset)
table_b2a_base64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"\
"0123456789+/"
def b2a_base64(s):
length = len(s)
final_length = length % 3
def triples_gen(s):
while s:
try:
yield s[0], s[1], s[2]
except IndexError:
s += b'\0\0'
yield s[0], s[1], s[2]
return
s = s[3:]
a = triples_gen(s[ :length - final_length])
result = [''.join(
[table_b2a_base64[( A >> 2 ) & 0x3F],
table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
table_b2a_base64[( C ) & 0x3F]])
for A, B, C in a]
final = s[length - final_length:]
if final_length == 0:
snippet = ''
elif final_length == 1:
a = ord(final[0])
snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
table_b2a_base64[(a << 4 ) & 0x3F] + '=='
else:
a = ord(final[0])
b = ord(final[1])
snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
table_b2a_base64[(b << 2) & 0x3F] + '='
return bytes(''.join(result) + snippet + '\n',__BRYTHON__.charset)
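# Worked example of the final_length == 2 branch above, independent of the
# Brython-specific return type: 'hi' is the 16 bits 01101000 01101001, which
# regroup into the 6-bit values 26, 6 and 36, i.e. 'a', 'G', 'k', plus one '='
# of padding, so the payload is 'aGk=' followed by '\n', the same as standard
# base64 of 'hi'. (The final bytes() call needs __BRYTHON__.charset, so this is
# a sketch rather than a runnable doctest outside Brython.)
#
#   b2a_base64('hi')  # -> b'aGk=\n' under Brython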
def a2b_qp(s, header=False):
inp = 0
odata = []
while inp < len(s):
if s[inp] == '=':
inp += 1
if inp >= len(s):
break
# Soft line breaks
if (s[inp] == '\n') or (s[inp] == '\r'):
if s[inp] != '\n':
while inp < len(s) and s[inp] != '\n':
inp += 1
if inp < len(s):
inp += 1
elif s[inp] == '=':
# broken case from broken python qp
odata.append('=')
inp += 1
elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers:
ch = chr(int(s[inp:inp+2], 16))
inp += 2
odata.append(ch)
else:
odata.append('=')
elif header and s[inp] == '_':
odata.append(' ')
inp += 1
else:
odata.append(s[inp])
inp += 1
return bytes(''.join(odata), __BRYTHON__.charset)
def b2a_qp(data, quotetabs=False, istext=True, header=False):
"""quotetabs=True means that tab and space characters are always
quoted.
istext=False means that \r and \n are treated as regular characters
header=True encodes space characters with '_' and requires
real '_' characters to be quoted.
"""
MAXLINESIZE = 76
# See if this string is using CRLF line ends
lf = data.find('\n')
crlf = lf > 0 and data[lf-1] == '\r'
inp = 0
linelen = 0
odata = []
while inp < len(data):
c = data[inp]
if (c > '~' or
c == '=' or
(header and c == '_') or
(c == '.' and linelen == 0 and (inp+1 == len(data) or
data[inp+1] == '\n' or
data[inp+1] == '\r')) or
(not istext and (c == '\r' or c == '\n')) or
((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
(c <= ' ' and c != '\r' and c != '\n' and
(quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
linelen += 3
if linelen >= MAXLINESIZE:
odata.append('=')
if crlf: odata.append('\r')
odata.append('\n')
linelen = 3
odata.append('=' + two_hex_digits(ord(c)))
inp += 1
else:
if (istext and
(c == '\n' or (inp+1 < len(data) and c == '\r' and
data[inp+1] == '\n'))):
linelen = 0
# Protect against whitespace on end of line
if (len(odata) > 0 and
(odata[-1] == ' ' or odata[-1] == '\t')):
ch = ord(odata[-1])
odata[-1] = '='
odata.append(two_hex_digits(ch))
if crlf: odata.append('\r')
odata.append('\n')
if c == '\r':
inp += 2
else:
inp += 1
else:
if (inp + 1 < len(data) and
data[inp+1] != '\n' and
(linelen + 1) >= MAXLINESIZE):
odata.append('=')
if crlf: odata.append('\r')
odata.append('\n')
linelen = 0
linelen += 1
if header and c == ' ':
c = '_'
odata.append(c)
inp += 1
return ''.join(odata)
hex_numbers = '0123456789ABCDEF'
def hex(n):
if n == 0:
return '0'
if n < 0:
n = -n
sign = '-'
else:
sign = ''
arr = []
def hex_gen(n):
""" Yield a nibble at a time. """
while n:
yield n % 0x10
            n = n // 0x10  # integer division; '/' would produce a float index under Python 3
for nibble in hex_gen(n):
arr = [hex_numbers[nibble]] + arr
return sign + ''.join(arr)
def two_hex_digits(n):
    return hex_numbers[n // 0x10] + hex_numbers[n % 0x10]
def strhex_to_int(s):
i = 0
for c in s:
i = i * 0x10 + hex_numbers.index(c)
return i
hqx_encoding = '!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'
DONE = 0x7f
SKIP = 0x7e
FAIL = 0x7d
table_a2b_hqx = [
#^@ ^A ^B ^C ^D ^E ^F ^G
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#\b \t \n ^K ^L \r ^N ^O
FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
#^P ^Q ^R ^S ^T ^U ^V ^W
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#^X ^Y ^Z ^[ ^\ ^] ^^ ^_
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
# ! " # $ % & '
FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
#( ) * + , - . /
0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
#0 1 2 3 4 5 6 7
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
#8 9 : ; < = > ?
0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
#@ A B C D E F G
0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
#H I J K L M N O
0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
#P Q R S T U V W
0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
#X Y Z [ \ ] ^ _
0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
#` a b c d e f g
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
#h i j k l m n o
0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
#p q r s t u v w
0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
#x y z { | } ~ ^?
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
]
def a2b_hqx(s):
result = []
def quadruples_gen(s):
t = []
for c in s:
res = table_a2b_hqx[ord(c)]
if res == SKIP:
continue
elif res == FAIL:
raise Error('Illegal character')
elif res == DONE:
yield t
raise Done
else:
t.append(res)
if len(t) == 4:
yield t
t = []
yield t
done = 0
try:
for snippet in quadruples_gen(s):
length = len(snippet)
if length == 4:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3])))
elif length == 3:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
elif length == 2:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
except Done:
done = 1
except Error:
raise
return (''.join(result), done)
# should this return a bytes object?
#return (bytes(''.join(result), __BRYTHON__.charset), done)
def b2a_hqx(s):
result =[]
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
yield tuple([ord(c) for c in s])
s = s[3:]
for snippet in triples_gen(s):
length = len(snippet)
if length == 3:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
result.append(hqx_encoding[
(snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)])
result.append(hqx_encoding[snippet[2] & 0x3f])
elif length == 2:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
result.append(hqx_encoding[
(snippet[1] & 0x0f) << 2])
elif length == 1:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4)])
return ''.join(result)
crctab_hqx = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]
def crc_hqx(s, crc):
for c in s:
crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)]
return crc
def rlecode_hqx(s):
"""
Run length encoding for binhex4.
The CPython implementation does not do run length encoding
of \x90 characters. This implementation does.
"""
if not s:
return ''
result = []
prev = s[0]
count = 1
# Add a dummy character to get the loop to go one extra round.
# The dummy must be different from the last character of s.
# In the same step we remove the first character, which has
# already been stored in prev.
if s[-1] == '!':
s = s[1:] + '?'
else:
s = s[1:] + '!'
for c in s:
if c == prev and count < 255:
count += 1
else:
if count == 1:
if prev != '\x90':
result.append(prev)
else:
result.extend(['\x90', '\x00'])
elif count < 4:
if prev != '\x90':
result.extend([prev] * count)
else:
result.extend(['\x90', '\x00'] * count)
else:
if prev != '\x90':
result.extend([prev, '\x90', chr(count)])
else:
result.extend(['\x90', '\x00', '\x90', chr(count)])
count = 1
prev = c
return ''.join(result)
def rledecode_hqx(s):
s = s.split('\x90')
result = [s[0]]
prev = s[0]
for snippet in s[1:]:
count = ord(snippet[0])
if count > 0:
result.append(prev[-1] * (count-1))
prev = snippet
else:
result.append('\x90')
prev = '\x90'
result.append(snippet[1:])
return ''.join(result)
crc_32_tab = [
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
0x2d02ef8d
]
def crc32(s, crc=0):
result = 0
crc = ~int(crc) & 0xffffffff
#crc = ~long(crc) & 0xffffffffL
for c in s:
crc = crc_32_tab[(crc ^ int(ord(c))) & 0xff] ^ (crc >> 8)
#crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8)
#/* Note: (crc >> 8) MUST zero fill on left
result = crc ^ 0xffffffff
if result > 2**31:
result = ((result + 2**31) % 2**32) - 2**31
return result
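# Quick sanity check (the values below are the same ones zlib/binascii produce
# in CPython, shown as a comment-only doctest): crc32('') is 0 and
# crc32('hello') is 907060870, i.e. 0x3610a686.
#
#   >>> crc32('hello')
#   907060870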
def b2a_hex(s):
result = []
for char in s:
c = (ord(char) >> 4) & 0xf
if c > 9:
c = c + ord('a') - 10
else:
c = c + ord('0')
result.append(chr(c))
c = ord(char) & 0xf
if c > 9:
c = c + ord('a') - 10
else:
c = c + ord('0')
result.append(chr(c))
return ''.join(result)
hexlify = b2a_hex
table_hex = [
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1,
-1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
]
def a2b_hex(t):
result = []
def pairs_gen(s):
while s:
try:
yield table_hex[ord(s[0])], table_hex[ord(s[1])]
except IndexError:
if len(s):
raise TypeError('Odd-length string')
return
s = s[2:]
for a, b in pairs_gen(t):
if a < 0 or b < 0:
raise TypeError('Non-hexadecimal digit found')
result.append(chr((a << 4) + b))
return bytes(''.join(result), __BRYTHON__.charset)
unhexlify = a2b_hex
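# Round-trip sketch: b2a_hex/hexlify maps 'AB' (0x41 0x42) to the string '4142';
# a2b_hex/unhexlify reverses it, though its return value is built with
# __BRYTHON__.charset and therefore only works as written under Brython.
#
#   hexlify('AB')      # -> '4142'
#   unhexlify('4142')  # -> b'AB' under Brython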
|
agpl-3.0
|
Greennut/ostproject
|
django/contrib/gis/maps/google/gmap.py
|
90
|
8964
|
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.contrib.gis.maps.google.overlays import GPolygon, GPolyline, GMarker
class GoogleMapException(Exception):
pass
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
GOOGLE_MAPS_URL='http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
"A class for generating Google Maps JavaScript."
# String constants
onunload = mark_safe('onunload="GUnload()"') # Cleans up after Google Maps
vml_css = mark_safe('v\:* {behavior:url(#default#VML);}') # CSS for IE VML
xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"') # XML Namespace (for IE VML).
def __init__(self, key=None, api_url=None, version=None,
center=None, zoom=None, dom_id='map',
kml_urls=[], polylines=None, polygons=None, markers=None,
template='gis/google/google-map.js',
js_module='geodjango',
extra_context={}):
# The Google Maps API Key defined in the settings will be used
# if not passed in as a parameter. The use of an API key is
# _required_.
if not key:
try:
self.key = settings.GOOGLE_MAPS_API_KEY
except AttributeError:
raise GoogleMapException('Google Maps API Key not found (try adding GOOGLE_MAPS_API_KEY to your settings).')
else:
self.key = key
# Getting the Google Maps API version, defaults to using the latest ("2.x"),
        # which is not necessarily the most stable.
if not version:
self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
else:
self.version = version
# Can specify the API URL in the `api_url` keyword.
if not api_url:
self.api_url = mark_safe(getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version)
else:
self.api_url = api_url
# Setting the DOM id of the map, the load function, the JavaScript
# template, and the KML URLs array.
self.dom_id = dom_id
self.extra_context = extra_context
self.js_module = js_module
self.template = template
self.kml_urls = kml_urls
# Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
overlay_info = [[GMarker, markers, 'markers'],
[GPolygon, polygons, 'polygons'],
[GPolyline, polylines, 'polylines']]
for overlay_class, overlay_list, varname in overlay_info:
setattr(self, varname, [])
if overlay_list:
for overlay in overlay_list:
if isinstance(overlay, overlay_class):
getattr(self, varname).append(overlay)
else:
getattr(self, varname).append(overlay_class(overlay))
# If GMarker, GPolygons, and/or GPolylines are used the zoom will be
# automatically calculated via the Google Maps API. If both a zoom
# level and a center coordinate are provided with polygons/polylines,
# no automatic determination will occur.
self.calc_zoom = False
if self.polygons or self.polylines or self.markers:
if center is None or zoom is None:
self.calc_zoom = True
# Defaults for the zoom level and center coordinates if the zoom
# is not automatically calculated.
if zoom is None: zoom = 4
self.zoom = zoom
if center is None: center = (0, 0)
self.center = center
def render(self):
"""
Generates the JavaScript necessary for displaying this Google Map.
"""
params = {'calc_zoom' : self.calc_zoom,
'center' : self.center,
'dom_id' : self.dom_id,
'js_module' : self.js_module,
'kml_urls' : self.kml_urls,
'zoom' : self.zoom,
'polygons' : self.polygons,
'polylines' : self.polylines,
'icons': self.icons,
'markers' : self.markers,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def body(self):
"Returns HTML body tag for loading and unloading Google Maps javascript."
return mark_safe('<body %s %s>' % (self.onload, self.onunload))
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
return mark_safe('onload="%s.%s_load()"' % (self.js_module, self.dom_id))
@property
def api_script(self):
"Returns the <script> tag for the Google Maps API javascript."
return mark_safe('<script src="%s%s" type="text/javascript"></script>' % (self.api_url, self.key))
@property
def js(self):
"Returns only the generated Google Maps JavaScript (no <script> tags)."
return self.render()
@property
def scripts(self):
"Returns all <script></script> tags required with Google Maps JavaScript."
return mark_safe('%s\n <script type="text/javascript">\n//<![CDATA[\n%s//]]>\n </script>' % (self.api_script, self.js))
@property
def style(self):
"Returns additional CSS styling needed for Google Maps on IE."
return mark_safe('<style type="text/css">%s</style>' % self.vml_css)
@property
def xhtml(self):
"Returns XHTML information needed for IE VML overlays."
return mark_safe('<html xmlns="http://www.w3.org/1999/xhtml" %s>' % self.xmlns)
@property
def icons(self):
"Returns a sequence of GIcon objects in this map."
return set([marker.icon for marker in self.markers if marker.icon])
class GoogleMapSet(GoogleMap):
def __init__(self, *args, **kwargs):
"""
A class for generating sets of Google Maps that will be shown on the
same page together.
Example:
gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
gmapset = GoogleMapSet( [ gmap1, gmap2] )
"""
# The `google-multi.js` template is used instead of `google-single.js`
# by default.
template = kwargs.pop('template', 'gis/google/google-multi.js')
# This is the template used to generate the GMap load JavaScript for
# each map in the set.
self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')
# Running GoogleMap.__init__(), and resetting the template
# value with default obtained above.
super(GoogleMapSet, self).__init__(**kwargs)
self.template = template
# If a tuple/list passed in as first element of args, then assume
if isinstance(args[0], (tuple, list)):
self.maps = args[0]
else:
self.maps = args
# Generating DOM ids for each of the maps in the set.
self.dom_ids = ['map%d' % i for i in xrange(len(self.maps))]
def load_map_js(self):
"""
Returns JavaScript containing all of the loading routines for each
map in this set.
"""
result = []
for dom_id, gmap in zip(self.dom_ids, self.maps):
# Backup copies the GoogleMap DOM id and template attributes.
# They are overridden on each GoogleMap instance in the set so
# that only the loading JavaScript (and not the header variables)
# is used with the generated DOM ids.
tmp = (gmap.template, gmap.dom_id)
gmap.template = self.map_template
gmap.dom_id = dom_id
result.append(gmap.js)
# Restoring the backup values.
gmap.template, gmap.dom_id = tmp
return mark_safe(''.join(result))
def render(self):
"""
Generates the JavaScript for the collection of Google Maps in
this set.
"""
params = {'js_module' : self.js_module,
'dom_ids' : self.dom_ids,
'load_map_js' : self.load_map_js(),
'icons' : self.icons,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
# Overloaded to use the `load` function defined in the
# `google-multi.js`, which calls the load routines for
# each one of the individual maps in the set.
return mark_safe('onload="%s.load()"' % self.js_module)
@property
def icons(self):
"Returns a sequence of all icons in each map of the set."
icons = set()
for map in self.maps: icons |= map.icons
return icons
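# Rough usage sketch (key and coordinates are placeholders): a view constructs a
# GoogleMap (or a GoogleMapSet for several maps) and hands it to the template,
# which then renders the style, scripts and body properties in the page.
#
#   gmap = GoogleMap(key='abc123', center=(5.0, 23.0), zoom=6)
#   context = {'google': gmap}  # template uses {{ google.scripts }} and {{ google.body }}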
|
bsd-3-clause
|
mdanielwork/intellij-community
|
python/helpers/pydev/third_party/wrapped_for_pydev/ctypes/__init__.py
|
106
|
15821
|
#@PydevCodeAnalysisIgnore
"""create and manipulate C data types in Python"""
import os as _os, sys as _sys
from itertools import chain as _chain
# special developer support to use ctypes from the CVS sandbox,
# without installing it
# XXX Remove this for the python core version
_magicfile = _os.path.join(_os.path.dirname(__file__), ".CTYPES_DEVEL")
if _os.path.isfile(_magicfile):
execfile(_magicfile)
del _magicfile
__version__ = "0.9.9.6"
from _ctypes import Union, Structure, Array
from _ctypes import _Pointer
from _ctypes import CFuncPtr as _CFuncPtr
from _ctypes import __version__ as _ctypes_version
from _ctypes import RTLD_LOCAL, RTLD_GLOBAL
from _ctypes import ArgumentError
from struct import calcsize as _calcsize
if __version__ != _ctypes_version:
raise Exception, ("Version number mismatch", __version__, _ctypes_version)
if _os.name in ("nt", "ce"):
from _ctypes import FormatError
from _ctypes import FUNCFLAG_CDECL as _FUNCFLAG_CDECL, \
FUNCFLAG_PYTHONAPI as _FUNCFLAG_PYTHONAPI
"""
WINOLEAPI -> HRESULT
WINOLEAPI_(type)
STDMETHODCALLTYPE
STDMETHOD(name)
STDMETHOD_(type, name)
STDAPICALLTYPE
"""
def create_string_buffer(init, size=None):
"""create_string_buffer(aString) -> character array
create_string_buffer(anInteger) -> character array
create_string_buffer(aString, anInteger) -> character array
"""
if isinstance(init, (str, unicode)):
if size is None:
size = len(init) + 1
buftype = c_char * size
buf = buftype()
buf.value = init
return buf
elif isinstance(init, (int, long)):
buftype = c_char * init
buf = buftype()
return buf
raise TypeError, init
def c_buffer(init, size=None):
## "deprecated, use create_string_buffer instead"
## import warnings
## warnings.warn("c_buffer is deprecated, use create_string_buffer instead",
## DeprecationWarning, stacklevel=2)
return create_string_buffer(init, size)
_c_functype_cache = {}
def CFUNCTYPE(restype, *argtypes):
"""CFUNCTYPE(restype, *argtypes) -> function prototype.
restype: the result type
argtypes: a sequence specifying the argument types
The function prototype can be called in three ways to create a
callable object:
prototype(integer address) -> foreign function
prototype(callable) -> create and return a C callable function from callable
prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
prototype((function name, dll object)[, paramflags]) -> foreign function exported by name
"""
try:
return _c_functype_cache[(restype, argtypes)]
except KeyError:
class CFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = _FUNCFLAG_CDECL
_c_functype_cache[(restype, argtypes)] = CFunctionType
return CFunctionType
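# Illustrative prototype use (the classic libc qsort comparator; the library
# name "libc.so.6" is Linux-specific, so treat this as a sketch):
#
#   CMPFUNC = CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
#   def py_cmp(a, b):
#       return a[0] - b[0]
#   arr = (c_int * 3)(3, 1, 2)
#   CDLL("libc.so.6").qsort(arr, len(arr), sizeof(c_int), CMPFUNC(py_cmp))
#   # arr[:] is now [1, 2, 3]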
if _os.name in ("nt", "ce"):
from _ctypes import LoadLibrary as _dlopen
from _ctypes import FUNCFLAG_STDCALL as _FUNCFLAG_STDCALL
if _os.name == "ce":
# 'ce' doesn't have the stdcall calling convention
_FUNCFLAG_STDCALL = _FUNCFLAG_CDECL
_win_functype_cache = {}
def WINFUNCTYPE(restype, *argtypes):
# docstring set later (very similar to CFUNCTYPE.__doc__)
try:
return _win_functype_cache[(restype, argtypes)]
except KeyError:
class WinFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = _FUNCFLAG_STDCALL
_win_functype_cache[(restype, argtypes)] = WinFunctionType
return WinFunctionType
if WINFUNCTYPE.__doc__:
WINFUNCTYPE.__doc__ = CFUNCTYPE.__doc__.replace("CFUNCTYPE", "WINFUNCTYPE")
elif _os.name == "posix":
from _ctypes import dlopen as _dlopen #@UnresolvedImport
from _ctypes import sizeof, byref, addressof, alignment
from _ctypes import _SimpleCData
class py_object(_SimpleCData):
_type_ = "O"
class c_short(_SimpleCData):
_type_ = "h"
class c_ushort(_SimpleCData):
_type_ = "H"
class c_long(_SimpleCData):
_type_ = "l"
class c_ulong(_SimpleCData):
_type_ = "L"
if _calcsize("i") == _calcsize("l"):
# if int and long have the same size, make c_int an alias for c_long
c_int = c_long
c_uint = c_ulong
else:
class c_int(_SimpleCData):
_type_ = "i"
class c_uint(_SimpleCData):
_type_ = "I"
class c_float(_SimpleCData):
_type_ = "f"
class c_double(_SimpleCData):
_type_ = "d"
if _calcsize("l") == _calcsize("q"):
# if long and long long have the same size, make c_longlong an alias for c_long
c_longlong = c_long
c_ulonglong = c_ulong
else:
class c_longlong(_SimpleCData):
_type_ = "q"
class c_ulonglong(_SimpleCData):
_type_ = "Q"
## def from_param(cls, val):
## return ('d', float(val), val)
## from_param = classmethod(from_param)
class c_ubyte(_SimpleCData):
_type_ = "B"
c_ubyte.__ctype_le__ = c_ubyte.__ctype_be__ = c_ubyte
# backward compatibility:
##c_uchar = c_ubyte
class c_byte(_SimpleCData):
_type_ = "b"
c_byte.__ctype_le__ = c_byte.__ctype_be__ = c_byte
class c_char(_SimpleCData):
_type_ = "c"
c_char.__ctype_le__ = c_char.__ctype_be__ = c_char
class c_char_p(_SimpleCData):
_type_ = "z"
class c_void_p(_SimpleCData):
_type_ = "P"
c_voidp = c_void_p # backwards compatibility (to a bug)
# This cache maps types to pointers to them.
_pointer_type_cache = {}
def POINTER(cls):
try:
return _pointer_type_cache[cls]
except KeyError:
pass
if type(cls) is str:
klass = type(_Pointer)("LP_%s" % cls,
(_Pointer,),
{})
_pointer_type_cache[id(klass)] = klass
return klass
else:
name = "LP_%s" % cls.__name__
klass = type(_Pointer)(name,
(_Pointer,),
{'_type_': cls})
_pointer_type_cache[cls] = klass
return klass
try:
from _ctypes import set_conversion_mode
except ImportError:
pass
else:
if _os.name in ("nt", "ce"):
set_conversion_mode("mbcs", "ignore")
else:
set_conversion_mode("ascii", "strict")
class c_wchar_p(_SimpleCData):
_type_ = "Z"
class c_wchar(_SimpleCData):
_type_ = "u"
POINTER(c_wchar).from_param = c_wchar_p.from_param #_SimpleCData.c_wchar_p_from_param
def create_unicode_buffer(init, size=None):
"""create_unicode_buffer(aString) -> character array
create_unicode_buffer(anInteger) -> character array
create_unicode_buffer(aString, anInteger) -> character array
"""
if isinstance(init, (str, unicode)):
if size is None:
size = len(init) + 1
buftype = c_wchar * size
buf = buftype()
buf.value = init
return buf
elif isinstance(init, (int, long)):
buftype = c_wchar * init
buf = buftype()
return buf
raise TypeError, init
POINTER(c_char).from_param = c_char_p.from_param #_SimpleCData.c_char_p_from_param
# XXX Deprecated
def SetPointerType(pointer, cls):
if _pointer_type_cache.get(cls, None) is not None:
raise RuntimeError, \
"This type already exists in the cache"
if not _pointer_type_cache.has_key(id(pointer)):
raise RuntimeError, \
"What's this???"
pointer.set_type(cls)
_pointer_type_cache[cls] = pointer
del _pointer_type_cache[id(pointer)]
def pointer(inst):
return POINTER(type(inst))(inst)
# XXX Deprecated
def ARRAY(typ, len):
return typ * len
################################################################
class CDLL(object):
"""An instance of this class represents a loaded dll/shared
library, exporting functions using the standard C calling
convention (named 'cdecl' on Windows).
The exported functions can be accessed as attributes, or by
indexing with the function name. Examples:
<obj>.qsort -> callable object
<obj>['qsort'] -> callable object
Calling the functions releases the Python GIL during the call and
    reacquires it afterwards.
"""
class _FuncPtr(_CFuncPtr):
_flags_ = _FUNCFLAG_CDECL
_restype_ = c_int # default, can be overridden in instances
def __init__(self, name, mode=RTLD_LOCAL, handle=None):
self._name = name
if handle is None:
self._handle = _dlopen(self._name, mode)
else:
self._handle = handle
def __repr__(self):
return "<%s '%s', handle %x at %x>" % \
(self.__class__.__name__, self._name,
(self._handle & (_sys.maxint * 2 + 1)),
id(self))
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
raise AttributeError, name
return self.__getitem__(name)
def __getitem__(self, name_or_ordinal):
func = self._FuncPtr((name_or_ordinal, self))
if not isinstance(name_or_ordinal, (int, long)):
func.__name__ = name_or_ordinal
setattr(self, name_or_ordinal, func)
return func
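# Small usage sketch for the attribute/item access described in the docstring
# (the library name is platform specific; "libc.so.6" assumes Linux):
#
#   libc = CDLL("libc.so.6")
#   libc.strlen("hello")      # -> 5
#   libc["strlen"]("hello")   # same function, looked up by name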
class PyDLL(CDLL):
"""This class represents the Python library itself. It allows to
access Python API functions. The GIL is not released, and
Python exceptions are handled correctly.
"""
class _FuncPtr(_CFuncPtr):
_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
_restype_ = c_int # default, can be overridden in instances
if _os.name in ("nt", "ce"):
class WinDLL(CDLL):
"""This class represents a dll exporting functions using the
Windows stdcall calling convention.
"""
class _FuncPtr(_CFuncPtr):
_flags_ = _FUNCFLAG_STDCALL
_restype_ = c_int # default, can be overridden in instances
# XXX Hm, what about HRESULT as normal parameter?
# Mustn't it derive from c_long then?
from _ctypes import _check_HRESULT, _SimpleCData
class HRESULT(_SimpleCData):
_type_ = "l"
# _check_retval_ is called with the function's result when it
# is used as restype. It checks for the FAILED bit, and
# raises a WindowsError if it is set.
#
# The _check_retval_ method is implemented in C, so that the
# method definition itself is not included in the traceback
# when it raises an error - that is what we want (and Python
# doesn't have a way to raise an exception in the caller's
# frame).
_check_retval_ = _check_HRESULT
class OleDLL(CDLL):
"""This class represents a dll exporting functions using the
Windows stdcall calling convention, and returning HRESULT.
HRESULT error values are automatically raised as WindowsError
exceptions.
"""
class _FuncPtr(_CFuncPtr):
_flags_ = _FUNCFLAG_STDCALL
_restype_ = HRESULT
class LibraryLoader(object):
def __init__(self, dlltype):
self._dlltype = dlltype
def __getattr__(self, name):
if name[0] == '_':
raise AttributeError(name)
dll = self._dlltype(name)
setattr(self, name, dll)
return dll
def __getitem__(self, name):
return getattr(self, name)
def LoadLibrary(self, name):
return self._dlltype(name)
cdll = LibraryLoader(CDLL)
pydll = LibraryLoader(PyDLL)
if _os.name in ("nt", "ce"):
pythonapi = PyDLL("python dll", None, _sys.dllhandle)
elif _sys.platform == "cygwin":
pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2])
else:
pythonapi = PyDLL(None)
if _os.name in ("nt", "ce"):
windll = LibraryLoader(WinDLL)
oledll = LibraryLoader(OleDLL)
if _os.name == "nt":
GetLastError = windll.kernel32.GetLastError
else:
GetLastError = windll.coredll.GetLastError
def WinError(code=None, descr=None):
if code is None:
code = GetLastError()
if descr is None:
descr = FormatError(code).strip()
return WindowsError(code, descr)
_pointer_type_cache[None] = c_void_p
if sizeof(c_uint) == sizeof(c_void_p):
c_size_t = c_uint
elif sizeof(c_ulong) == sizeof(c_void_p):
c_size_t = c_ulong
# functions
from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr
## void *memmove(void *, const void *, size_t);
memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr)
## void *memset(void *, int, size_t)
memset = CFUNCTYPE(c_void_p, c_void_p, c_int, c_size_t)(_memset_addr)
def PYFUNCTYPE(restype, *argtypes):
class CFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
return CFunctionType
_cast = PYFUNCTYPE(py_object, c_void_p, py_object)(_cast_addr)
def cast(obj, typ):
result = _cast(obj, typ)
result.__keepref = obj
return result
_string_at = CFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr)
def string_at(ptr, size=0):
"""string_at(addr[, size]) -> string
Return the string at addr."""
return _string_at(ptr, size)
try:
from _ctypes import _wstring_at_addr
except ImportError:
pass
else:
_wstring_at = CFUNCTYPE(py_object, c_void_p, c_int)(_wstring_at_addr)
def wstring_at(ptr, size=0):
"""wstring_at(addr[, size]) -> string
Return the string at addr."""
return _wstring_at(ptr, size)
if _os.name == "nt": # COM stuff
def DllGetClassObject(rclsid, riid, ppv):
        # First ask ctypes.com.server, then comtypes.server, for the
# class object.
# trick py2exe by doing dynamic imports
result = -2147221231 # CLASS_E_CLASSNOTAVAILABLE
try:
ctcom = __import__("ctypes.com.server", globals(), locals(), ['*'])
except ImportError:
pass
else:
result = ctcom.DllGetClassObject(rclsid, riid, ppv)
if result == -2147221231: # CLASS_E_CLASSNOTAVAILABLE
try:
ccom = __import__("comtypes.server", globals(), locals(), ['*'])
except ImportError:
pass
else:
result = ccom.DllGetClassObject(rclsid, riid, ppv)
return result
def DllCanUnloadNow():
        # First ask ctypes.com.server, then comtypes.server, whether we can unload or not.
# trick py2exe by doing dynamic imports
result = 0 # S_OK
try:
ctcom = __import__("ctypes.com.server", globals(), locals(), ['*'])
except ImportError:
pass
else:
result = ctcom.DllCanUnloadNow()
if result != 0: # != S_OK
return result
try:
ccom = __import__("comtypes.server", globals(), locals(), ['*'])
except ImportError:
return result
try:
return ccom.DllCanUnloadNow()
except AttributeError:
pass
return result
from ctypes._endian import BigEndianStructure, LittleEndianStructure
# Fill in specifically-sized types
c_int8 = c_byte
c_uint8 = c_ubyte
for kind in [c_short, c_int, c_long, c_longlong]:
if sizeof(kind) == 2: c_int16 = kind
elif sizeof(kind) == 4: c_int32 = kind
elif sizeof(kind) == 8: c_int64 = kind
for kind in [c_ushort, c_uint, c_ulong, c_ulonglong]:
if sizeof(kind) == 2: c_uint16 = kind
elif sizeof(kind) == 4: c_uint32 = kind
elif sizeof(kind) == 8: c_uint64 = kind
del(kind)
|
apache-2.0
|
hrhtspr/IkaLog
|
ikalog/utils/ikautils.py
|
2
|
5158
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import platform
import re
import sys
import cv2
import numpy as np
from PIL import Image
class IkaUtils(object):
@staticmethod
def isWindows():
try:
os.uname()
except AttributeError:
return True
return False
@staticmethod
def isOSX():
return platform.system() == 'Darwin'
@staticmethod
def dprint(text):
print(text, file=sys.stderr)
@staticmethod
def baseDirectory():
base_directory = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
base_directory = re.sub('[\\/]+$', '', base_directory)
if os.path.isfile(base_directory):
# In this case, this version of IkaLog is py2exe'd,
# and base_directory is still pointing at the executable.
base_directory = os.path.dirname(base_directory)
return base_directory
# Find the local player.
#
# @param context IkaLog Context.
    # @return The player information (dictionary) if found.
@staticmethod
def getMyEntryFromContext(context):
for e in context['game']['players']:
if e['me']:
return e
return None
# Get player's title.
#
# @param playerEntry The player.
# @return Title in string. Returns None if playerEntry doesn't have title data.
@staticmethod
def playerTitle(playerEntry):
if playerEntry is None:
return None
if not (('gender' in playerEntry) and ('prefix' in playerEntry)):
return None
prefix = re.sub('の', '', playerEntry['prefix'])
return "%s%s" % (prefix, playerEntry['gender'])
@staticmethod
def map2text(map, unknown=None, lang="ja"):
if map is None:
if unknown is None:
unknown = "?"
return unknown
return map.id_
@staticmethod
def rule2text(rule, unknown=None, lang="ja"):
if rule is None:
if unknown is None:
unknown = "?"
return unknown
return rule.id_
@staticmethod
def cropImageGray(img, left, top, width, height):
if len(img.shape) > 2 and img.shape[2] != 1:
return cv2.cvtColor(
img[top:top + height, left:left + width],
cv2.COLOR_BGR2GRAY
)
return img[top:top + height, left:left + width]
@staticmethod
def matchWithMask(img, mask, threshold=99.0, orig_threshold=70.0, debug=False):
if len(img.shape) > 2 and img.shape[2] != 1:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Check false-positive
orig_hist = cv2.calcHist([img], [0], None, [3], [0, 256])
match2 = orig_hist[2] / np.sum(orig_hist)
if match2 > orig_threshold:
# False-Positive condition.
#print("original %f > orig_threshold %f" % (match2, orig_threshold))
return False
ret, thresh1 = cv2.threshold(img, 230, 255, cv2.THRESH_BINARY)
added = thresh1 + mask
hist = cv2.calcHist([added], [0], None, [3], [0, 256])
match = hist[2] / np.sum(hist)
if debug and (match > threshold):
print("match2 %f match %f > threshold %f" %
(match2, match, threshold))
cv2.imshow('match_img', img)
cv2.imshow('match_mask', mask)
cv2.imshow('match_added', added)
# cv2.waitKey()
return match > threshold
@staticmethod
def loadMask(file, left, top, width, height):
mask = cv2.imread(file)
if mask is None:
print("マスクデータ %s のロードに失敗しました")
# raise a exception
mask = mask[top:top + height, left:left + width]
# BGR to GRAY
if mask.shape[2] > 1:
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
return mask
@staticmethod
def getWinLoseText(won, win_text="勝ち", lose_text="負け", unknown_text="不明"):
if won is None:
return unknown_text
return win_text if won else lose_text
@staticmethod
def writeScreenshot(destfile, img):
img_pil = Image.fromarray(img[:, :, ::-1])
try:
img_pil.save(destfile)
assert os.path.isfile(destfile)
except:
self.dprint("Screenshot: failed")
return False
return True
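# --- Illustrative usage sketch (not part of the original file) ---
# Quick demo of the pure string helpers above. Guarded so it only runs when
# this module is executed directly; the module's own imports (cv2, numpy,
# PIL) still need to be installed for it to load.
if __name__ == '__main__':
    print(IkaUtils.getWinLoseText(True, win_text='win', lose_text='lose', unknown_text='unknown'))   # -> win
    print(IkaUtils.getWinLoseText(None, win_text='win', lose_text='lose', unknown_text='unknown'))   # -> unknown
    # playerTitle() strips the particle 'の' from the prefix and appends the gender string.
    print(IkaUtils.playerTitle({'prefix': 'かけだしの', 'gender': 'ガール'}))  # -> かけだしガール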
|
apache-2.0
|
gurneyalex/odoo
|
addons/stock_landed_costs/tests/test_stockvaluationlayer.py
|
4
|
20303
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" Implementation of "INVENTORY VALUATION TESTS (With valuation layers)" spreadsheet. """
from odoo.tests import Form, tagged
from odoo.addons.stock_account.tests.test_stockvaluationlayer import TestStockValuationCommon
from odoo.addons.stock_account.tests.test_stockvaluation import _create_accounting_data
@tagged('post_install', '-at_install')
class TestStockValuationLC(TestStockValuationCommon):
@classmethod
def setUpClass(cls):
super(TestStockValuationLC, cls).setUpClass()
cls.productlc1 = cls.env['product.product'].create({
'name': 'product1',
'type': 'service',
'categ_id': cls.env.ref('product.product_category_all').id,
})
cls.stock_input_account, cls.stock_output_account, cls.stock_valuation_account, cls.expense_account, cls.stock_journal = _create_accounting_data(cls.env)
cls.product1.write({
'property_account_expense_id': cls.expense_account.id,
})
cls.productlc1.write({
'property_account_expense_id': cls.expense_account.id,
})
cls.product1.categ_id.write({
'property_stock_account_input_categ_id': cls.stock_input_account.id,
'property_stock_account_output_categ_id': cls.stock_output_account.id,
'property_stock_valuation_account_id': cls.stock_valuation_account.id,
'property_stock_journal': cls.stock_journal.id,
})
cls.payable_account = cls.env['account.account'].create({
'name': 'payable',
'code': 'payable',
'user_type_id': cls.env.ref('account.data_account_type_payable').id,
'reconcile': True,
})
def _get_stock_input_move_lines(self):
return self.env['account.move.line'].search([
('account_id', '=', self.stock_input_account.id),
], order='id')
def _get_stock_output_move_lines(self):
return self.env['account.move.line'].search([
('account_id', '=', self.stock_output_account.id),
], order='id')
def _get_stock_valuation_move_lines(self):
return self.env['account.move.line'].search([
('account_id', '=', self.stock_valuation_account.id),
], order='id')
def _get_payable_move_lines(self):
return self.env['account.move.line'].search([
('account_id', '=', self.payable_account.id),
], order='id')
def _get_expense_move_lines(self):
return self.env['account.move.line'].search([
('account_id', '=', self.expense_account.id),
], order='id')
def _make_lc(self, move, amount):
picking = move.picking_id
lc = Form(self.env['stock.landed.cost'])
lc.account_journal_id = self.stock_journal
lc.picking_ids.add(move.picking_id)
with lc.cost_lines.new() as cost_line:
cost_line.product_id = self.productlc1
cost_line.price_unit = amount
lc = lc.save()
lc.compute_landed_cost()
lc.button_validate()
return lc
@tagged('post_install', '-at_install')
class TestStockValuationLCFIFO(TestStockValuationLC):
def setUp(self):
super(TestStockValuationLCFIFO, self).setUp()
self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
self.product1.product_tmpl_id.categ_id.property_valuation = 'real_time'
def test_normal_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10, create_picking=True)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
lc = self._make_lc(move1, 100)
move3 = self._make_out_move(self.product1, 1)
self.assertEqual(self.product1.value_svl, 380)
self.assertEqual(self.product1.quantity_svl, 19)
self.assertEqual(self.product1.standard_price, 20)
def test_negative_1(self):
self.product1.standard_price = 10
move1 = self._make_out_move(self.product1, 2, force_assign=True)
move2 = self._make_in_move(self.product1, 10, unit_cost=15, create_picking=True)
lc = self._make_lc(move2, 100)
self.assertEqual(self.product1.value_svl, 200)
self.assertEqual(self.product1.quantity_svl, 8)
def test_alreadyout_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10, create_picking=True)
move2 = self._make_out_move(self.product1, 10)
lc = self._make_lc(move1, 100)
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_alreadyout_2(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10, create_picking=True)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move2 = self._make_out_move(self.product1, 1)
lc = self._make_lc(move1, 100)
self.assertEqual(self.product1.value_svl, 380)
self.assertEqual(self.product1.quantity_svl, 19)
def test_alreadyout_3(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10, create_picking=True)
move2 = self._make_out_move(self.product1, 10)
move1.move_line_ids.qty_done = 15
lc = self._make_lc(move1, 60)
self.assertEqual(self.product1.value_svl, 70)
self.assertEqual(self.product1.quantity_svl, 5)
def test_fifo_to_standard_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10)
move2 = self._make_in_move(self.product1, 10, unit_cost=15)
move3 = self._make_out_move(self.product1, 5)
lc = self._make_lc(move1, 100)
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
out_svl = self.product1.stock_valuation_layer_ids.sorted()[-2]
in_svl = self.product1.stock_valuation_layer_ids.sorted()[-1]
self.assertEqual(out_svl.value, -250)
self.assertEqual(in_svl.value, 225)
def test_rounding_1(self):
"""3@100, out 1, out 1, out 1"""
move1 = self._make_in_move(self.product1, 3, unit_cost=20, create_picking=True)
lc = self._make_lc(move1, 40)
move2 = self._make_out_move(self.product1, 1)
move3 = self._make_out_move(self.product1, 1)
move4 = self._make_out_move(self.product1, 1)
self.assertEqual(self.product1.stock_valuation_layer_ids.mapped('value'), [60.0, 40.0, -33.33, -33.34, -33.33])
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_rounding_2(self):
"""3@98, out 1, out 1, out 1"""
move1 = self._make_in_move(self.product1, 3, unit_cost=20, create_picking=True)
lc = self._make_lc(move1, 38)
move2 = self._make_out_move(self.product1, 1)
move3 = self._make_out_move(self.product1, 1)
move4 = self._make_out_move(self.product1, 1)
self.assertEqual(move2.stock_valuation_layer_ids.value, -32.67)
self.assertEqual(move3.stock_valuation_layer_ids.value, -32.67)
self.assertAlmostEqual(move4.stock_valuation_layer_ids.value, -32.66, delta=0.01) # self.env.company.currency_id.round(-32.66) -> -32.660000000000004
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_rounding_3(self):
"""[email protected], out 1, out 1, out 1"""
move1 = self._make_in_move(self.product1, 3, unit_cost=1, create_picking=True)
lc = self._make_lc(move1, 1.85)
move2 = self._make_out_move(self.product1, 1)
move3 = self._make_out_move(self.product1, 1)
move4 = self._make_out_move(self.product1, 1)
self.assertEqual(self.product1.stock_valuation_layer_ids.mapped('value'), [3.0, 1.85, -1.62, -1.62, -1.61])
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_in_and_out_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=100, create_picking=True)
self.assertEqual(move1.stock_valuation_layer_ids[0].remaining_value, 1000)
lc1 = self._make_lc(move1, 100)
self.assertEqual(move1.stock_valuation_layer_ids[0].remaining_value, 1100)
lc2 = self._make_lc(move1, 50)
self.assertEqual(move1.stock_valuation_layer_ids[0].remaining_value, 1150)
self.assertEqual(self.product1.value_svl, 1150)
self.assertEqual(self.product1.quantity_svl, 10)
move2 = self._make_out_move(self.product1, 1)
self.assertEqual(move2.stock_valuation_layer_ids.value, -115)
@tagged('post_install', '-at_install')
class TestStockValuationLCAVCO(TestStockValuationLC):
def setUp(self):
super(TestStockValuationLCAVCO, self).setUp()
self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
self.product1.product_tmpl_id.categ_id.property_valuation = 'real_time'
def test_normal_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10, create_picking=True)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
lc = self._make_lc(move1, 100)
move3 = self._make_out_move(self.product1, 1)
self.assertEqual(self.product1.value_svl, 380)
def test_negative_1(self):
self.product1.standard_price = 10
move1 = self._make_out_move(self.product1, 2, force_assign=True)
move2 = self._make_in_move(self.product1, 10, unit_cost=15, create_picking=True)
lc = self._make_lc(move2, 100)
self.assertEqual(self.product1.value_svl, 200)
self.assertEqual(self.product1.quantity_svl, 8)
def test_alreadyout_1(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10, create_picking=True)
move2 = self._make_out_move(self.product1, 10)
lc = self._make_lc(move1, 100)
self.assertEqual(len(self.product1.stock_valuation_layer_ids), 2)
self.assertEqual(self.product1.value_svl, 0)
self.assertEqual(self.product1.quantity_svl, 0)
def test_alreadyout_2(self):
move1 = self._make_in_move(self.product1, 10, unit_cost=10, create_picking=True)
move2 = self._make_in_move(self.product1, 10, unit_cost=20)
move2 = self._make_out_move(self.product1, 1)
lc = self._make_lc(move1, 100)
self.assertEqual(self.product1.value_svl, 375)
self.assertEqual(self.product1.quantity_svl, 19)
@tagged('post_install', '-at_install')
class TestStockValuationLCFIFOVB(TestStockValuationLC):
@classmethod
def setUpClass(cls):
super(TestStockValuationLCFIFOVB, cls).setUpClass()
        cls.vendor1 = cls.env['res.partner'].create({'name': 'vendor1'})
cls.vendor1.property_account_payable_id = cls.payable_account
cls.vendor2 = cls.env['res.partner'].create({'name': 'vendor2'})
cls.vendor2.property_account_payable_id = cls.payable_account
cls.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
cls.product1.product_tmpl_id.categ_id.property_valuation = 'real_time'
def test_vendor_bill_flow_anglo_saxon_1(self):
"""In anglo saxon accounting, receive 10@10 and invoice. Then invoice 1@50 as a landed costs
and create a linked landed costs record.
"""
self.env.company.anglo_saxon_accounting = True
# Create an RFQ for self.product1, 10@10
rfq = Form(self.env['purchase.order'])
rfq.partner_id = self.vendor1
with rfq.order_line.new() as po_line:
po_line.product_id = self.product1
po_line.price_unit = 10
po_line.product_qty = 10
po_line.taxes_id.clear()
rfq = rfq.save()
rfq.button_confirm()
# Process the receipt
receipt = rfq.picking_ids
wiz = receipt.button_validate()
wiz = self.env['stock.immediate.transfer'].browse(wiz['res_id']).process()
self.assertEqual(rfq.order_line.qty_received, 10)
input_aml = self._get_stock_input_move_lines()[-1]
self.assertEqual(input_aml.debit, 0)
self.assertEqual(input_aml.credit, 100)
valuation_aml = self._get_stock_valuation_move_lines()[-1]
self.assertEqual(valuation_aml.debit, 100)
self.assertEqual(valuation_aml.credit, 0)
        # Create a vendor bill for the RFQ
action = rfq.action_view_invoice()
vb = Form(self.env['account.move'].with_context(action['context']))
vb = vb.save()
vb.post()
input_aml = self._get_stock_input_move_lines()[-1]
self.assertEqual(input_aml.debit, 100)
self.assertEqual(input_aml.credit, 0)
payable_aml = self._get_payable_move_lines()[-1]
self.assertEqual(payable_aml.debit, 0)
self.assertEqual(payable_aml.credit, 100)
# Create a vendor bill for a landed cost product, post it and validate a landed cost
# linked to this vendor bill. LC; 1@50
lcvb = Form(self.env['account.move'].with_context(default_type='in_invoice'))
lcvb.partner_id = self.vendor2
with lcvb.invoice_line_ids.new() as inv_line:
inv_line.product_id = self.productlc1
inv_line.price_unit = 50
inv_line.is_landed_costs_line = True
with lcvb.invoice_line_ids.edit(0) as inv_line:
inv_line.tax_ids.clear()
lcvb = lcvb.save()
lcvb.post()
input_aml = self._get_stock_input_move_lines()[-1]
self.assertEqual(input_aml.debit, 50)
self.assertEqual(input_aml.credit, 0)
payable_aml = self._get_payable_move_lines()[-1]
self.assertEqual(payable_aml.debit, 0)
self.assertEqual(payable_aml.credit, 50)
action = lcvb.button_create_landed_costs()
lc = Form(self.env[action['res_model']].browse(action['res_id']))
lc.picking_ids.add(receipt)
lc = lc.save()
lc.button_validate()
self.assertEqual(lc.cost_lines.price_unit, 50)
self.assertEqual(lc.cost_lines.product_id, self.productlc1)
input_aml = self._get_stock_input_move_lines()[-1]
self.assertEqual(input_aml.debit, 0)
self.assertEqual(input_aml.credit, 50)
valuation_aml = self._get_stock_valuation_move_lines()[-1]
self.assertEqual(valuation_aml.debit, 50)
self.assertEqual(valuation_aml.credit, 0)
# Check reconciliation of input aml of lc
lc_input_aml = lc.account_move_id.line_ids.filtered(lambda aml: aml.account_id == self.stock_input_account)
self.assertTrue(len(lc_input_aml.full_reconcile_id), 1)
self.assertEqual(self.product1.quantity_svl, 10)
self.assertEqual(self.product1.value_svl, 150)
def test_vendor_bill_flow_anglo_saxon_2(self):
"""In anglo saxon accounting, receive 10@10 and invoice with the addition of 1@50 as a
        landed cost and create a linked landed costs record.
"""
self.env.company.anglo_saxon_accounting = True
# Create an RFQ for self.product1, 10@10
rfq = Form(self.env['purchase.order'])
rfq.partner_id = self.vendor1
with rfq.order_line.new() as po_line:
po_line.product_id = self.product1
po_line.price_unit = 10
po_line.product_qty = 10
po_line.taxes_id.clear()
rfq = rfq.save()
rfq.button_confirm()
# Process the receipt
receipt = rfq.picking_ids
wiz = receipt.button_validate()
wiz = self.env['stock.immediate.transfer'].browse(wiz['res_id']).process()
self.assertEqual(rfq.order_line.qty_received, 10)
input_aml = self._get_stock_input_move_lines()[-1]
self.assertEqual(input_aml.debit, 0)
self.assertEqual(input_aml.credit, 100)
valuation_aml = self._get_stock_valuation_move_lines()[-1]
self.assertEqual(valuation_aml.debit, 100)
self.assertEqual(valuation_aml.credit, 0)
        # Create a vendor bill for the RFQ and add the landed cost to it
action = rfq.action_view_invoice()
vb = Form(self.env['account.move'].with_context(action['context']))
with vb.invoice_line_ids.new() as inv_line:
inv_line.product_id = self.productlc1
inv_line.price_unit = 50
inv_line.is_landed_costs_line = True
vb = vb.save()
vb.post()
action = vb.button_create_landed_costs()
lc = Form(self.env[action['res_model']].browse(action['res_id']))
lc.picking_ids.add(receipt)
lc = lc.save()
lc.button_validate()
# Check reconciliation of input aml of lc
lc_input_aml = lc.account_move_id.line_ids.filtered(lambda aml: aml.account_id == self.stock_input_account)
self.assertTrue(len(lc_input_aml.full_reconcile_id), 1)
def test_vendor_bill_flow_continental_1(self):
"""In continental accounting, receive 10@10 and invoice. Then invoice 1@50 as a landed costs
and create a linked landed costs record.
"""
self.env.company.anglo_saxon_accounting = False
# Create an RFQ for self.product1, 10@10
rfq = Form(self.env['purchase.order'])
rfq.partner_id = self.vendor1
with rfq.order_line.new() as po_line:
po_line.product_id = self.product1
po_line.price_unit = 10
po_line.product_qty = 10
po_line.taxes_id.clear()
rfq = rfq.save()
rfq.button_confirm()
# Process the receipt
receipt = rfq.picking_ids
wiz = receipt.button_validate()
wiz = self.env['stock.immediate.transfer'].browse(wiz['res_id']).process()
self.assertEqual(rfq.order_line.qty_received, 10)
input_aml = self._get_stock_input_move_lines()[-1]
self.assertEqual(input_aml.debit, 0)
self.assertEqual(input_aml.credit, 100)
valuation_aml = self._get_stock_valuation_move_lines()[-1]
self.assertEqual(valuation_aml.debit, 100)
self.assertEqual(valuation_aml.credit, 0)
        # Create a vendor bill for the RFQ
action = rfq.action_view_invoice()
vb = Form(self.env['account.move'].with_context(action['context']))
vb = vb.save()
vb.post()
expense_aml = self._get_expense_move_lines()[-1]
self.assertEqual(expense_aml.debit, 100)
self.assertEqual(expense_aml.credit, 0)
payable_aml = self._get_payable_move_lines()[-1]
self.assertEqual(payable_aml.debit, 0)
self.assertEqual(payable_aml.credit, 100)
# Create a vendor bill for a landed cost product, post it and validate a landed cost
# linked to this vendor bill. LC; 1@50
lcvb = Form(self.env['account.move'].with_context(default_type='in_invoice'))
lcvb.partner_id = self.vendor2
with lcvb.invoice_line_ids.new() as inv_line:
inv_line.product_id = self.productlc1
inv_line.price_unit = 50
inv_line.is_landed_costs_line = True
with lcvb.invoice_line_ids.edit(0) as inv_line:
inv_line.tax_ids.clear()
lcvb = lcvb.save()
lcvb.post()
expense_aml = self._get_expense_move_lines()[-1]
self.assertEqual(expense_aml.debit, 50)
self.assertEqual(expense_aml.credit, 0)
payable_aml = self._get_payable_move_lines()[-1]
self.assertEqual(payable_aml.debit, 0)
self.assertEqual(payable_aml.credit, 50)
action = lcvb.button_create_landed_costs()
lc = Form(self.env[action['res_model']].browse(action['res_id']))
lc.picking_ids.add(receipt)
lc = lc.save()
lc.button_validate()
self.assertEqual(lc.cost_lines.price_unit, 50)
self.assertEqual(lc.cost_lines.product_id, self.productlc1)
input_aml = self._get_stock_input_move_lines()[-1]
self.assertEqual(input_aml.debit, 0)
self.assertEqual(input_aml.credit, 50)
valuation_aml = self._get_stock_valuation_move_lines()[-1]
self.assertEqual(valuation_aml.debit, 50)
self.assertEqual(valuation_aml.credit, 0)
self.assertEqual(self.product1.quantity_svl, 10)
self.assertEqual(self.product1.value_svl, 150)
|
agpl-3.0
|
wanderine/nipype
|
nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py
|
2
|
2188
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.semtools.diffusion.diffusion import DWIConvert
def test_DWIConvert_inputs():
input_map = dict(args=dict(argstr='%s',
),
conversionMode=dict(argstr='--conversionMode %s',
),
environ=dict(nohash=True,
usedefault=True,
),
fMRI=dict(argstr='--fMRI ',
),
fslNIFTIFile=dict(argstr='--fslNIFTIFile %s',
),
gradientVectorFile=dict(argstr='--gradientVectorFile %s',
hash_files=False,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputBValues=dict(argstr='--inputBValues %s',
),
inputBVectors=dict(argstr='--inputBVectors %s',
),
inputDicomDirectory=dict(argstr='--inputDicomDirectory %s',
),
inputVolume=dict(argstr='--inputVolume %s',
),
outputBValues=dict(argstr='--outputBValues %s',
hash_files=False,
),
outputBVectors=dict(argstr='--outputBVectors %s',
hash_files=False,
),
outputDirectory=dict(argstr='--outputDirectory %s',
hash_files=False,
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
smallGradientThreshold=dict(argstr='--smallGradientThreshold %f',
),
terminal_output=dict(nohash=True,
),
useBMatrixGradientDirections=dict(argstr='--useBMatrixGradientDirections ',
),
useIdentityMeaseurementFrame=dict(argstr='--useIdentityMeaseurementFrame ',
),
writeProtocolGradientsFile=dict(argstr='--writeProtocolGradientsFile ',
),
)
inputs = DWIConvert.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_DWIConvert_outputs():
output_map = dict(gradientVectorFile=dict(),
outputBValues=dict(),
outputBVectors=dict(),
outputDirectory=dict(),
outputVolume=dict(),
)
outputs = DWIConvert.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
bsd-3-clause
|
crossbario/crossbar-examples
|
work/multisub.py
|
3
|
3259
|
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import PublishOptions, SubscribeOptions
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.twisted.util import sleep
class MyComponent(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("session ready")
self.received = []
topic1 = "com.example.topic1"
sub_options = SubscribeOptions(details_arg="details")
pub_options = PublishOptions(acknowledge=True, exclude_me=False)
delay = 0.1
# subscribe handler1 on topic
#
def handler1(value, details=None):
print("handler1 got event")
self.received.append(("handler1", value, details.publication))
sub1 = yield self.subscribe(handler1, topic1, options=sub_options)
print("handler1 subscribed: {}".format(sub1), sub1)
res = yield self.publish(topic1, 1, options=pub_options)
print("event 1 published: {}".format(res))
yield sleep(delay)
# subscribe handler2 on same topic
#
def handler2(value, details=None):
print("handler2 got event")
self.received.append(("handler2", value, details.publication))
sub2 = yield self.subscribe(handler2, topic1, options=sub_options)
print("handler2 subscribed: {}".format(sub2), sub2)
res = yield self.publish(topic1, 2, options=pub_options)
print("event 2 published: {}".format(res))
yield sleep(delay)
# subscribe handler2 on same topic a second time
#
sub2b = yield self.subscribe(handler2, topic1, options=sub_options)
print("handler2 subscribed 2nd: {}".format(sub2b), sub2b)
res = yield self.publish(topic1, 3, options=pub_options)
print("event 3 published: {}".format(res))
yield sleep(delay)
# unsubscribe subscription1
#
yield sub1.unsubscribe()
print("handler1 unsubscribed: {}".format(sub1), sub1)
res = yield self.publish(topic1, 4, options=pub_options)
print("event 4 published: {}".format(res))
yield sleep(delay)
# unsubscribe subscription2
#
yield sub2.unsubscribe()
print("handler2 unsubscribed: {}".format(sub2), sub2)
res = yield self.publish(topic1, 5, options=pub_options)
print("event 5 published: {}".format(res))
yield sleep(delay)
# unsubscribe subscription2b
#
yield sub2b.unsubscribe()
print("handler2 unsubscribed 2nd: {}".format(sub2b), sub2b)
res = yield self.publish(topic1, 6, options=pub_options)
print("event 6 published: {}".format(res))
yield sleep(delay)
print("Done!", self.received)
self.leave()
def onLeave(self, details):
print("session left")
self.disconnect()
def onDisconnect(self):
print("transport disconnected")
reactor.stop()
if __name__ == '__main__':
runner = ApplicationRunner(url="ws://localhost:8080/ws", realm="realm1",
debug=False, debug_wamp=False)
runner.run(MyComponent)
|
apache-2.0
|
avatar29A/pyfuzzy
|
fuzzy/norm/HarmonicMean.py
|
1
|
1128
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: HarmonicMean.py,v 1.8 2009-10-27 20:06:27 rliebscher Exp $"
from fuzzy.norm.Norm import Norm, sum
class HarmonicMean(Norm):
def __init__(self):
super(HarmonicMean, self).__init__(Norm.UNKNOWN)
def __call__(self, *args):
args = self.checkArgsN(args)
if 0. in args:
return 0.
return float(len(args))/sum(*[1.0/x for x in args])
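# --- Illustrative usage sketch (not part of the original file) ---
# The norm above computes len(args) / sum(1/x), i.e. the harmonic mean of its
# arguments, short-circuiting to 0 if any argument is 0. Assumes the pyfuzzy
# package is installed, since Norm.checkArgsN comes from it.
if __name__ == "__main__":
    print(HarmonicMean()(0.5, 1.0))   # 2 / (1/0.5 + 1/1.0) = 0.666...
    print(HarmonicMean()(0.5, 0.0))   # any zero argument yields 0.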
|
mit
|
GdZ/scriptfile
|
software/googleAppEngine/lib/django_1_3/django/db/backends/oracle/introspection.py
|
210
|
5106
|
from django.db.backends import BaseDatabaseIntrospection
import cx_Oracle
import re
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type objects to Django Field types.
data_types_reverse = {
cx_Oracle.CLOB: 'TextField',
cx_Oracle.DATETIME: 'DateField',
cx_Oracle.FIXED_CHAR: 'CharField',
cx_Oracle.NCLOB: 'TextField',
cx_Oracle.NUMBER: 'DecimalField',
cx_Oracle.STRING: 'CharField',
cx_Oracle.TIMESTAMP: 'DateTimeField',
}
try:
data_types_reverse[cx_Oracle.NATIVE_FLOAT] = 'FloatField'
except AttributeError:
pass
try:
data_types_reverse[cx_Oracle.UNICODE] = 'CharField'
except AttributeError:
pass
def get_field_type(self, data_type, description):
# If it's a NUMBER with scale == 0, consider it an IntegerField
if data_type == cx_Oracle.NUMBER and description[5] == 0:
if description[4] > 11:
return 'BigIntegerField'
else:
return 'IntegerField'
else:
return super(DatabaseIntrospection, self).get_field_type(
data_type, description)
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("SELECT TABLE_NAME FROM USER_TABLES")
return [row[0].lower() for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
cursor.execute("SELECT * FROM %s WHERE ROWNUM < 2" % self.connection.ops.quote_name(table_name))
description = []
for desc in cursor.description:
description.append((desc[0].lower(),) + desc[1:])
return description
def table_name_converter(self, name):
"Table name comparison is case insensitive under Oracle"
return name.lower()
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))])
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
table_name = table_name.upper()
cursor.execute("""
SELECT ta.column_id - 1, tb.table_name, tb.column_id - 1
FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,
user_tab_cols ta, user_tab_cols tb
WHERE user_constraints.table_name = %s AND
ta.table_name = %s AND
ta.column_name = ca.column_name AND
ca.table_name = %s AND
user_constraints.constraint_name = ca.constraint_name AND
user_constraints.r_constraint_name = cb.constraint_name AND
cb.table_name = tb.table_name AND
cb.column_name = tb.column_name AND
ca.position = cb.position""", [table_name, table_name, table_name])
relations = {}
for row in cursor.fetchall():
relations[row[0]] = (row[2], row[1].lower())
return relations
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
# This query retrieves each index on the given table, including the
# first associated field name
# "We were in the nick of time; you were in great peril!"
sql = """\
SELECT LOWER(all_tab_cols.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1 ELSE 0
END AS is_primary_key,
CASE user_indexes.uniqueness
WHEN 'UNIQUE' THEN 1 ELSE 0
END AS is_unique
FROM all_tab_cols, user_cons_columns, user_constraints, user_ind_columns, user_indexes
WHERE all_tab_cols.column_name = user_cons_columns.column_name (+)
AND all_tab_cols.table_name = user_cons_columns.table_name (+)
AND user_cons_columns.constraint_name = user_constraints.constraint_name (+)
AND user_constraints.constraint_type (+) = 'P'
AND user_ind_columns.column_name (+) = all_tab_cols.column_name
AND user_ind_columns.table_name (+) = all_tab_cols.table_name
AND user_indexes.uniqueness (+) = 'UNIQUE'
AND user_indexes.index_name (+) = user_ind_columns.index_name
AND all_tab_cols.table_name = UPPER(%s)
"""
cursor.execute(sql, [table_name])
indexes = {}
for row in cursor.fetchall():
indexes[row[0]] = {'primary_key': row[1], 'unique': row[2]}
return indexes
|
mit
|
Midrya/chromium
|
third_party/boto/core/auth.py
|
68
|
3150
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import requests.packages.urllib3
import hmac
import base64
from hashlib import sha256
import sys
import datetime
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
class SigV2Auth(object):
"""
    Sign a Query Signature V2 request.
"""
def __init__(self, credentials, api_version=''):
self.credentials = credentials
self.api_version = api_version
self.hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
def calc_signature(self, args):
scheme, host, port = requests.packages.urllib3.get_host(args['url'])
string_to_sign = '%s\n%s\n%s\n' % (args['method'], host, '/')
hmac = self.hmac.copy()
args['params']['SignatureMethod'] = 'HmacSHA256'
if self.credentials.token:
args['params']['SecurityToken'] = self.credentials.token
sorted_params = sorted(args['params'])
pairs = []
for key in sorted_params:
value = args['params'][key]
pairs.append(quote(key, safe='') + '=' +
quote(value, safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
print('string_to_sign')
print(string_to_sign)
hmac.update(string_to_sign.encode('utf-8'))
b64 = base64.b64encode(hmac.digest()).strip().decode('utf-8')
return (qs, b64)
def add_auth(self, args):
args['params']['Action'] = 'DescribeInstances'
args['params']['AWSAccessKeyId'] = self.credentials.access_key
args['params']['SignatureVersion'] = '2'
args['params']['Timestamp'] = datetime.datetime.utcnow().isoformat()
args['params']['Version'] = self.api_version
qs, signature = self.calc_signature(args)
args['params']['Signature'] = signature
if args['method'] == 'POST':
args['data'] = args['params']
args['params'] = {}
|
bsd-3-clause
|
applicationdevm/XlsxWriter
|
xlsxwriter/test/comparison/test_chart_data_labels03.py
|
8
|
1743
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_data_labels03.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [45693952, 45762816]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'data_labels': {'value': 1, 'position': 'outside_end'},
})
chart.add_series({
'values': '=Sheet1!$B$1:$B$5',
'data_labels': {'value': 1, 'position': 'inside_base'},
})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
bsd-2-clause
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/verification_ip_flow_parameters.py
|
1
|
3503
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VerificationIPFlowParameters(Model):
"""Parameters that define the IP flow to be verified.
:param target_resource_id: The ID of the target resource to perform
next-hop on.
:type target_resource_id: str
:param direction: The direction of the packet represented as a 5-tuple.
Possible values include: 'Inbound', 'Outbound'
:type direction: str or ~azure.mgmt.network.v2017_08_01.models.Direction
:param protocol: Protocol to be verified on. Possible values include:
'TCP', 'UDP'
:type protocol: str or ~azure.mgmt.network.v2017_08_01.models.Protocol
:param local_port: The local port. Acceptable values are a single integer
in the range (0-65535). Support for * for the source port, which depends
on the direction.
:type local_port: str
:param remote_port: The remote port. Acceptable values are a single
integer in the range (0-65535). Support for * for the source port, which
depends on the direction.
:type remote_port: str
:param local_ip_address: The local IP address. Acceptable values are valid
IPv4 addresses.
:type local_ip_address: str
:param remote_ip_address: The remote IP address. Acceptable values are
valid IPv4 addresses.
:type remote_ip_address: str
:param target_nic_resource_id: The NIC ID. (If VM has multiple NICs and IP
forwarding is enabled on any of them, then this parameter must be
specified. Otherwise optional).
:type target_nic_resource_id: str
"""
_validation = {
'target_resource_id': {'required': True},
'direction': {'required': True},
'protocol': {'required': True},
'local_port': {'required': True},
'remote_port': {'required': True},
'local_ip_address': {'required': True},
'remote_ip_address': {'required': True},
}
_attribute_map = {
'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
'direction': {'key': 'direction', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
'local_port': {'key': 'localPort', 'type': 'str'},
'remote_port': {'key': 'remotePort', 'type': 'str'},
'local_ip_address': {'key': 'localIPAddress', 'type': 'str'},
'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'},
'target_nic_resource_id': {'key': 'targetNicResourceId', 'type': 'str'},
}
def __init__(self, target_resource_id, direction, protocol, local_port, remote_port, local_ip_address, remote_ip_address, target_nic_resource_id=None):
super(VerificationIPFlowParameters, self).__init__()
self.target_resource_id = target_resource_id
self.direction = direction
self.protocol = protocol
self.local_port = local_port
self.remote_port = remote_port
self.local_ip_address = local_ip_address
self.remote_ip_address = remote_ip_address
self.target_nic_resource_id = target_nic_resource_id
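# --- Illustrative sketch (not part of the generated file) ---
# Constructing the model directly with placeholder values; msrest must be
# installed because the module imports it.
if __name__ == '__main__':
    params = VerificationIPFlowParameters(
        target_resource_id='<target-vm-resource-id>',   # placeholder
        direction='Outbound',
        protocol='TCP',
        local_port='80',
        remote_port='443',
        local_ip_address='10.0.0.4',
        remote_ip_address='203.0.113.10')
    print(params.direction, params.protocol)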
|
mit
|
MatthewWilkes/django-oscar
|
src/oscar/apps/search/app.py
|
33
|
1040
|
from django.conf.urls import url
from haystack.views import search_view_factory
from oscar.core.application import Application
from oscar.core.loading import get_class
from oscar.apps.search import facets
class SearchApplication(Application):
name = 'search'
search_view = get_class('search.views', 'FacetedSearchView')
search_form = get_class('search.forms', 'SearchForm')
def get_urls(self):
# The form class has to be passed to the __init__ method as that is how
# Haystack works. It's slightly different to normal CBVs.
urlpatterns = [
url(r'^$', search_view_factory(
view_class=self.search_view,
form_class=self.search_form,
searchqueryset=self.get_sqs()),
name='search'),
]
return self.post_process_urls(urlpatterns)
def get_sqs(self):
"""
        Return the SQS required by the Haystack search view
"""
return facets.base_sqs()
application = SearchApplication()
|
bsd-3-clause
|
sarvex/tensorflow
|
tensorflow/lite/testing/op_tests/elu.py
|
17
|
2060
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for elu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_elu_tests(options):
"""Make a set of tests to do (float) tf.nn.elu."""
test_parameters = [
{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.compat.v1.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.elu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
apache-2.0
|
GNOME/conduit
|
test/soup/env/__init__.py
|
2
|
1235
|
import os, sys
from soup.utils.pluginloader import PluginLoader
class EnvironmentWrapper(object):
@classmethod
def enabled(cls, opts):
return True
def prepare_environment(self):
""" Modify the environment that the tests are running in """
pass
def decorate_test(self, test):
""" Decorate a callable so that it can be run in the modified environment """
return test
def finalize_environment(self):
""" Clean up the environment. Called at the very end of the test suite. """
pass
class _EnvironmentLoader(PluginLoader):
_subclass_ = EnvironmentWrapper
_module_ = "soup.env"
_path_ = os.path.dirname(__file__)
def prepare_environment(self, opts):
self.active = []
for eklass in self.get_all():
if eklass.enabled(opts):
e = eklass()
e.prepare_environment()
self.active.append(e)
def decorate_test(self, test):
t = test
for e in self.active:
t = e.decorate_test(t)
return t
def finalize_environment(self):
for e in self.active:
e.finalize_environment()
EnvironmentLoader = _EnvironmentLoader()
|
gpl-2.0
|
jctanner/ansible
|
test/units/modules/test_pip.py
|
35
|
1502
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pytest
from ansible.modules import pip
pytestmark = pytest.mark.usefixtures('patch_ansible_module')
@pytest.mark.parametrize('patch_ansible_module', [{'name': 'six'}], indirect=['patch_ansible_module'])
def test_failure_when_pip_absent(mocker, capfd):
get_bin_path = mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
get_bin_path.return_value = None
with pytest.raises(SystemExit):
pip.main()
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert 'pip needs to be installed' in results['msg']
@pytest.mark.parametrize('patch_ansible_module, test_input, expected', [
[None, ['django>1.11.1', '<1.11.2', 'ipaddress', 'simpleproject<2.0.0', '>1.1.0'],
['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']],
[None, ['django>1.11.1,<1.11.2,ipaddress', 'simpleproject<2.0.0,>1.1.0'],
['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']],
[None, ['django>1.11.1', '<1.11.2', 'ipaddress,simpleproject<2.0.0,>1.1.0'],
['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']]])
def test_recover_package_name(test_input, expected):
assert pip._recover_package_name(test_input) == expected
|
gpl-3.0
|
tsgit/invenio
|
modules/bibcheck/lib/plugins/rename_subfield_filter.py
|
2
|
2388
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2016 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Bibcheck plugin to move (rename) a subfield, restricted by subfield_filter, if
the pattern matches and complement == False, or
the pattern does not match and complement == True.
Example:
[mvtexkey_withSpace]
check = rename_subfield_filter
filter_collection = HEP
check.source_field = "035__a"
check.new_code = "z"
check.pattern = " "
check.complement = false
check.subfield_filter = ["9", "SPIRESTeX"]
[mvtexkey_wrongSyntax]
check = rename_subfield_filter
filter_collection = HEP
check.source_field = "035__a"
check.new_code = "z"
check.pattern = "^[A-Za-z]+:\\d{4}[a-z]{2,3}$"
check.complement = true
check.subfield_filter = ["9", "INSPIRETeX"]
"""
def check_record(record, source_field, new_code,
pattern='', subfield_filter=(None, None), complement=False):
""" Changes the code of a subfield to new_code """
import re
from invenio.bibrecord import record_modify_subfield
assert len(source_field) == 6
source_field = source_field.replace("_", " ")
assert len(subfield_filter) == 2
subfield_filter = tuple(subfield_filter)
for pos, val in record.iterfield(source_field, subfield_filter):
pattern_matches = re.search(pattern, val) if pattern else True
if (pattern_matches and not complement) or \
(complement and not pattern_matches):
record_modify_subfield(record, source_field[:3], new_code, val,
pos[2], field_position_local=pos[1])
record.set_amended('move from %s to %s: %s' %
(source_field.replace(" ", "_"), new_code, val))
|
gpl-2.0
|
apache/stratos
|
components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/databridge/thrift/thrift/Thrift.py
|
253
|
4543
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
class TType:
STOP = 0
VOID = 1
BOOL = 2
BYTE = 3
I08 = 3
DOUBLE = 4
I16 = 6
I32 = 8
I64 = 10
STRING = 11
UTF7 = 11
STRUCT = 12
MAP = 13
SET = 14
LIST = 15
UTF8 = 16
UTF16 = 17
_VALUES_TO_NAMES = ('STOP',
'VOID',
'BOOL',
'BYTE',
'DOUBLE',
None,
'I16',
None,
'I32',
None,
'I64',
'STRING',
'STRUCT',
'MAP',
'SET',
'LIST',
'UTF8',
'UTF16')
class TMessageType:
CALL = 1
REPLY = 2
EXCEPTION = 3
ONEWAY = 4
class TProcessor:
"""Base class for procsessor, which works on two streams."""
def process(iprot, oprot):
pass
class TException(Exception):
"""Base class for all thrift exceptions."""
# BaseException.message is deprecated in Python v[2.6,3.0)
if (2, 6, 0) <= sys.version_info < (3, 0):
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
def __init__(self, message=None):
Exception.__init__(self, message)
self.message = message
class TApplicationException(TException):
"""Application level thrift exceptions."""
UNKNOWN = 0
UNKNOWN_METHOD = 1
INVALID_MESSAGE_TYPE = 2
WRONG_METHOD_NAME = 3
BAD_SEQUENCE_ID = 4
MISSING_RESULT = 5
INTERNAL_ERROR = 6
PROTOCOL_ERROR = 7
INVALID_TRANSFORM = 8
INVALID_PROTOCOL = 9
UNSUPPORTED_CLIENT_TYPE = 10
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
def __str__(self):
if self.message:
return self.message
elif self.type == self.UNKNOWN_METHOD:
return 'Unknown method'
elif self.type == self.INVALID_MESSAGE_TYPE:
return 'Invalid message type'
elif self.type == self.WRONG_METHOD_NAME:
return 'Wrong method name'
elif self.type == self.BAD_SEQUENCE_ID:
return 'Bad sequence ID'
elif self.type == self.MISSING_RESULT:
return 'Missing result'
elif self.type == self.INTERNAL_ERROR:
return 'Internal error'
elif self.type == self.PROTOCOL_ERROR:
return 'Protocol error'
elif self.type == self.INVALID_TRANSFORM:
return 'Invalid transform'
elif self.type == self.INVALID_PROTOCOL:
return 'Invalid protocol'
elif self.type == self.UNSUPPORTED_CLIENT_TYPE:
return 'Unsupported client type'
else:
return 'Default (unknown) TApplicationException'
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.type = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
oprot.writeStructBegin('TApplicationException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
if self.type is not None:
oprot.writeFieldBegin('type', TType.I32, 2)
oprot.writeI32(self.type)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
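# --- Illustrative sketch (not part of the original file) ---
# TApplicationException renders a default message from its error type when no
# explicit message is given.
if __name__ == '__main__':
    print(TApplicationException(TApplicationException.UNKNOWN_METHOD))  # -> Unknown method
    print(TApplicationException(message='boom'))                        # -> boom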
|
apache-2.0
|
xbmcmegapack/plugin.video.megapack.dev
|
resources/lib/menus/home_languages_japanese.py
|
1
|
1113
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine ([email protected])
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Languages_Japanese():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
languages=["Japanese"]))
|
gpl-3.0
|
caraya/ebook-experiments
|
python-xml-gen/xml-gen.py
|
2
|
2180
|
#!/usr/bin/env python
import mimetypes
import glob
import os
import os.path
# Initialize the mimetypes database
mimetypes.init()
# Create the package.opf file
package = open('package.opf', 'w')
# The glob below should encompass everything under
# OEBPS. Right now I'm trying to remove empty directories
# and the package_content variable.
# WARNING: This glob will add all files and directories
# to the variable. You will have to edit the file and remove
# empty directories and the package.opf file reference from
# both the manifest and the spine
package_content = glob.glob('OEBPS/**/*')
# FIRST PASS AT WRITING FUNCTION TO ADDRESS ISSUE ABOVE
#for file in os.listdir( location ):
# if os.path.isfile(os.path.join('OEBPS', file)):
# package_content = ''
# package_content += file
# Rather than use a templating system we build the XML portion
# by hand
template_top = '''<package xmlns="http://www.idpf.org/2007/opf"
unique-identifier="book-id"
version="3.0" xml:lang="en">
<metadata >
<!-- TITLE -->
<dc:title></dc:title>
<!-- AUTHOR, PUBLISHER AND PUBLICATION DATES-->
<dc:creator></dc:creator>
<dc:publisher></dc:publisher>
<dc:date></dc:date>
<meta property="dcterms:modified"></meta>
<!-- MISC INFORMATION -->
<dc:language>en</dc:language>
<dc:identifier id="book-id"></dc:identifier>
<meta name="cover" content="img-cov" />
</metadata>
<manifest>
'''
template_transition = '''</manifest>
<spine toc="ncx">'''
template_bottom = '''</spine>
</package>'''
manifest = ""
spine = ""
# Write the content of OEBPS into the manifest and spines
for i, item in enumerate(package_content):
basename = os.path.basename(item)
mime = mimetypes.guess_type(item, strict=True)
manifest += '\t<item id="file_%s" href="%s" media-type="%s"/>\n' % (i+1, basename, mime[0])
spine += '\n\t<itemref idref="file_%s" />' % (i+1)
# I don't remember enough Python to interpolate all of this content in one
# go, so writing the pieces out separately should do for now.
package.write(template_top)
package.write(manifest)
package.write(template_transition)
package.write(spine)
package.write(template_bottom)
|
mit
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/nbconvert/filters/latex.py
|
10
|
1868
|
"""Latex filters.
Module of useful filters for processing Latex within Jinja latex templates.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
from nbconvert.utils.pandoc import pandoc
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
LATEX_RE_SUBS = (
(re.compile(r'\.\.\.+'), r'{\\ldots}'),
)
# Latex substitutions for escaping latex.
# see: http://stackoverflow.com/questions/16259923/how-can-i-escape-latex-special-characters-inside-django-templates
LATEX_SUBS = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\{',
'}': r'\}',
'~': r'\textasciitilde{}',
'^': r'\^{}',
'\\': r'\textbackslash{}',
}
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
__all__ = ['escape_latex']
def escape_latex(text):
"""
Escape characters that may conflict with latex.
Parameters
----------
text : str
Text containing characters that may conflict with Latex
"""
text = ''.join(LATEX_SUBS.get(c, c) for c in text)
for pattern, replacement in LATEX_RE_SUBS:
text = pattern.sub(replacement, text)
return text
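# --- Illustrative usage sketch (not part of the original file) ---
# escape_latex() escapes LaTeX-special characters and rewrites '...' as \ldots.
# Running this file requires nbconvert to be installed, since the module
# imports nbconvert.utils.pandoc at the top.
if __name__ == '__main__':
    print(escape_latex('50% of $10 ... roughly #5_a'))
    # -> 50\% of \$10 {\ldots} roughly \#5\_a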
|
gpl-3.0
|
mlperf/training_results_v0.5
|
v0.5.0/google/cloud_v3.8/gnmt-tpuv3-8/code/gnmt/model/staging/models/rough/mask_rcnn/utils.py
|
8
|
2663
|
# Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Util functions for running Mask RCNN model using TPU low level APIs.
"""
import tensorflow as tf
def wrap_computation_in_while_loop(op_fn, n, parallel_iterations=1):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
ops = op_fn()
if not isinstance(ops, list):
ops = [ops]
with tf.control_dependencies(ops):
return i + 1
return tf.while_loop(
lambda i: tf.less(i, n),
computation, [tf.constant(0)],
parallel_iterations=parallel_iterations)
class InputsFlattener(object):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
"""
def __init__(self):
self._feature_structure = {}
def flatten_features_and_labels(self, features, labels):
"""Flattens the `features` and `labels` to a single tensor list."""
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
return tf.contrib.framework.nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
unflattened_inputs = tf.contrib.framework.nest.pack_sequence_as(
self._feature_structure, flattened_inputs)
return unflattened_inputs['features'], unflattened_inputs.get('labels')
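# --- Added usage sketch (not part of the original module) ---
# Typical round trip on one shard: flatten on the host side, carry the flat
# tensor list across the infeed/outfeed boundary, then restore the structure.
#
#   flattener = InputsFlattener()
#   flat = flattener.flatten_features_and_labels({'image': images}, labels)
#   ... pass `flat` through the enqueue/dequeue ops ...
#   features, labels = flattener.unflatten_features_and_labels(flat)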
class CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
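# --- Added usage sketch (not part of the original module) ---
# A CapturedObject is created outside a control-flow body, populated inside
# it, and read back afterwards:
#
#   captured = CapturedObject()
#
#   def body_fn():
#       captured.capture(some_python_object)   # runs while tracing the body
#       return tf.no_op()
#
#   ... build and run the loop ...
#   value = captured.get()   # raises RuntimeError if capture() never ran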
|
apache-2.0
|
ak15199/rop
|
art/cascade.py
|
1
|
1744
|
from ._baseclass import ArtBaseClass
import numpy as np
from opc.colors import BLACK
from opc.hue import hsvToRgb
from math import fmod, sin, cos, sqrt
class ClearTrain(object):
def __init__(self, length):
self.length = length
self.points = [(0, 0) for i in range(length)]
self.head = 0
def add(self, matrix, x, y):
x0, y0 = self.points[self.head]
matrix.drawPixel(x0, y0, BLACK)
self.points[self.head] = (x, y)
self.head = (self.head + 1) % self.length
DELTA_AMP = 0.09
DELTA_ANG = 0.033
DELTA_HUE = 0.006
TRAIN_LEN = 16
class Art(ArtBaseClass):
description = "Color cascade (needs tuning for > 32x32)"
def __init__(self, matrix, config):
self.hue = 0
self.ang = 0
self.amp = 0
self.mult = 1+sqrt(matrix.numpix/32)/90
self.train = ClearTrain(TRAIN_LEN)
def start(self, matrix):
matrix.clear()
def refresh(self, matrix):
# this relies on the fact that the pixels we seed get multiplied and
# overflow the uint8 in interesting ways
matrix.blur(3)
matrix.buf.buf = (self.mult*matrix.buf.buf).astype(np.uint8)
self.amp += DELTA_AMP
if self.amp >= 1 and False:
self.amp = 0
self.hue += DELTA_HUE
self.ang += DELTA_ANG
xcenter = matrix.width / 2.0
ycenter = matrix.height / 2.0
amp = sin(self.amp)
tx = amp * sin(self.ang)
ty = amp * cos(self.ang)
x = xcenter + xcenter * tx
y = ycenter + ycenter * ty
color = hsvToRgb(fmod(self.hue, 1), 1, 1)
matrix.drawPixel(x, y, color)
self.train.add(matrix, x, y)
def interval(self):
return 40
|
gpl-3.0
|
pedropenna/.emacs.d
|
elpa/floobits-20160804.1135/floo/common/api.py
|
2
|
7200
|
import sys
import base64
import json
import subprocess
import traceback
from functools import wraps
try:
import ssl
except ImportError:
ssl = False
PY2 = sys.version_info < (3, 0)
try:
import __builtin__
str_instances = (str, __builtin__.basestring)
except Exception:
str_instances = (str, )
try:
import urllib
from urllib.request import Request, urlopen
HTTPError = urllib.error.HTTPError
URLError = urllib.error.URLError
except (AttributeError, ImportError, ValueError):
import urllib2
from urllib2 import Request, urlopen
HTTPError = urllib2.HTTPError
URLError = urllib2.URLError
try:
from .. import editor
from . import msg, shared as G, utils
except ImportError:
import editor
import msg
import shared as G
import utils
def get_basic_auth(host):
username = G.AUTH.get(host, {}).get('username')
secret = G.AUTH.get(host, {}).get('secret')
if username is None or secret is None:
return
basic_auth = ('%s:%s' % (username, secret)).encode('utf-8')
basic_auth = base64.encodestring(basic_auth)
return basic_auth.decode('ascii').replace('\n', '')
class APIResponse():
def __init__(self, r):
self.body = None
if isinstance(r, bytes):
r = r.decode('utf-8')
if isinstance(r, str_instances):
lines = r.split('\n')
self.code = int(lines[0])
if self.code != 204:
self.body = json.loads('\n'.join(lines[1:]))
elif hasattr(r, 'code'):
# Hopefully this is an HTTPError
self.code = r.code
if self.code != 204:
self.body = json.loads(r.read().decode("utf-8"))
elif hasattr(r, 'reason'):
# Hopefully this is a URLError
# horrible hack, but lots of other stuff checks the response code :/
self.code = 500
self.body = r.reason
else:
# WFIO
self.code = 500
self.body = r
msg.debug('code: %s' % self.code)
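# --- Added sketch (not part of the original plugin) ---
# The proxy path hands APIResponse a "<status code>\n<json body>" string:
#
#   r = APIResponse('200\n{"name": "demo"}')
#   r.code  -> 200
#   r.body  -> {'name': 'demo'}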
def proxy_api_request(host, url, data, method):
args = ['python', '-m', 'floo.proxy', '--host', host, '--url', url]
if data:
args += ["--data", json.dumps(data)]
if method:
args += ["--method", method]
msg.log('Running ', ' '.join(args), ' (', G.PLUGIN_PATH, ')')
proc = subprocess.Popen(args, cwd=G.PLUGIN_PATH, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(stdout, stderr) = proc.communicate()
if stderr:
raise IOError(stderr)
if proc.poll() != 0:
raise IOError(stdout)
r = APIResponse(stdout)
return r
def user_agent():
return 'Floobits Plugin %s %s %s py-%s.%s' % (
editor.name(),
G.__PLUGIN_VERSION__,
editor.platform(),
sys.version_info[0],
sys.version_info[1]
)
def hit_url(host, url, data, method):
if data:
data = json.dumps(data).encode('utf-8')
r = Request(url, data=data)
r.method = method
r.get_method = lambda: method
auth = get_basic_auth(host)
if auth:
r.add_header('Authorization', 'Basic %s' % auth)
r.add_header('Accept', 'application/json')
r.add_header('Content-type', 'application/json')
r.add_header('User-Agent', user_agent())
return urlopen(r, timeout=5)
def api_request(host, url, data=None, method=None):
if data:
method = method or 'POST'
else:
method = method or 'GET'
if ssl is False:
return proxy_api_request(host, url, data, method)
try:
r = hit_url(host, url, data, method)
except HTTPError as e:
r = e
except URLError as e:
msg.warn('Error hitting url ', url, ': ', e)
r = e
if not PY2:
msg.warn('Retrying using system python...')
return proxy_api_request(host, url, data, method)
return APIResponse(r)
def create_workspace(host, post_data):
api_url = 'https://%s/api/workspace' % host
return api_request(host, api_url, post_data)
def delete_workspace(host, owner, workspace):
api_url = 'https://%s/api/workspace/%s/%s' % (host, owner, workspace)
return api_request(host, api_url, method='DELETE')
def update_workspace(workspace_url, data):
result = utils.parse_url(workspace_url)
api_url = 'https://%s/api/workspace/%s/%s' % (result['host'], result['owner'], result['workspace'])
return api_request(result['host'], api_url, data, method='PUT')
def get_workspace_by_url(url):
result = utils.parse_url(url)
api_url = 'https://%s/api/workspace/%s/%s' % (result['host'], result['owner'], result['workspace'])
return api_request(result['host'], api_url)
def get_workspace(host, owner, workspace):
api_url = 'https://%s/api/workspace/%s/%s' % (host, owner, workspace)
return api_request(host, api_url)
def get_workspaces(host):
api_url = 'https://%s/api/workspaces/can/view' % (host)
return api_request(host, api_url)
def get_orgs(host):
api_url = 'https://%s/api/orgs' % (host)
return api_request(host, api_url)
def get_orgs_can_admin(host):
api_url = 'https://%s/api/orgs/can/admin' % (host)
return api_request(host, api_url)
def request_review(host, owner, workspace, description):
api_url = 'https://%s/api/workspace/%s/%s/review' % (host, owner, workspace)
return api_request(host, api_url, data={'description': description})
def send_error(description=None, exception=None):
G.ERROR_COUNT += 1
data = {
'jsondump': {
'error_count': G.ERROR_COUNT
},
'message': {},
'dir': G.COLAB_DIR,
}
stack = ''
if G.AGENT:
data['owner'] = getattr(G.AGENT, "owner", None)
data['username'] = getattr(G.AGENT, "username", None)
data['workspace'] = getattr(G.AGENT, "workspace", None)
if exception:
exc_info = sys.exc_info()
try:
stack = traceback.format_exception(*exc_info)
except Exception:
if exc_info[0] is None:
stack = 'No sys.exc_info()'
else:
stack = "Python is rtardd"
try:
description = str(exception)
except Exception:
description = "Python is rtadd"
data['message'] = {
'description': description,
'stack': stack
}
msg.log('Floobits plugin error! Sending exception report: ', data['message'])
if description:
data['message']['description'] = description
if G.ERRORS_SENT >= G.MAX_ERROR_REPORTS:
msg.warn('Already sent ', G.ERRORS_SENT, ' errors this session. Not sending any more.\n', description, exception, stack)
return
try:
# TODO: use G.AGENT.proto.host?
api_url = 'https://%s/api/log' % (G.DEFAULT_HOST)
r = api_request(G.DEFAULT_HOST, api_url, data)
G.ERRORS_SENT += 1
return r
except Exception as e:
print(e)
def send_errors(f):
@wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
send_error(None, e)
raise
return wrapped
|
gpl-3.0
|
zhjunlang/kbengine
|
kbe/src/lib/python/Lib/distutils/tests/test_build_clib.py
|
95
|
4978
|
"""Tests for distutils.command.build_clib."""
import unittest
import os
import sys
from test.support import run_unittest
from distutils.command.build_clib import build_clib
from distutils.errors import DistutilsSetupError
from distutils.tests import support
from distutils.spawn import find_executable
class BuildCLibTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_check_library_dist(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
# 'libraries' option must be a list
self.assertRaises(DistutilsSetupError, cmd.check_library_list, 'foo')
# each element of 'libraries' must a 2-tuple
self.assertRaises(DistutilsSetupError, cmd.check_library_list,
['foo1', 'foo2'])
# first element of each tuple in 'libraries'
# must be a string (the library name)
self.assertRaises(DistutilsSetupError, cmd.check_library_list,
[(1, 'foo1'), ('name', 'foo2')])
# library name may not contain directory separators
self.assertRaises(DistutilsSetupError, cmd.check_library_list,
[('name', 'foo1'),
('another/name', 'foo2')])
# second element of each tuple must be a dictionary (build info)
self.assertRaises(DistutilsSetupError, cmd.check_library_list,
[('name', {}),
('another', 'foo2')])
# those work
libs = [('name', {}), ('name', {'ok': 'good'})]
cmd.check_library_list(libs)
def test_get_source_files(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
# "in 'libraries' option 'sources' must be present and must be
# a list of source filenames
cmd.libraries = [('name', {})]
self.assertRaises(DistutilsSetupError, cmd.get_source_files)
cmd.libraries = [('name', {'sources': 1})]
self.assertRaises(DistutilsSetupError, cmd.get_source_files)
cmd.libraries = [('name', {'sources': ['a', 'b']})]
self.assertEqual(cmd.get_source_files(), ['a', 'b'])
cmd.libraries = [('name', {'sources': ('a', 'b')})]
self.assertEqual(cmd.get_source_files(), ['a', 'b'])
cmd.libraries = [('name', {'sources': ('a', 'b')}),
('name2', {'sources': ['c', 'd']})]
self.assertEqual(cmd.get_source_files(), ['a', 'b', 'c', 'd'])
def test_build_libraries(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
class FakeCompiler:
def compile(*args, **kw):
pass
create_static_lib = compile
cmd.compiler = FakeCompiler()
# build_libraries is also doing a bit of typo checking
lib = [('name', {'sources': 'notvalid'})]
self.assertRaises(DistutilsSetupError, cmd.build_libraries, lib)
lib = [('name', {'sources': list()})]
cmd.build_libraries(lib)
lib = [('name', {'sources': tuple()})]
cmd.build_libraries(lib)
def test_finalize_options(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
cmd.include_dirs = 'one-dir'
cmd.finalize_options()
self.assertEqual(cmd.include_dirs, ['one-dir'])
cmd.include_dirs = None
cmd.finalize_options()
self.assertEqual(cmd.include_dirs, [])
cmd.distribution.libraries = 'WONTWORK'
self.assertRaises(DistutilsSetupError, cmd.finalize_options)
@unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
def test_run(self):
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
foo_c = os.path.join(pkg_dir, 'foo.c')
self.write_file(foo_c, 'int main(void) { return 1;}\n')
cmd.libraries = [('foo', {'sources': [foo_c]})]
build_temp = os.path.join(pkg_dir, 'build')
os.mkdir(build_temp)
cmd.build_temp = build_temp
cmd.build_clib = build_temp
# before we run the command, we want to make sure
# all commands are present on the system
# by creating a compiler and checking its executables
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
compiler = new_compiler()
customize_compiler(compiler)
for ccmd in compiler.executables.values():
if ccmd is None:
continue
if find_executable(ccmd[0]) is None:
self.skipTest('The %r command is not found' % ccmd[0])
# this should work
cmd.run()
# let's check the result
self.assertIn('libfoo.a', os.listdir(build_temp))
def test_suite():
return unittest.makeSuite(BuildCLibTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
lgpl-3.0
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/decomposition/tests/test_nmf.py
|
33
|
6189
|
import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
"""Test NNDSVD behaviour on negative input"""
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
"""Test that NNDSVD does not return negative values"""
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
"""Test NNDSVD error
Test that _initialize_nmf error is less than the standard deviation of the
entries in the matrix.
"""
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
"""Test NNDSVD variants correctness
Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
the basic version has zeros.
"""
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
"""Test model fit behaviour on negative input"""
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
"""Test that the decomposition does not contain negative values"""
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
"""Test that the fit is not too far away"""
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
"""Test that NLS solver doesn't return negative values"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
"""Test that the NLS results should be close"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
"""Test that NMF.transform returns close values
(transform uses scipy.optimize.nnls for now)
"""
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
"""Smoke test for the case of more components than features."""
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
"""Test sparseness
Test that sparsity constraints actually increase sparseness in the
part where they are applied.
"""
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
"""Test that sparse matrices are accepted as input"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
"""Test that transform works on sparse data. Issue #2124"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
|
apache-2.0
|
evilhero/mylar
|
lib/comictaggerlib/pageloader.py
|
1
|
2202
|
"""A PyQT4 class to load a page image from a ComicArchive in a background thread"""
# Copyright 2012-2014 Anthony Beville
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtCore import pyqtSignal
from comictaggerlib.ui.qtutils import getQImageFromData
#from comicarchive import ComicArchive
#import utils
class PageLoader(QtCore.QThread):
"""
This class holds onto a reference of each instance in a list since
problems occur if the ref count goes to zero and the GC tries to reap
the object while the thread is going.
If the client class wants to stop the thread, they should mark it as
"abandoned", and no signals will be issued.
"""
loadComplete = pyqtSignal(QtGui.QImage)
instanceList = []
mutex = QtCore.QMutex()
# Remove all finished threads from the list
@staticmethod
def reapInstances():
for obj in reversed(PageLoader.instanceList):
if obj.isFinished():
PageLoader.instanceList.remove(obj)
def __init__(self, ca, page_num):
QtCore.QThread.__init__(self)
self.ca = ca
self.page_num = page_num
self.abandoned = False
# remove any old instances, and then add ourself
PageLoader.mutex.lock()
PageLoader.reapInstances()
PageLoader.instanceList.append(self)
PageLoader.mutex.unlock()
def run(self):
image_data = self.ca.getPage(self.page_num)
if self.abandoned:
return
if image_data is not None:
img = getQImageFromData(image_data)
if self.abandoned:
return
self.loadComplete.emit(img)
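# --- Added usage sketch (not part of the original module) ---
# A client creates the loader, connects the signal to its own (hypothetical)
# slot, and starts the thread; setting `abandoned` suppresses the signal if
# the client goes away first.
#
#   loader = PageLoader(comic_archive, page_num=0)
#   loader.loadComplete.connect(viewer.setPageImage)   # hypothetical slot
#   loader.start()
#   ...
#   loader.abandoned = True   # e.g. when the viewer is closed early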
|
gpl-3.0
|
leosartaj/autosign
|
tests/helper.py
|
1
|
1238
|
#!/usr/bin/env python2
##
# autosign
# https://github.com/leosartaj/autosign.git
#
# copyright (c) 2014 sartaj singh
# licensed under the mit license.
##
import shutil, os
from autosign import config
"""
Helper functions for performing tests
"""
def newFile(fName):
"""
Touch a new file
"""
with open(fName, 'a'):
pass
def testArea(obj):
obj.testArea = os.path.join(obj.dire, 'testArea')
os.mkdir(obj.testArea)
obj.unsigned1 = os.path.join(obj.dire, 'testArea/test_unsignedfile1.py')
newFile(obj.unsigned1)
obj.signed1 = os.path.join(obj.dire, 'testArea/test_signedfile1.py')
shutil.copy(obj.signfile, obj.signed1)
obj.testArea2 = os.path.join(obj.dire, 'testArea/testArea2')
os.mkdir(obj.testArea2)
obj.unsigned2 = os.path.join(obj.dire, 'testArea/testArea2/test_unsignedfile2.py')
newFile(obj.unsigned2)
obj.signed2 = os.path.join(obj.dire, 'testArea/testArea2/test_signedfile2.py')
shutil.copy(obj.signfile, obj.signed2)
def readrc(obj):
obj.signrc = os.path.join(obj.dire, 'testData/signrc')
parser = config.gen_parser(obj.signrc)
obj.options_py = config.parse_section(parser, 'python')
obj.options_c = config.parse_section(parser, 'c')
|
mit
|
nicoboss/Floatmotion
|
OpenGL/GL/ARB/internalformat_query.py
|
9
|
1526
|
'''OpenGL extension ARB.internalformat_query
This module customises the behaviour of the
OpenGL.raw.GL.ARB.internalformat_query to provide a more
Python-friendly API
Overview (from the spec)
OpenGL 4.1 has a number of queries to indicate the maximum number of
samples available for different formats. These give a coarse-grained
query mechanism e.g. an implementation can expose different sample
counts for integer and floating-point formats, but not for different
floating-point formats. There is also no convenient way for the user
to determine the granularity of sample counts available, only the
maximum.
This extension adds a query mechanism that allows the user to
determine which sample counts are available for a specific internal
format.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/internalformat_query.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.internalformat_query import *
from OpenGL.raw.GL.ARB.internalformat_query import _EXTENSION_NAME
def glInitInternalformatQueryARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glGetInternalformativ=wrapper.wrapper(glGetInternalformativ).setOutput(
'params',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
### END AUTOGENERATED SECTION
|
agpl-3.0
|
justmearc/zero-phone
|
app-list.py
|
1
|
2441
|
#List of all apps and menu to open them
#copyright (c) 2015 Tyler Spadgenske
#GPL License
###############################
#To be packaged with stock TYOS
###############################
import sys, time
import pygame
class Run():
def __init__(self, fona):
#Stuff to follow app protocol
self.exit = False
self.blit_one_surface = {'surface':[], 'rects':[]}
self.blit = {'surfaces':[], 'rects':[]}
self.next_app = None
#Get list of installed apps
self.get_app_order()
#Setup clock
self.start_time = time.time()
#Stuff for intro animation
self.stock_image = pygame.image.load('/home/pi/zero-phone/' + self.app_order[0] + '.png')
self.stock_rect = self.stock_image.get_rect()
self.load_icons()
#More variables
self.open_app = None
def run_app(self):
if self.open_app != None and self.open_app < len(self.app_order):
self.next_app = self.app_order[self.open_app]
self.exit = True
def get_events(self, event):
#Check for touch to open an app
if event.pos[0] < 52 and event.pos[1] > 29 and event.pos[1] < 78:
self.open_app = 0
if event.pos[0] < 104 and event.pos[0] > 52 and event.pos[1] < 78 and event.pos[1] > 29:
self.open_app = 1
if event.pos[0] < 156 and event.pos[0] > 104 and event.pos[1] < 78 and event.pos[1] > 29:
self.open_app = 2
if event.pos[0] < 208 and event.pos[0] > 156 and event.pos[1] < 78 and event.pos[1] > 29:
self.open_app = 3
def on_first_run(self):
self.open_app = None
def load_icons(self):
for i in range(0, len(self.app_order)):
#Add icon and rect
self.blit['surfaces'].append(pygame.image.load('/home/pi/zero-phone/' + self.app_order[i] + '.png'))
self.stock_rect = self.stock_image.get_rect()
self.stock_rect.centery = 52
self.stock_rect.centerx = 26 + 52 * i
self.blit['rects'].append(self.stock_rect)
def get_app_order(self):
#Get the order of the apps to be blitted
order_file = open('/home/pi/zero-phone/order.txt', 'r')
order = order_file.readlines()
#Remove newlines /n
for i in range(0, len(order)):
order[i] = order[i].rstrip()
self.app_order = order[4:]
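# --- Added note (a sketch, not part of the original app) ---
# get_app_order() drops the first four lines of order.txt and keeps the rest
# as app names, so a file whose last lines are e.g.
#   phone
#   messages
# would give self.app_order == ['phone', 'messages']. The meaning of the
# first four lines is not defined here.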
|
mit
|
sumedhasingla/VTK
|
ThirdParty/Twisted/twisted/internet/test/test_gtkreactor.py
|
28
|
2861
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
import sys
from twisted.trial.unittest import TestCase
class GtkReactorDeprecation(TestCase):
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
class StubGTK:
class GDK:
INPUT_READ = None
def input_add(self, *params):
pass
class StubPyGTK:
def require(self, something):
pass
def setUp(self):
"""
Create a stub for the module 'gtk' if it does not exist, so that it can
be imported without errors or warnings.
"""
self.mods = sys.modules.copy()
sys.modules['gtk'] = self.StubGTK()
sys.modules['pygtk'] = self.StubPyGTK()
def tearDown(self):
"""
Return sys.modules to the way it was before the test.
"""
sys.modules.clear()
sys.modules.update(self.mods)
def lookForDeprecationWarning(self, testmethod, attributeName):
warningsShown = self.flushWarnings([testmethod])
self.assertEqual(len(warningsShown), 1)
self.assertIs(warningsShown[0]['category'], DeprecationWarning)
self.assertEqual(
warningsShown[0]['message'],
"twisted.internet.gtkreactor." + attributeName + " "
"was deprecated in Twisted 10.1.0: All new applications should be "
"written with gtk 2.x, which is supported by "
"twisted.internet.gtk2reactor.")
def test_gtkReactor(self):
"""
Test deprecation of L{gtkreactor.GtkReactor}
"""
from twisted.internet import gtkreactor
gtkreactor.GtkReactor()
self.lookForDeprecationWarning(self.test_gtkReactor, "GtkReactor")
def test_portableGtkReactor(self):
"""
Test deprecation of L{gtkreactor.GtkReactor}
"""
from twisted.internet import gtkreactor
gtkreactor.PortableGtkReactor()
self.lookForDeprecationWarning(self.test_portableGtkReactor,
"PortableGtkReactor")
def test_install(self):
"""
Test deprecation of L{gtkreactor.install}
"""
from twisted.internet import gtkreactor
self.assertRaises(AssertionError, gtkreactor.install)
self.lookForDeprecationWarning(self.test_install, "install")
def test_portableInstall(self):
"""
Test deprecation of L{gtkreactor.portableInstall}
"""
from twisted.internet import gtkreactor
self.assertRaises(AssertionError, gtkreactor.portableInstall)
self.lookForDeprecationWarning(self.test_portableInstall,
"portableInstall")
|
bsd-3-clause
|
mgit-at/ansible
|
lib/ansible/plugins/lookup/keyring.py
|
59
|
1901
|
# (c) 2016, Samuel Boucher <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: keyring
author:
- Samuel Boucher <[email protected]>
version_added: "2.3"
requirements:
- keyring (python library)
short_description: grab secrets from the OS keyring
description:
- Allows you to access data stored in the OS provided keyring/keychain.
"""
EXAMPLES = """
- name : output secrets to screen (BAD IDEA)
debug:
msg: "Password: {{item}}"
with_keyring:
- 'servicename username'
- name: access mysql with password from keyring
mysql_db: login_password={{lookup('keyring','mysql joe')}} login_user=joe
"""
RETURN = """
_raw:
description: secrets stored
"""
HAS_KEYRING = True
from ansible.errors import AnsibleError
from ansible.utils.display import Display
try:
import keyring
except ImportError:
HAS_KEYRING = False
from ansible.plugins.lookup import LookupBase
display = Display()
class LookupModule(LookupBase):
def run(self, terms, **kwargs):
if not HAS_KEYRING:
raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'")
display.vvvv(u"keyring: %s" % keyring.get_keyring())
ret = []
for term in terms:
(servicename, username) = (term.split()[0], term.split()[1])
display.vvvv(u"username: %s, servicename: %s " % (username, servicename))
password = keyring.get_password(servicename, username)
if password is None:
raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username))
ret.append(password.rstrip())
return ret
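# --- Added note (a sketch, not part of the original plugin) ---
# Each term is "<servicename> <username>"; for the EXAMPLES above the lookup
# effectively performs:
#   keyring.get_password('mysql', 'joe')
# and appends the stripped password to the returned list.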
|
gpl-3.0
|
mzdaniel/oh-mainline
|
vendor/packages/twisted/twisted/web/static.py
|
20
|
35712
|
# -*- test-case-name: twisted.web.test.test_static -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Static resources for L{twisted.web}.
"""
import os
import warnings
import urllib
import itertools
import cgi
import time
from zope.interface import implements
from twisted.web import server
from twisted.web import resource
from twisted.web import http
from twisted.web.util import redirectTo
from twisted.python import components, filepath, log
from twisted.internet import abstract, interfaces
from twisted.spread import pb
from twisted.persisted import styles
from twisted.python.util import InsensitiveDict
from twisted.python.runtime import platformType
dangerousPathError = resource.NoResource("Invalid request URL.")
def isDangerous(path):
return path == '..' or '/' in path or os.sep in path
class Data(resource.Resource):
"""
This is a static, in-memory resource.
"""
def __init__(self, data, type):
resource.Resource.__init__(self)
self.data = data
self.type = type
def render_GET(self, request):
request.setHeader("content-type", self.type)
request.setHeader("content-length", str(len(self.data)))
if request.method == "HEAD":
return ''
return self.data
render_HEAD = render_GET
def addSlash(request):
qs = ''
qindex = request.uri.find('?')
if qindex != -1:
qs = request.uri[qindex:]
return "http%s://%s%s/%s" % (
request.isSecure() and 's' or '',
request.getHeader("host"),
(request.uri.split('?')[0]),
qs)
class Redirect(resource.Resource):
def __init__(self, request):
resource.Resource.__init__(self)
self.url = addSlash(request)
def render(self, request):
return redirectTo(self.url, request)
class Registry(components.Componentized, styles.Versioned):
"""
I am a Componentized object that will be made available to internal Twisted
file-based dynamic web content such as .rpy and .epy scripts.
"""
def __init__(self):
components.Componentized.__init__(self)
self._pathCache = {}
persistenceVersion = 1
def upgradeToVersion1(self):
self._pathCache = {}
def cachePath(self, path, rsrc):
self._pathCache[path] = rsrc
def getCachedPath(self, path):
return self._pathCache.get(path)
def loadMimeTypes(mimetype_locations=['/etc/mime.types']):
"""
Multiple file locations containing mime-types can be passed as a list.
The files will be sourced in that order, overriding mime-types from the
files sourced beforehand, but only if a new entry explicitly overrides
the current entry.
"""
import mimetypes
# Grab Python's built-in mimetypes dictionary.
contentTypes = mimetypes.types_map
# Update Python's semi-erroneous dictionary with a few of the
# usual suspects.
contentTypes.update(
{
'.conf': 'text/plain',
'.diff': 'text/plain',
'.exe': 'application/x-executable',
'.flac': 'audio/x-flac',
'.java': 'text/plain',
'.ogg': 'application/ogg',
'.oz': 'text/x-oz',
'.swf': 'application/x-shockwave-flash',
'.tgz': 'application/x-gtar',
'.wml': 'text/vnd.wap.wml',
'.xul': 'application/vnd.mozilla.xul+xml',
'.py': 'text/plain',
'.patch': 'text/plain',
}
)
# Users can override these mime-types by loading them from configuration
# files (this defaults to ['/etc/mime.types']).
for location in mimetype_locations:
if os.path.exists(location):
more = mimetypes.read_mime_types(location)
if more is not None:
contentTypes.update(more)
return contentTypes
def getTypeAndEncoding(filename, types, encodings, defaultType):
p, ext = os.path.splitext(filename)
ext = ext.lower()
if encodings.has_key(ext):
enc = encodings[ext]
ext = os.path.splitext(p)[1].lower()
else:
enc = None
type = types.get(ext, defaultType)
return type, enc
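# --- Added usage sketch (not part of the original module) ---
# With the tables built above, a gzip-compressed tarball resolves to the type
# of the inner file plus the "gzip" encoding:
#
#   types = loadMimeTypes()
#   getTypeAndEncoding('backup.tar.gz', types,
#                      {'.gz': 'gzip', '.bz2': 'bzip2'}, 'text/html')
#   -> ('application/x-tar', 'gzip')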
class File(resource.Resource, styles.Versioned, filepath.FilePath):
"""
File is a resource that represents a plain non-interpreted file
(although it can look for an extension like .rpy or .cgi and hand the
file to a processor for interpretation if you wish). Its constructor
takes a file path.
Alternatively, you can give a directory path to the constructor. In this
case the resource will represent that directory, and its children will
be files underneath that directory. This provides access to an entire
filesystem tree with a single Resource.
If you map the URL 'http://server/FILE' to a resource created as
File('/tmp'), then http://server/FILE/ will return an HTML-formatted
listing of the /tmp/ directory, and http://server/FILE/foo/bar.html will
return the contents of /tmp/foo/bar.html .
@cvar childNotFound: L{Resource} used to render 404 Not Found error pages.
"""
contentTypes = loadMimeTypes()
contentEncodings = {
".gz" : "gzip",
".bz2": "bzip2"
}
processors = {}
indexNames = ["index", "index.html", "index.htm", "index.rpy"]
type = None
### Versioning
persistenceVersion = 6
def upgradeToVersion6(self):
self.ignoredExts = []
if self.allowExt:
self.ignoreExt("*")
del self.allowExt
def upgradeToVersion5(self):
if not isinstance(self.registry, Registry):
self.registry = Registry()
def upgradeToVersion4(self):
if not hasattr(self, 'registry'):
self.registry = {}
def upgradeToVersion3(self):
if not hasattr(self, 'allowExt'):
self.allowExt = 0
def upgradeToVersion2(self):
self.defaultType = "text/html"
def upgradeToVersion1(self):
if hasattr(self, 'indexName'):
self.indexNames = [self.indexName]
del self.indexName
def __init__(self, path, defaultType="text/html", ignoredExts=(), registry=None, allowExt=0):
"""
Create a file with the given path.
@param path: The filename of the file from which this L{File} will
serve data.
@type path: C{str}
@param defaultType: A I{major/minor}-style MIME type specifier
indicating the I{Content-Type} with which this L{File}'s data
will be served if a MIME type cannot be determined based on
C{path}'s extension.
@type defaultType: C{str}
@param ignoredExts: A sequence giving the extensions of paths in the
filesystem which will be ignored for the purposes of child
lookup. For example, if C{ignoredExts} is C{(".bar",)} and
C{path} is a directory containing a file named C{"foo.bar"}, a
request for the C{"foo"} child of this resource will succeed
with a L{File} pointing to C{"foo.bar"}.
@param registry: The registry object being used to handle this
request. If C{None}, one will be created.
@type registry: L{Registry}
@param allowExt: Ignored parameter, only present for backwards
compatibility. Do not pass a value for this parameter.
"""
resource.Resource.__init__(self)
filepath.FilePath.__init__(self, path)
self.defaultType = defaultType
if ignoredExts in (0, 1) or allowExt:
warnings.warn("ignoredExts should receive a list, not a boolean")
if ignoredExts or allowExt:
self.ignoredExts = ['*']
else:
self.ignoredExts = []
else:
self.ignoredExts = list(ignoredExts)
self.registry = registry or Registry()
def ignoreExt(self, ext):
"""Ignore the given extension.
Serve file.ext if file is requested
"""
self.ignoredExts.append(ext)
childNotFound = resource.NoResource("File not found.")
def directoryListing(self):
return DirectoryLister(self.path,
self.listNames(),
self.contentTypes,
self.contentEncodings,
self.defaultType)
def getChild(self, path, request):
"""
If this L{File}'s path refers to a directory, return a L{File}
referring to the file named C{path} in that directory.
If C{path} is the empty string, return a L{DirectoryLister} instead.
"""
self.restat(reraise=False)
if not self.isdir():
return self.childNotFound
if path:
try:
fpath = self.child(path)
except filepath.InsecurePath:
return self.childNotFound
else:
fpath = self.childSearchPreauth(*self.indexNames)
if fpath is None:
return self.directoryListing()
if not fpath.exists():
fpath = fpath.siblingExtensionSearch(*self.ignoredExts)
if fpath is None:
return self.childNotFound
if platformType == "win32":
# don't want .RPY to be different than .rpy, since that would allow
# source disclosure.
processor = InsensitiveDict(self.processors).get(fpath.splitext()[1])
else:
processor = self.processors.get(fpath.splitext()[1])
if processor:
return resource.IResource(processor(fpath.path, self.registry))
return self.createSimilarFile(fpath.path)
# methods to allow subclasses to e.g. decrypt files on the fly:
def openForReading(self):
"""Open a file and return it."""
return self.open()
def getFileSize(self):
"""Return file size."""
return self.getsize()
def _parseRangeHeader(self, range):
"""
Parse the value of a Range header into (start, stop) pairs.
In a given pair, either of start or stop can be None, signifying that
no value was provided, but not both.
@return: A list C{[(start, stop)]} of pairs of length at least one.
@raise ValueError: if the header is syntactically invalid or if the
Bytes-Unit is anything other than 'bytes'.
"""
try:
kind, value = range.split('=', 1)
except ValueError:
raise ValueError("Missing '=' separator")
kind = kind.strip()
if kind != 'bytes':
raise ValueError("Unsupported Bytes-Unit: %r" % (kind,))
unparsedRanges = filter(None, map(str.strip, value.split(',')))
parsedRanges = []
for byteRange in unparsedRanges:
try:
start, end = byteRange.split('-', 1)
except ValueError:
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
if start:
try:
start = int(start)
except ValueError:
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
else:
start = None
if end:
try:
end = int(end)
except ValueError:
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
else:
end = None
if start is not None:
if end is not None and start > end:
# Start must be less than or equal to end or it is invalid.
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
elif end is None:
# One or both of start and end must be specified. Omitting
# both is invalid.
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
parsedRanges.append((start, end))
return parsedRanges
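# --- Added sketch (not part of the original module) ---
# A multi-range header parses into one (start, stop) pair per range, with
# None standing in for an omitted bound:
#
#   self._parseRangeHeader('bytes=0-499,500-999,-200,9500-')
#   -> [(0, 499), (500, 999), (None, 200), (9500, None)]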
def _rangeToOffsetAndSize(self, start, end):
"""
Convert a start and end from a Range header to an offset and size.
This method checks that the resulting range overlaps with the resource
being served (and so has the value of C{getFileSize()} as an indirect
input).
Either but not both of start or end can be C{None}:
- Omitted start means that the end value is actually a start value
relative to the end of the resource.
- Omitted end means the end of the resource should be the end of
the range.
End is interpreted as inclusive, as per RFC 2616.
If this range doesn't overlap with any of this resource, C{(0, 0)} is
returned, which is not otherwise a valid return value.
@param start: The start value from the header, or C{None} if one was
not present.
@param end: The end value from the header, or C{None} if one was not
present.
@return: C{(offset, size)} where offset is how far into this resource
the range begins and size is how long the range is, or C{(0, 0)} if the
range does not overlap this resource.
"""
size = self.getFileSize()
if start is None:
start = size - end
end = size
elif end is None:
end = size
elif end < size:
end += 1
elif end > size:
end = size
if start >= size:
start = end = 0
return start, (end - start)
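# --- Added sketch (not part of the original module) ---
# For a 1000-byte resource (getFileSize() == 1000):
#
#   (0, 499)     -> (0, 500)    # first 500 bytes
#   (None, 200)  -> (800, 200)  # suffix range: the last 200 bytes
#   (950, None)  -> (950, 50)   # from byte 950 to the end
#   (2000, 2999) -> (0, 0)      # no overlap, unsatisfiable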
def _contentRange(self, offset, size):
"""
Return a string suitable for the value of a Content-Range header for a
range with the given offset and size.
The offset and size are not sanity checked in any way.
@param offset: How far into this resource the range begins.
@param size: How long the range is.
@return: The value as appropriate for the value of a Content-Range
header.
"""
return 'bytes %d-%d/%d' % (
offset, offset + size - 1, self.getFileSize())
def _doSingleRangeRequest(self, request, (start, end)):
"""
Set up the response for Range headers that specify a single range.
This method checks if the request is satisfiable and sets the response
code and Content-Range header appropriately. The return value
indicates which part of the resource to return.
@param request: The Request object.
@param start: The start of the byte range as specified by the header.
@param end: The end of the byte range as specified by the header. At
most one of C{start} and C{end} may be C{None}.
@return: A 2-tuple of the offset and size of the range to return.
offset == size == 0 indicates that the request is not satisfiable.
"""
offset, size = self._rangeToOffsetAndSize(start, end)
if offset == size == 0:
# This range doesn't overlap with any of this resource, so the
# request is unsatisfiable.
request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
request.setHeader(
'content-range', 'bytes */%d' % (self.getFileSize(),))
else:
request.setResponseCode(http.PARTIAL_CONTENT)
request.setHeader(
'content-range', self._contentRange(offset, size))
return offset, size
def _doMultipleRangeRequest(self, request, byteRanges):
"""
Set up the response for Range headers that specify multiple ranges.
This method checks if the request is satisfiable and sets the response
code and Content-Type and Content-Length headers appropriately. The
return value, which is a little complicated, indicates which parts of
the resource to return and the boundaries that should separate the
parts.
In detail, the return value C{rangeInfo} is a list of 3-tuples
C{(partSeparator, partOffset, partSize)}. The
response to this request should be, for each element of C{rangeInfo},
C{partSeparator} followed by C{partSize} bytes of the resource
starting at C{partOffset}. Each C{partSeparator} includes the
MIME-style boundary and the part-specific Content-type and
Content-range headers. It is convenient to return the separator as a
concrete string from this method, because this method needs to compute
the number of bytes that will make up the response to be able to set
the Content-Length header of the response accurately.
@param request: The Request object.
@param byteRanges: A list of C{(start, end)} values as specified by
the header. For each range, at most one of C{start} and C{end}
may be C{None}.
@return: See above.
"""
matchingRangeFound = False
rangeInfo = []
contentLength = 0
boundary = "%x%x" % (int(time.time()*1000000), os.getpid())
if self.type:
contentType = self.type
else:
contentType = 'bytes' # It's what Apache does...
for start, end in byteRanges:
partOffset, partSize = self._rangeToOffsetAndSize(start, end)
if partOffset == partSize == 0:
continue
contentLength += partSize
matchingRangeFound = True
partContentRange = self._contentRange(partOffset, partSize)
partSeparator = (
"\r\n"
"--%s\r\n"
"Content-type: %s\r\n"
"Content-range: %s\r\n"
"\r\n") % (boundary, contentType, partContentRange)
contentLength += len(partSeparator)
rangeInfo.append((partSeparator, partOffset, partSize))
if not matchingRangeFound:
request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
request.setHeader(
'content-length', '0')
request.setHeader(
'content-range', 'bytes */%d' % (self.getFileSize(),))
return [], ''
finalBoundary = "\r\n--" + boundary + "--\r\n"
rangeInfo.append((finalBoundary, 0, 0))
request.setResponseCode(http.PARTIAL_CONTENT)
request.setHeader(
'content-type', 'multipart/byteranges; boundary="%s"' % (boundary,))
request.setHeader(
'content-length', contentLength + len(finalBoundary))
return rangeInfo
def _setContentHeaders(self, request, size=None):
"""
Set the Content-length and Content-type headers for this request.
This method is not appropriate for requests for multiple byte ranges;
L{_doMultipleRangeRequest} will set these headers in that case.
@param request: The L{Request} object.
@param size: The size of the response. If not specified, default to
C{self.getFileSize()}.
"""
if size is None:
size = self.getFileSize()
request.setHeader('content-length', str(size))
if self.type:
request.setHeader('content-type', self.type)
if self.encoding:
request.setHeader('content-encoding', self.encoding)
def makeProducer(self, request, fileForReading):
"""
Make a L{StaticProducer} that will produce the body of this response.
This method will also set the response code and Content-* headers.
@param request: The L{Request} object.
@param fileForReading: The file object containing the resource.
@return: A L{StaticProducer}. Calling C{.start()} on this will begin
producing the response.
"""
byteRange = request.getHeader('range')
if byteRange is None:
self._setContentHeaders(request)
request.setResponseCode(http.OK)
return NoRangeStaticProducer(request, fileForReading)
try:
parsedRanges = self._parseRangeHeader(byteRange)
except ValueError:
log.msg("Ignoring malformed Range header %r" % (byteRange,))
self._setContentHeaders(request)
request.setResponseCode(http.OK)
return NoRangeStaticProducer(request, fileForReading)
if len(parsedRanges) == 1:
offset, size = self._doSingleRangeRequest(
request, parsedRanges[0])
self._setContentHeaders(request, size)
return SingleRangeStaticProducer(
request, fileForReading, offset, size)
else:
rangeInfo = self._doMultipleRangeRequest(request, parsedRanges)
return MultipleRangeStaticProducer(
request, fileForReading, rangeInfo)
def render_GET(self, request):
"""
Begin sending the contents of this L{File} (or a subset of the
contents, based on the 'range' header) to the given request.
"""
self.restat(False)
if self.type is None:
self.type, self.encoding = getTypeAndEncoding(self.basename(),
self.contentTypes,
self.contentEncodings,
self.defaultType)
if not self.exists():
return self.childNotFound.render(request)
if self.isdir():
return self.redirect(request)
request.setHeader('accept-ranges', 'bytes')
try:
fileForReading = self.openForReading()
except IOError, e:
import errno
if e[0] == errno.EACCES:
return resource.ForbiddenResource().render(request)
else:
raise
if request.setLastModified(self.getmtime()) is http.CACHED:
return ''
producer = self.makeProducer(request, fileForReading)
if request.method == 'HEAD':
return ''
producer.start()
# and make sure the connection doesn't get closed
return server.NOT_DONE_YET
render_HEAD = render_GET
def redirect(self, request):
return redirectTo(addSlash(request), request)
def listNames(self):
if not self.isdir():
return []
directory = self.listdir()
directory.sort()
return directory
def listEntities(self):
return map(lambda fileName, self=self: self.createSimilarFile(os.path.join(self.path, fileName)), self.listNames())
def createSimilarFile(self, path):
f = self.__class__(path, self.defaultType, self.ignoredExts, self.registry)
# refactoring by steps, here - constructor should almost certainly take these
f.processors = self.processors
f.indexNames = self.indexNames[:]
f.childNotFound = self.childNotFound
return f
class StaticProducer(object):
"""
Superclass for classes that implement the business of producing.
@ivar request: The L{IRequest} to write the contents of the file to.
@ivar fileObject: The file the contents of which to write to the request.
"""
implements(interfaces.IPullProducer)
bufferSize = abstract.FileDescriptor.bufferSize
def __init__(self, request, fileObject):
"""
Initialize the instance.
"""
self.request = request
self.fileObject = fileObject
def start(self):
raise NotImplementedError(self.start)
def resumeProducing(self):
raise NotImplementedError(self.resumeProducing)
def stopProducing(self):
"""
Stop producing data.
L{IPullProducer.stopProducing} is called when our consumer has died,
and subclasses also call this method when they are done producing
data.
"""
self.fileObject.close()
self.request = None
class NoRangeStaticProducer(StaticProducer):
"""
A L{StaticProducer} that writes the entire file to the request.
"""
def start(self):
self.request.registerProducer(self, False)
def resumeProducing(self):
if not self.request:
return
data = self.fileObject.read(self.bufferSize)
if data:
# this .write will spin the reactor, calling .doWrite and then
# .resumeProducing again, so be prepared for a re-entrant call
self.request.write(data)
else:
self.request.unregisterProducer()
self.request.finish()
self.stopProducing()
class SingleRangeStaticProducer(StaticProducer):
"""
A L{StaticProducer} that writes a single chunk of a file to the request.
"""
def __init__(self, request, fileObject, offset, size):
"""
Initialize the instance.
@param request: See L{StaticProducer}.
@param fileObject: See L{StaticProducer}.
@param offset: The offset into the file of the chunk to be written.
@param size: The size of the chunk to write.
"""
StaticProducer.__init__(self, request, fileObject)
self.offset = offset
self.size = size
def start(self):
self.fileObject.seek(self.offset)
self.bytesWritten = 0
self.request.registerProducer(self, 0)
def resumeProducing(self):
if not self.request:
return
data = self.fileObject.read(
min(self.bufferSize, self.size - self.bytesWritten))
if data:
self.bytesWritten += len(data)
# this .write will spin the reactor, calling .doWrite and then
# .resumeProducing again, so be prepared for a re-entrant call
self.request.write(data)
if self.request and self.bytesWritten == self.size:
self.request.unregisterProducer()
self.request.finish()
self.stopProducing()
class MultipleRangeStaticProducer(StaticProducer):
"""
A L{StaticProducer} that writes several chunks of a file to the request.
"""
def __init__(self, request, fileObject, rangeInfo):
"""
Initialize the instance.
@param request: See L{StaticProducer}.
@param fileObject: See L{StaticProducer}.
@param rangeInfo: A list of tuples C{[(boundary, offset, size)]}
where:
- C{boundary} will be written to the request first.
- C{offset} the offset into the file of chunk to write.
- C{size} the size of the chunk to write.
"""
StaticProducer.__init__(self, request, fileObject)
self.rangeInfo = rangeInfo
def start(self):
self.rangeIter = iter(self.rangeInfo)
self._nextRange()
self.request.registerProducer(self, 0)
def _nextRange(self):
self.partBoundary, partOffset, self._partSize = self.rangeIter.next()
self._partBytesWritten = 0
self.fileObject.seek(partOffset)
def resumeProducing(self):
if not self.request:
return
data = []
dataLength = 0
done = False
while dataLength < self.bufferSize:
if self.partBoundary:
dataLength += len(self.partBoundary)
data.append(self.partBoundary)
self.partBoundary = None
p = self.fileObject.read(
min(self.bufferSize - dataLength,
self._partSize - self._partBytesWritten))
self._partBytesWritten += len(p)
dataLength += len(p)
data.append(p)
if self.request and self._partBytesWritten == self._partSize:
try:
self._nextRange()
except StopIteration:
done = True
break
self.request.write(''.join(data))
if done:
self.request.unregisterProducer()
self.request.finish()
self.request = None
class FileTransfer(pb.Viewable):
"""
A class to represent the transfer of a file over the network.
"""
request = None
def __init__(self, file, size, request):
warnings.warn(
"FileTransfer is deprecated since Twisted 9.0. "
"Use a subclass of StaticProducer instead.",
DeprecationWarning, stacklevel=2)
self.file = file
self.size = size
self.request = request
self.written = self.file.tell()
request.registerProducer(self, 0)
def resumeProducing(self):
if not self.request:
return
data = self.file.read(min(abstract.FileDescriptor.bufferSize, self.size - self.written))
if data:
self.written += len(data)
# this .write will spin the reactor, calling .doWrite and then
# .resumeProducing again, so be prepared for a re-entrant call
self.request.write(data)
if self.request and self.file.tell() == self.size:
self.request.unregisterProducer()
self.request.finish()
self.request = None
def pauseProducing(self):
pass
def stopProducing(self):
self.file.close()
self.request = None
# Remotely relay producer interface.
def view_resumeProducing(self, issuer):
self.resumeProducing()
def view_pauseProducing(self, issuer):
self.pauseProducing()
def view_stopProducing(self, issuer):
self.stopProducing()
class ASISProcessor(resource.Resource):
"""
Serve files exactly as responses without generating a status-line or any
headers. Inspired by Apache's mod_asis.
"""
def __init__(self, path, registry=None):
resource.Resource.__init__(self)
self.path = path
self.registry = registry or Registry()
def render(self, request):
request.startedWriting = 1
res = File(self.path, registry=self.registry)
return res.render(request)
def formatFileSize(size):
"""
Format the given file size in bytes to human readable format.
"""
if size < 1024:
return '%iB' % size
elif size < (1024 ** 2):
return '%iK' % (size / 1024)
elif size < (1024 ** 3):
return '%iM' % (size / (1024 ** 2))
else:
return '%iG' % (size / (1024 ** 3))
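# --- Added usage sketch (not part of the original module) ---
# Sizes are truncated to whole units (integer division):
#
#   formatFileSize(512)          -> '512B'
#   formatFileSize(2048)         -> '2K'
#   formatFileSize(5 * 1024**2)  -> '5M'
#   formatFileSize(3 * 1024**3)  -> '3G'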
class DirectoryLister(resource.Resource):
"""
Print the content of a directory.
@ivar template: page template used to render the content of the directory.
It must contain the format keys B{header} and B{tableContent}.
@type template: C{str}
@ivar linePattern: template used to render one line in the listing table.
It must contain the format keys B{class}, B{href}, B{text}, B{size},
B{type} and B{encoding}.
@type linePattern: C{str}
@ivar contentEncodings: a mapping of extensions to encoding types.
@type contentEncodings: C{dict}
@ivar defaultType: default type used when no mimetype is detected.
@type defaultType: C{str}
@ivar dirs: filtered content of C{path}, if the whole content should not be
displayed (default to C{None}, which means the actual content of
C{path} is printed).
@type dirs: C{NoneType} or C{list}
@ivar path: directory which content should be listed.
@type path: C{str}
"""
template = """<html>
<head>
<title>%(header)s</title>
<style>
.even-dir { background-color: #efe0ef }
.even { background-color: #eee }
.odd-dir {background-color: #f0d0ef }
.odd { background-color: #dedede }
.icon { text-align: center }
.listing {
margin-left: auto;
margin-right: auto;
width: 50%%;
padding: 0.1em;
}
body { border: 0; padding: 0; margin: 0; background-color: #efefef; }
h1 {padding: 0.1em; background-color: #777; color: white; border-bottom: thin white dashed;}
</style>
</head>
<body>
<h1>%(header)s</h1>
<table>
<thead>
<tr>
<th>Filename</th>
<th>Size</th>
<th>Content type</th>
<th>Content encoding</th>
</tr>
</thead>
<tbody>
%(tableContent)s
</tbody>
</table>
</body>
</html>
"""
linePattern = """<tr class="%(class)s">
<td><a href="%(href)s">%(text)s</a></td>
<td>%(size)s</td>
<td>%(type)s</td>
<td>%(encoding)s</td>
</tr>
"""
def __init__(self, pathname, dirs=None,
contentTypes=File.contentTypes,
contentEncodings=File.contentEncodings,
defaultType='text/html'):
resource.Resource.__init__(self)
self.contentTypes = contentTypes
self.contentEncodings = contentEncodings
self.defaultType = defaultType
# dirs allows usage of the File to specify what gets listed
self.dirs = dirs
self.path = pathname
def _getFilesAndDirectories(self, directory):
"""
Helper returning files and directories in given directory listing, with
attributes to be used to build a table content with
C{self.linePattern}.
@return: tuple of (directories, files)
@rtype: C{tuple} of C{list}
"""
files = []
dirs = []
for path in directory:
url = urllib.quote(path, "/")
escapedPath = cgi.escape(path)
if os.path.isdir(os.path.join(self.path, path)):
url = url + '/'
dirs.append({'text': escapedPath + "/", 'href': url,
'size': '', 'type': '[Directory]',
'encoding': ''})
else:
mimetype, encoding = getTypeAndEncoding(path, self.contentTypes,
self.contentEncodings,
self.defaultType)
try:
size = os.stat(os.path.join(self.path, path)).st_size
except OSError:
continue
files.append({
'text': escapedPath, "href": url,
'type': '[%s]' % mimetype,
'encoding': (encoding and '[%s]' % encoding or ''),
'size': formatFileSize(size)})
return dirs, files
def _buildTableContent(self, elements):
"""
Build a table content using C{self.linePattern} and giving elements odd
and even classes.
"""
tableContent = []
rowClasses = itertools.cycle(['odd', 'even'])
for element, rowClass in zip(elements, rowClasses):
element["class"] = rowClass
tableContent.append(self.linePattern % element)
return tableContent
def render(self, request):
"""
Render a listing of the content of C{self.path}.
"""
request.setHeader("content-type", "text/html; charset=utf-8")
if self.dirs is None:
directory = os.listdir(self.path)
directory.sort()
else:
directory = self.dirs
dirs, files = self._getFilesAndDirectories(directory)
tableContent = "".join(self._buildTableContent(dirs + files))
header = "Directory listing for %s" % (
cgi.escape(urllib.unquote(request.uri)),)
return self.template % {"header": header, "tableContent": tableContent}
def __repr__(self):
return '<DirectoryLister of %r>' % self.path
__str__ = __repr__
|
agpl-3.0
|
by46/tunicorn
|
tunicorn/packages/werkzeug/serving.py
|
73
|
27736
|
# -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
    However there are some caveats. Source code won't reload itself when
    changed and each time you kill the server using ``^C`` you get a
    `KeyboardInterrupt` error. While the latter is easy to solve, the former
    can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
    You can also pass it an `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import socket
import sys
import signal
try:
import ssl
except ImportError:
class _SslDummy(object):
def __getattr__(self, name):
raise RuntimeError('SSL support unavailable')
ssl = _SslDummy()
def _get_openssl_crypto_module():
try:
from OpenSSL import crypto
except ImportError:
raise TypeError('Using ad-hoc certificates requires the pyOpenSSL '
'library.')
else:
return crypto
try:
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from socketserver import ThreadingMixIn, ForkingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
# important: do not use relative imports here or python -m will break
import werkzeug
from werkzeug._internal import _log
from werkzeug._compat import PY2, reraise, wsgi_encoding_dance
from werkzeug.urls import url_parse, url_unquote
from werkzeug.exceptions import InternalServerError
LISTEN_QUEUE = 128
can_open_by_fd = hasattr(socket, 'fromfd')
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return 'Werkzeug/' + werkzeug.__version__
def make_environ(self):
request_url = url_parse(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = self.server.ssl_context is None and 'http' or 'https'
path_info = url_unquote(request_url.path)
environ = {
'wsgi.version': (1, 0),
'wsgi.url_scheme': url_scheme,
'wsgi.input': self.rfile,
'wsgi.errors': sys.stderr,
'wsgi.multithread': self.server.multithread,
'wsgi.multiprocess': self.server.multiprocess,
'wsgi.run_once': False,
'werkzeug.server.shutdown': shutdown_server,
'SERVER_SOFTWARE': self.server_version,
'REQUEST_METHOD': self.command,
'SCRIPT_NAME': '',
'PATH_INFO': wsgi_encoding_dance(path_info),
'QUERY_STRING': wsgi_encoding_dance(request_url.query),
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
'REMOTE_ADDR': self.address_string(),
'REMOTE_PORT': self.port_integer(),
'SERVER_NAME': self.server.server_address[0],
'SERVER_PORT': str(self.server.server_address[1]),
'SERVER_PROTOCOL': self.request_version
}
for key, value in self.headers.items():
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
if request_url.scheme and request_url.netloc:
environ['HTTP_HOST'] = request_url.netloc
return environ
def run_wsgi(self):
if self.headers.get('Expect', '').lower().strip() == '100-continue':
self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')
self.environ = environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
self.send_response(int(code), msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if 'content-length' not in header_keys:
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert isinstance(data, bytes), 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b'')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(self.server.app)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
rv = None
try:
rv = BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e)
except Exception:
if self.server.ssl_context is None or not is_ssl_error():
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
return rv
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
# reloader active
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
os.kill(os.getpid(), sig)
# python 2.7
self.server._BaseServer__shutdown_request = True
# python 2.6
self.server._BaseServer__serving = False
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ''
if self.request_version != 'HTTP/0.9':
hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
self.wfile.write(hdr.encode('ascii'))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
return self.client_address[0]
def port_integer(self):
return self.client_address[1]
def log_request(self, code='-', size='-'):
self.log('info', '"%s" %s %s', self.requestline, code, size)
def log_error(self, *args):
self.log('error', *args)
def log_message(self, format, *args):
self.log('info', format, *args)
def log(self, type, message, *args):
_log(type, '%s - - [%s] %s\n' % (self.address_string(),
self.log_date_time_string(),
message % args))
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
from random import random
crypto = _get_openssl_crypto_module()
# pretty damn sure that this is not actually accepted by anyone
if cn is None:
cn = '*'
cert = crypto.X509()
cert.set_serial_number(int(random() * sys.maxsize))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = cn
subject.O = 'Dummy Certificate'
issuer = cert.get_issuer()
issuer.CN = 'Untrusted Authority'
issuer.O = 'Self-Signed'
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 1024)
cert.set_pubkey(pkey)
cert.sign(pkey, 'md5')
return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
from OpenSSL import crypto
if host is not None:
cn = '*.%s/CN=%s' % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
cert_file = base_path + '.crt'
pkey_file = base_path + '.key'
with open(cert_file, 'wb') as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(pkey_file, 'wb') as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
return cert_file, pkey_file
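# Hedged usage sketch (editor's addition): generating a reusable development
# certificate once; the base path and host are example values only. The
# returned (cert_file, pkey_file) tuple can be passed to run_simple as the
# ssl_context argument.
def _example_dev_cert(base_path='/tmp/dev_ssl'):
    return make_ssl_devcert(base_path, host='localhost')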
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
crypto = _get_openssl_crypto_module()
import tempfile
import atexit
cert, pkey = generate_adhoc_ssl_pair()
cert_handle, cert_file = tempfile.mkstemp()
pkey_handle, pkey_file = tempfile.mkstemp()
atexit.register(os.remove, pkey_file)
atexit.register(os.remove, cert_file)
os.write(cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
os.write(pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
os.close(cert_handle)
os.close(pkey_handle)
ctx = load_ssl_context(cert_file, pkey_file)
return ctx
def load_ssl_context(cert_file, pkey_file=None, protocol=None):
"""Loads SSL context from cert/private key files and optional protocol.
Many parameters are directly taken from the API of
:py:class:`ssl.SSLContext`.
:param cert_file: Path of the certificate to use.
:param pkey_file: Path of the private key to use. If not given, the key
will be obtained from the certificate file.
:param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl``
module. Defaults to ``PROTOCOL_SSLv23``.
"""
if protocol is None:
protocol = ssl.PROTOCOL_SSLv23
ctx = _SSLContext(protocol)
ctx.load_cert_chain(cert_file, pkey_file)
return ctx
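# Hedged usage sketch (editor's addition): loading an SSL context from an
# existing certificate/key pair; the file paths are example values only.
def _example_load_context(cert='/etc/ssl/dev.crt', key='/etc/ssl/dev.key'):
    ctx = load_ssl_context(cert, key)
    # ctx can then be handed to run_simple(..., ssl_context=ctx).
    return ctx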
class _SSLContext(object):
'''A dummy class with a small subset of Python3's ``ssl.SSLContext``, only
intended to be used with and by Werkzeug.'''
def __init__(self, protocol):
self._protocol = protocol
self._certfile = None
self._keyfile = None
self._password = None
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._certfile = certfile
self._keyfile = keyfile or certfile
self._password = password
def wrap_socket(self, sock, **kwargs):
return ssl.wrap_socket(sock, keyfile=self._keyfile,
certfile=self._certfile,
ssl_version=self._protocol, **kwargs)
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
exc_types = (ssl.SSLError,)
try:
from OpenSSL.SSL import Error
exc_types += (Error,)
except ImportError:
pass
if error is None:
error = sys.exc_info()[1]
return isinstance(error, exc_types)
def select_ip_version(host, port):
"""Returns AF_INET4 or AF_INET6 depending on where to connect to."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
# try:
# info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
# socket.SOCK_STREAM, 0,
# socket.AI_PASSIVE)
# if info:
# return info[0][0]
# except socket.gaierror:
# pass
if ':' in host and hasattr(socket, 'AF_INET6'):
return socket.AF_INET6
return socket.AF_INET
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
request_queue_size = LISTEN_QUEUE
def __init__(self, host, port, app, handler=None,
passthrough_errors=False, ssl_context=None, fd=None):
if handler is None:
handler = WSGIRequestHandler
self.address_family = select_ip_version(host, port)
if fd is not None:
real_sock = socket.fromfd(fd, self.address_family,
socket.SOCK_STREAM)
port = 0
HTTPServer.__init__(self, (host, int(port)), handler)
self.app = app
self.passthrough_errors = passthrough_errors
self.shutdown_signal = False
self.host = host
self.port = port
# Patch in the original socket.
if fd is not None:
self.socket.close()
self.socket = real_sock
self.server_address = self.socket.getsockname()
if ssl_context is not None:
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
if ssl_context == 'adhoc':
ssl_context = generate_adhoc_ssl_context()
# If we are on Python 2 the return value from socket.fromfd
# is an internal socket object but what we need for ssl wrap
# is the wrapper around it :(
sock = self.socket
if PY2 and not isinstance(sock, socket.socket):
sock = socket.socket(sock.family, sock.type, sock.proto, sock)
self.socket = ssl_context.wrap_socket(sock, server_side=True)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
self.shutdown_signal = False
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
finally:
self.server_close()
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(self, host, port, app, processes=40, handler=None,
passthrough_errors=False, ssl_context=None, fd=None):
BaseWSGIServer.__init__(self, host, port, app, handler,
passthrough_errors, ssl_context, fd)
self.max_children = processes
def make_server(host=None, port=None, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None, fd=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context, fd=fd)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context, fd=fd)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context, fd=fd)
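# Hedged usage sketch (editor's addition): building a threaded server directly
# instead of going through run_simple; host and port are example values only.
def _example_threaded_server(app, host='127.0.0.1', port=8080):
    srv = make_server(host, port, app, threaded=True)
    srv.serve_forever()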
def is_running_from_reloader():
"""Checks if the application is running from within the Werkzeug
reloader subprocess.
.. versionadded:: 0.10
"""
return os.environ.get('WERKZEUG_RUN_MAIN') == 'true'
def run_simple(hostname, port, application, use_reloader=False,
use_debugger=False, use_evalex=True,
extra_files=None, reloader_interval=1,
reloader_type='auto', threaded=False,
processes=1, request_handler=None, static_files=None,
passthrough_errors=False, ssl_context=None):
"""Start a WSGI application. Optional features include a reloader,
multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
Added support for automatically loading a SSL context from certificate
file and private key.
.. versionadded:: 0.9
Added command-line interface.
.. versionadded:: 0.10
Improved the reloader and added support for changing the backend
through the `reloader_type` parameter. See :ref:`reloader`
for more information.
:param hostname: The host for the application. eg: ``'localhost'``
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param reloader_type: the type of reloader to use. The default is
auto detection. Valid values are ``'stat'`` and
``'watchdog'``. See :ref:`reloader` for more
information.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a dict of paths for static files. This works exactly
like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an
:class:`ssl.SSLContext`, a tuple in the form
``(cert_file, pkey_file)``, the string ``'adhoc'`` if
the server should automatically create one, or ``None``
to disable SSL (which is the default).
"""
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from werkzeug.wsgi import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def log_startup(sock):
display_hostname = hostname not in ('', '*') and hostname or 'localhost'
if ':' in display_hostname:
display_hostname = '[%s]' % display_hostname
quit_msg = '(Press CTRL+C to quit)'
port = sock.getsockname()[1]
_log('info', ' * Running on %s://%s:%d/ %s',
ssl_context is None and 'http' or 'https',
display_hostname, port, quit_msg)
def inner():
try:
fd = int(os.environ['WERKZEUG_SERVER_FD'])
except (LookupError, ValueError):
fd = None
srv = make_server(hostname, port, application, threaded,
processes, request_handler,
passthrough_errors, ssl_context,
fd=fd)
if fd is None:
log_startup(srv.socket)
srv.serve_forever()
if use_reloader:
# If we're not running already in the subprocess that is the
# reloader we want to open up a socket early to make sure the
# port is actually available.
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
if port == 0 and not can_open_by_fd:
raise ValueError('Cannot bind to a random port with enabled '
'reloader if the Python interpreter does '
'not support socket opening by fd.')
# Create and destroy a socket so that any exceptions are
# raised before we spawn a separate Python interpreter and
# lose this ability.
address_family = select_ip_version(hostname, port)
s = socket.socket(address_family, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((hostname, port))
if hasattr(s, 'set_inheritable'):
s.set_inheritable(True)
# If we can open the socket by file descriptor, then we can just
# reuse this one and our socket will survive the restarts.
if can_open_by_fd:
os.environ['WERKZEUG_SERVER_FD'] = str(s.fileno())
s.listen(LISTEN_QUEUE)
log_startup(s)
else:
s.close()
from ._reloader import run_with_reloader
run_with_reloader(inner, extra_files, reloader_interval,
reloader_type)
else:
inner()
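# Hedged usage sketch (editor's addition): a stand-in WSGI application served
# with static file sharing and an ad-hoc SSL certificate; the static path is
# an example value only. The call itself is left commented out because
# run_simple blocks until the server is stopped.
def _example_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from the development server']

# run_simple('localhost', 8443, _example_app,
#            static_files={'/static': '/var/www/static'},
#            ssl_context='adhoc', use_reloader=False)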
def run_with_reloader(*args, **kwargs):
# People keep using undocumented APIs. Do not use this function
# please, we do not guarantee that it continues working.
from ._reloader import run_with_reloader
return run_with_reloader(*args, **kwargs)
def main():
'''A simple command-line interface for :py:func:`run_simple`.'''
# in contrast to argparse, this works at least under Python < 2.7
import optparse
from werkzeug.utils import import_string
parser = optparse.OptionParser(
usage='Usage: %prog [options] app_module:app_object')
parser.add_option('-b', '--bind', dest='address',
help='The hostname:port the app should listen on.')
parser.add_option('-d', '--debug', dest='use_debugger',
action='store_true', default=False,
help='Use Werkzeug\'s debugger.')
parser.add_option('-r', '--reload', dest='use_reloader',
action='store_true', default=False,
help='Reload Python process if modules change.')
options, args = parser.parse_args()
hostname, port = None, None
if options.address:
address = options.address.split(':')
hostname = address[0]
if len(address) > 1:
port = address[1]
if len(args) != 1:
        sys.stdout.write('No application supplied, or too many. See --help\n')
sys.exit(1)
app = import_string(args[0])
run_simple(
hostname=(hostname or '127.0.0.1'), port=int(port or 5000),
application=app, use_reloader=options.use_reloader,
use_debugger=options.use_debugger
)
if __name__ == '__main__':
main()
|
mit
|
majora2007/plexpy
|
lib/apscheduler/schedulers/tornado.py
|
33
|
1792
|
from __future__ import absolute_import
from datetime import timedelta
from functools import wraps
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref
try:
from tornado.ioloop import IOLoop
except ImportError: # pragma: nocover
raise ImportError('TornadoScheduler requires tornado installed')
def run_in_ioloop(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self._ioloop.add_callback(func, self, *args, **kwargs)
return wrapper
class TornadoScheduler(BaseScheduler):
"""
A scheduler that runs on a Tornado IOLoop.
=========== ===============================================================
``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop)
=========== ===============================================================
"""
_ioloop = None
_timeout = None
def start(self):
super(TornadoScheduler, self).start()
self.wakeup()
@run_in_ioloop
def shutdown(self, wait=True):
super(TornadoScheduler, self).shutdown(wait)
self._stop_timer()
def _configure(self, config):
self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current()
super(TornadoScheduler, self)._configure(config)
def _start_timer(self, wait_seconds):
self._stop_timer()
if wait_seconds is not None:
self._timeout = self._ioloop.add_timeout(timedelta(seconds=wait_seconds), self.wakeup)
def _stop_timer(self):
if self._timeout:
self._ioloop.remove_timeout(self._timeout)
del self._timeout
@run_in_ioloop
def wakeup(self):
self._stop_timer()
wait_seconds = self._process_jobs()
self._start_timer(wait_seconds)
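# Hedged usage sketch (editor's addition): scheduling a periodic job on the
# current IOLoop; the job body and the 5-second interval are example values
# only.
def _example_start_scheduler():
    def tick():
        print('tick')
    scheduler = TornadoScheduler()
    scheduler.add_job(tick, 'interval', seconds=5)
    scheduler.start()
    IOLoop.current().start()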
|
gpl-3.0
|
MartialD/hyperspy
|
hyperspy/events.py
|
5
|
18478
|
import inspect
import collections
from contextlib import contextmanager
from functools import wraps # Used in exec statement
import re
class Events(object):
"""
Events container.
All available events are attributes of this class.
"""
def __init__(self):
self._events = {}
@contextmanager
def suppress(self):
"""
Use this function with a 'with' statement to temporarily suppress
        all callbacks of all events in the container. When the 'with' block
        completes, the old suppression values will be restored.
Example usage
-------------
>>> with obj.events.suppress():
... # Any events triggered by assignments are prevented:
... obj.val_a = a
... obj.val_b = b
>>> # Trigger one event instead:
>>> obj.events.values_changed.trigger()
See also
--------
Event.suppress
Event.suppress_callback
"""
old = {}
try:
for e in self._events.values():
old[e] = e._suppress
e._suppress = True
yield
finally:
for e, oldval in old.items():
e._suppress = oldval
def _update_doc(self):
"""
Updates the doc to reflect the events that are contained
"""
new_doc = self.__class__.__doc__
new_doc += '\n\tEvents:\n\t-------\n'
for name, e in self._events.items():
edoc = inspect.getdoc(e) or ''
doclines = edoc.splitlines()
e_short = doclines[0] if len(doclines) > 0 else edoc
new_doc += '\t%s :\n\t\t%s\n' % (name, e_short)
new_doc = new_doc.replace('\t', ' ')
self.__doc__ = new_doc
def __setattr__(self, name, value):
"""
Magic to enable having `Event`s as attributes, and keeping them
separate from other attributes.
If it's an `Event`, store it in self._events, otherwise set attribute
in normal way.
"""
if isinstance(value, Event):
self._events[name] = value
self._update_doc()
else:
super(Events, self).__setattr__(name, value)
def __getattr__(self, name):
"""
Magic to enable having `Event`s as attributes, and keeping them
separate from other attributes.
Returns Event attribute `name` (__getattr__ is only called if attribute
could not be found in the normal way).
"""
return self._events[name]
def __delattr__(self, name):
"""
Magic to enable having `Event`s as attributes, and keeping them
separate from other attributes.
Deletes attribute from self._events if present, otherwise delete
attribute in normal way.
"""
if name in self._events:
del self._events[name]
self._update_doc()
else:
super(Events, self).__delattr__(name)
def __dir__(self):
"""
Magic to enable having `Event`s as attributes, and keeping them
separate from other attributes.
Makes sure tab-completion works in IPython etc.
"""
d = dir(type(self))
d.extend(self.__dict__.keys())
d.extend(self._events.keys())
return sorted(set(d))
def __iter__(self):
"""
Allows iteration of all events in the container
"""
return self._events.values().__iter__()
def __repr__(self):
return "<hyperspy.events.Events: " + repr(self._events) + ">"
class Event(object):
def __init__(self, doc='', arguments=None):
"""
Create an Event object.
Arguments:
----------
doc : str
Optional docstring for the new Event.
arguments : iterable
Pass to define the arguments of the trigger() function. Each
element must either be an argument name, or a tuple containing
the argument name and the argument's default value.
Example usage:
--------------
>>> from hyperspy.events import Event
>>> Event()
<hyperspy.events.Event: set()>
>>> Event(doc="This event has a docstring!").__doc__
'This event has a docstring!'
>>> e1 = Event()
>>> e2 = Event(arguments=('arg1', ('arg2', None)))
>>> e1.trigger(arg1=12, arg2=43, arg3='str', arg4=4.3) # Can trigger with whatever
>>> e2.trigger(arg1=11, arg2=22, arg3=3.4)
Traceback (most recent call last):
...
TypeError: trigger() got an unexpected keyword argument 'arg3'
"""
self.__doc__ = doc
self._arguments = tuple(arguments) if arguments else None
self._connected_all = set()
self._connected_some = {}
self._connected_map = {}
self._suppress = False
self._suppressed_callbacks = set()
if arguments:
self._trigger_maker(arguments)
@property
def arguments(self):
return self._arguments
# Regex for confirming valid python identifier
_re_arg_name = re.compile("[a-zA-Z_][a-zA-Z0-9_]*")
def _trigger_maker(self, arguments):
"""
Dynamically creates a function with a signature equal to `arguments`.
Ensures that trigger can only be called with the correct arguments
"""
orig_f = self.trigger
# Validate code for exec!
defaults = []
for arg in arguments:
if isinstance(arg, (tuple, list)):
defaults.append(arg[1])
arg = arg[0]
elif len(defaults) > 0:
raise SyntaxError(
"non-default argument follows default argument")
m = self._re_arg_name.match(arg)
if m is None or m.end() != len(arg):
raise ValueError("Argument name invalid: %s" % arg)
arguments = [a[0] if isinstance(a, (tuple, list))
else a for a in arguments]
# Create the dynamic code:
arglist = ', '.join(arguments)
arg_pass = ', '.join([a + '=' + a for a in arguments])
wrap_code = u"""
@wraps(f)
def trigger(self, %s):
return f(%s)
""" % (arglist, arg_pass)
wrap_code = wrap_code.replace(" ", "") # Remove indentation
# Execute dynamic code:
gl = dict(globals())
gl.update(locals())
gl.update({'f': orig_f}) # Make sure it keeps the original!
exec(wrap_code, gl, locals())
new_f = locals()['trigger']
# Replace the trigger function with the new one
if defaults:
new_f.__defaults__ = tuple(defaults)
new_f = new_f.__get__(self, self.__class__) # Bind method to self
self.trigger = new_f
@contextmanager
def suppress(self):
"""
Use this function with a 'with' statement to temporarily suppress
        all events in the container. When the 'with' block completes, the old
suppression values will be restored.
Example usage
-------------
>>> with obj.events.myevent.suppress():
... # These would normally both trigger myevent:
... obj.val_a = a
... obj.val_b = b
Trigger manually once:
>>> obj.events.myevent.trigger()
See also
--------
suppress_callback
Events.suppress
"""
old = self._suppress
self._suppress = True
try:
yield
finally:
self._suppress = old
@contextmanager
def suppress_callback(self, function):
"""
Use this function with a 'with' statement to temporarily suppress
a single callback from being called. All other connected callbacks
        will trigger. When the 'with' block completes, the old suppression value
will be restored.
Example usage
-------------
>>> with obj.events.myevent.suppress_callback(f):
... # Events will trigger as normal, but `f` will not be called
... obj.val_a = a
... obj.val_b = b
>>> # Here, `f` will be called as before:
>>> obj.events.myevent.trigger()
See also
--------
suppress
Events.suppress
"""
was_suppressed = function in self._suppressed_callbacks
if not was_suppressed:
self._suppressed_callbacks.add(function)
try:
yield
finally:
if not was_suppressed:
self._suppressed_callbacks.discard(function)
@property
def connected(self):
""" Connected functions.
"""
ret = set()
ret.update(self._connected_all)
ret.update(self._connected_some.keys())
ret.update(self._connected_map.keys())
return ret
def connect(self, function, kwargs='all'):
"""
Connects a function to the event.
Arguments:
----------
function : callable
The function to call when the event triggers.
kwargs : {tuple or list, dictionary, 'all', 'auto'}, default "all"
If "all", all the trigger keyword arguments are passed to the
function. If a list or tuple of strings, only those keyword
arguments that are in the tuple or list are passed. If empty,
no keyword argument is passed. If dictionary, the keyword arguments
of trigger are mapped as indicated in the dictionary. For example,
{"a" : "b"} maps the trigger argument "a" to the function argument
"b".
See also
--------
disconnect
"""
if not callable(function):
raise TypeError("Only callables can be registered")
if function in self.connected:
raise ValueError("Function %s already connected to %s." %
(function, self))
if kwargs == 'auto':
spec = inspect.signature(function)
_has_args = False
_has_kwargs = False
_normal_params = []
for name, par in spec.parameters.items():
if par.kind == par.VAR_POSITIONAL:
_has_args = True
elif par.kind == par.VAR_KEYWORD:
_has_kwargs = True
else:
_normal_params.append(name)
if _has_args and not _has_kwargs:
raise NotImplementedError("Connecting to variable argument "
"functions is not supported in auto "
"connection mode.")
elif _has_kwargs:
kwargs = 'all'
else:
kwargs = _normal_params
if kwargs == "all":
self._connected_all.add(function)
elif isinstance(kwargs, dict):
self._connected_map[function] = kwargs
elif isinstance(kwargs, (tuple, list)):
self._connected_some[function] = tuple(kwargs)
else:
raise ValueError("Invalid value passed to kwargs.")
def disconnect(self, function):
"""
        Disconnects a function from the event. The passed function will be
        disconnected regardless of which 'kwargs' argument was passed to
        connect().
If you only need to temporarily prevent a function from being called,
single callback suppression is supported by the `suppress_callback`
context manager.
Parameters
----------
        function : callable
            The function to disconnect.
See also
--------
connect
suppress_callback
"""
if function in self._connected_all:
self._connected_all.remove(function)
elif function in self._connected_some:
self._connected_some.pop(function)
elif function in self._connected_map:
self._connected_map.pop(function)
else:
raise ValueError("The %s function is not connected to %s." %
(function, self))
def trigger(self, **kwargs):
"""
Triggers the event. If the event is suppressed, this does nothing.
Otherwise it calls all the connected functions with the arguments as
specified when connected.
See also
--------
suppress
suppress_callback
Events.suppress
"""
if self._suppress:
return
# Work on copies of collections of connected functions.
# Take copies initially, to ensure that all functions connected when
# event triggered are called.
connected_all = self._connected_all.difference(
self._suppressed_callbacks)
connected_some = list(self._connected_some.items())
connected_map = list(self._connected_map.items())
# Loop over all collections
for function in connected_all:
function(**kwargs)
for function, kwsl in connected_some:
if function not in self._suppressed_callbacks:
function(**{kw: kwargs.get(kw, None) for kw in kwsl})
for function, kwsd in connected_map:
if function not in self._suppressed_callbacks:
function(**{kwf: kwargs[kwt] for kwt, kwf in kwsd.items()})
def __deepcopy__(self, memo):
dc = type(self)()
memo[id(self)] = dc
return dc
def __str__(self):
if self.__doc__:
edoc = inspect.getdoc(self) or ''
doclines = edoc.splitlines()
e_short = doclines[0] if len(doclines) > 0 else edoc
text = ("<hyperspy.events.Event: " + e_short + ": " +
str(self.connected) + ">")
else:
text = self.__repr__()
return text
def __repr__(self):
return "<hyperspy.events.Event: " + repr(self.connected) + ">"
class EventSuppressor(object):
"""
Object to enforce a variety of suppression types simultaneously
Targets to be suppressed can be added by the function `add()`, or given
in the constructor. Valid targets are:
- `Event`: The entire Event will be suppressed
    - `Events`: All events in the container will be suppressed
- (Event, callback): The callback will be suppressed in Event
- (Events, callback): The callback will be suppressed in each event in
Events where it is connected.
- Any iterable collection of the above target types
Example usage
-------------
>>> es = EventSuppressor((event1, callback1), (event1, callback2))
>>> es.add(event2, callback2)
>>> es.add(event3)
>>> es.add(events_container1)
>>> es.add(events_container2, callback1)
>>> es.add(event4, (events_container3, callback2))
>>>
>>> with es.suppress():
... do_something()
"""
def __init__(self, *to_suppress):
self._cms = []
if len(to_suppress) > 0:
self.add(*to_suppress)
def _add_single(self, target):
        # Identifies and initializes the CM, but doesn't enter it
if self._is_tuple_target(target):
if isinstance(target[0], Event):
cm = target[0].suppress_callback(target[1])
self._cms.append(cm)
else:
# Don't check for function presence in event now:
# suppress_callback does this when entering
for e in target[0]:
self._cms.append(e.suppress_callback(target[1]))
else:
cm = target.suppress()
self._cms.append(cm)
def _is_tuple_target(self, candidate):
v = (isinstance(candidate, collections.Iterable) and
len(candidate) == 2 and
isinstance(candidate[0], (Event, Events)) and
callable(candidate[1]))
return v
def _is_target(self, candidate):
v = (isinstance(candidate, (Event, Events)) or
self._is_tuple_target(candidate))
return v
def add(self, *to_suppress):
"""
Add one or more targets to be suppressed
Valid targets are:
- `Event`: The entire Event will be suppressed
- `Events`: All events in the container will be suppressed
- (Event, callback): The callback will be suppressed in Event
- (Events, callback): The callback will be suppressed in each event
in Events where it is connected.
- Any iterable collection of the above target types
"""
# Remove useless layers of iterables:
while (isinstance(to_suppress, collections.Iterable) and
len(to_suppress) == 1):
to_suppress = to_suppress[0]
# If single target passed, add directly:
if self._is_target(to_suppress):
self._add_single(to_suppress)
elif isinstance(to_suppress, collections.Iterable):
if len(to_suppress) == 0:
raise ValueError("No viable suppression targets added!")
for t in to_suppress:
if self._is_target(t):
self._add_single(t)
else:
raise ValueError("No viable suppression targets added!")
@contextmanager
def suppress(self):
"""
Use this function with a 'with' statement to temporarily suppress
        all events added. When the 'with' block completes, the old suppression
values will be restored.
See also
--------
Events.suppress
Event.suppress
Event.suppress_callback
"""
# We don't suppress any exceptions, so we can use simple CM management:
cms = []
try:
for cm in self._cms:
cm.__enter__()
cms.append(cm) # Only add entered CMs to list
yield
finally:
            # Completed successfully or an exception occurred; unwind all
for cm in reversed(cms):
# We don't use exception info, so simply pass blanks
cm.__exit__(None, None, None)
|
gpl-3.0
|
JCROM-Android/jcrom_external_chromium_org
|
third_party/closure_linter/closure_linter/common/matcher.py
|
284
|
2158
|
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based JavaScript matcher classes."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
from closure_linter.common import position
from closure_linter.common import tokens
# Shorthand
Token = tokens.Token
Position = position.Position
class Matcher(object):
"""A token matcher.
Specifies a pattern to match, the type of token it represents, what mode the
token changes to, and what mode the token applies to.
Modes allow more advanced grammars to be incorporated, and are also necessary
to tokenize line by line. We can have different patterns apply to different
modes - i.e. looking for documentation while in comment mode.
Attributes:
regex: The regular expression representing this matcher.
type: The type of token indicated by a successful match.
result_mode: The mode to move to after a successful match.
"""
def __init__(self, regex, token_type, result_mode=None, line_start=False):
"""Create a new matcher template.
Args:
regex: The regular expression to match.
token_type: The type of token a successful match indicates.
result_mode: What mode to change to after a successful match. Defaults to
None, which means to not change the current mode.
      line_start: Whether this matcher should only match strings at the start
of a line.
"""
self.regex = regex
self.type = token_type
self.result_mode = result_mode
self.line_start = line_start
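# Hedged usage sketch (editor's addition): a matcher recognizing end-of-line
# comments. The 'COMMENT' token type string is illustrative only and does not
# come from closure_linter's real token type definitions.
import re
_EXAMPLE_COMMENT_MATCHER = Matcher(re.compile(r'//.*'), 'COMMENT')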
|
bsd-3-clause
|
calfonso/ansible
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_inventory.py
|
18
|
3741
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_inventory
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower inventory.
description:
- Create, update, or destroy Ansible Tower inventories. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the inventory.
required: True
description:
description:
- The description to use for the inventory.
organization:
description:
- Organization the inventory belongs to.
required: True
variables:
description:
- Inventory variables. Use C(@) to get from file.
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Add tower inventory
tower_inventory:
name: "Foo Inventory"
description: "Our Foo Cloud Servers"
organization: "Bar Org"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
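# Hedged addition (editor's note): the C(@) syntax for the `variables` option
# reads inventory variables from a file; the file name below is an example
# value only.
#
# - name: Add tower inventory with variables loaded from a file
#   tower_inventory:
#     name: "Foo Inventory"
#     organization: "Bar Org"
#     variables: "@inventory_vars.yml"
#     state: present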
from ansible.module_utils.ansible_tower import tower_argument_spec, tower_auth_config, tower_check_mode, HAS_TOWER_CLI
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
argument_spec = tower_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
description=dict(),
organization=dict(required=True),
variables=dict(),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
description = module.params.get('description')
organization = module.params.get('organization')
variables = module.params.get('variables')
state = module.params.get('state')
json_output = {'inventory': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
inventory = tower_cli.get_resource('inventory')
try:
org_res = tower_cli.get_resource('organization')
org = org_res.get(name=organization)
if state == 'present':
result = inventory.modify(name=name, organization=org['id'], variables=variables,
description=description, create_on_missing=True)
json_output['id'] = result['id']
elif state == 'absent':
result = inventory.delete(name=name, organization=org['id'])
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update inventory, organization not found: {0}'.format(excinfo), changed=False)
except (exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(msg='Failed to update inventory: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
gpl-3.0
|
dlazz/ansible
|
lib/ansible/module_utils/facts/other/facter.py
|
232
|
2985
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector
class FacterFactCollector(BaseFactCollector):
name = 'facter'
_fact_ids = set(['facter'])
def __init__(self, collectors=None, namespace=None):
namespace = PrefixFactNamespace(namespace_name='facter',
prefix='facter_')
super(FacterFactCollector, self).__init__(collectors=collectors,
namespace=namespace)
def find_facter(self, module):
facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
return facter_path
def run_facter(self, module, facter_path):
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
rc, out, err = module.run_command(facter_path + " --puppet --json")
return rc, out, err
def get_facter_output(self, module):
facter_path = self.find_facter(module)
if not facter_path:
return None
rc, out, err = self.run_facter(module, facter_path)
if rc != 0:
return None
return out
def collect(self, module=None, collected_facts=None):
        # Note that this mirrors previous facter behavior, where there isn't
        # an 'ansible_facter' key in the main fact dict; instead, 'facter_whatever'
        # items are added to the main dict.
facter_dict = {}
if not module:
return facter_dict
facter_output = self.get_facter_output(module)
        # TODO: if we fail, should we add an empty facter key or nothing?
if facter_output is None:
return facter_dict
try:
facter_dict = json.loads(facter_output)
except Exception:
# FIXME: maybe raise a FactCollectorError with some info attrs?
pass
return facter_dict
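# Hedged illustration (editor's addition): collect() returns the parsed facter
# JSON unchanged; the 'facter_' prefix comes from the PrefixFactNamespace set
# up in __init__, so a raw fact such as 'kernel' surfaces as 'facter_kernel'.
# The sample output below is illustrative only.
_EXAMPLE_FACTER_JSON = '{"kernel": "Linux", "operatingsystem": "Ubuntu"}'
# json.loads(_EXAMPLE_FACTER_JSON) -> {'kernel': 'Linux', 'operatingsystem': 'Ubuntu'}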
|
gpl-3.0
|
tcpcloud/contrail-controller
|
src/discovery/tests/test_dsa.py
|
2
|
20448
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import gevent
import uuid
import time
sys.path.append("../config/common/tests")
from testtools.matchers import Equals, Contains, Not
from test_utils import *
import test_common
import test_case
from vnc_api.vnc_api import *
from vnc_api.gen.resource_xsd import *
from cfgm_common.exceptions import *
from gevent import sleep
import discoveryclient.client as client
EP_DELIM=','
PUBSUB_DELIM=' '
def parse_pubsub_ep(pubsub_str):
r = pubsub_str.split(EP_DELIM)
if len(r) < 4:
for i in range(4-len(r)):
r.append('')
return r
# '1.1.1.1/24' or '1.1.1.1'
def prefix_str_to_obj(prefix_str):
if '/' not in prefix_str:
prefix_str += '/32'
x = prefix_str.split('/')
if len(x) != 2:
return None
return SubnetType(x[0], int(x[1]))
def build_dsa_rule_entry(rule_str):
    r = rule_str.split(PUBSUB_DELIM) if rule_str else []
if len(r) < 2:
return None
# [0] is publisher-spec, [1] is subscriber-spec
pubspec = parse_pubsub_ep(r[0])
subspec = parse_pubsub_ep(r[1])
pfx_pub = prefix_str_to_obj(pubspec[0])
pfx_sub = prefix_str_to_obj(subspec[0])
    if pfx_pub is None or pfx_sub is None:
return None
publisher = DiscoveryPubSubEndPointType(ep_prefix = pfx_pub,
ep_type = pubspec[1], ep_id = pubspec[2],
ep_version = pubspec[3])
subscriber = [DiscoveryPubSubEndPointType(ep_prefix = pfx_sub,
ep_type = subspec[1], ep_id = subspec[2],
ep_version = subspec[3])]
dsa_rule_entry = DiscoveryServiceAssignmentType(publisher, subscriber)
return dsa_rule_entry
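# Hedged usage sketch (editor's addition): a rule string is
# '<publisher-spec> <subscriber-spec>', each spec being
# 'prefix,ep-type,ep-id,ep-version' with trailing fields optional; the
# addresses and endpoint types below are example values only.
_example_dsa_rule = build_dsa_rule_entry(
    '10.1.1.0/24,xmpp-server 10.1.1.0/24,contrail-vrouter-agent:0')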
server_list = {}
def info_callback(info, client_id):
print 'subscribe[%s]=%s' % (client_id, info)
global server_list
server_list[client_id] = [entry['@publisher-id'] for entry in info]
def validate_in_use_count(response, expected_counts, context):
services = response['services']
in_use_counts = {entry['ep_id']:entry['in_use'] for entry in services}
print '%s %s' % (context, in_use_counts)
return in_use_counts == expected_counts
class TestDsa(test_case.DsTestCase):
def setUp(self):
extra_config_knobs = [
('pulkit-pub', 'policy', 'load-balance'),
('test_bug_1548638', 'policy', 'fixed'),
]
super(TestDsa, self).setUp(extra_disc_server_config_knobs=extra_config_knobs)
def tearDown(self):
global server_list
server_list = {}
super(TestDsa, self).tearDown()
def test_bug_1549243(self):
puburl = '/publish'
suburl = "/subscribe"
service_type = 'pulkit-pub'
subscriber_type = "pulkit-sub"
dsa = DiscoveryServiceAssignment()
rule_entry = build_dsa_rule_entry('77.77.2.0/24,%s 77.77.2.0/24,%s' % (service_type, subscriber_type))
rule_uuid = uuid.uuid4()
dsa_rule1 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule1.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule1)
# publish 3 instances
pub_tasks = []
client_type = 'test-discovery'
for ipaddr in ["77.77.1.10", "77.77.2.10", "77.77.3.10"]:
pub_id = 'test_discovery-%s' % ipaddr
pub_data = {service_type : '%s-%s' % (service_type, ipaddr)}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
disc.remote_addr = ipaddr
task = disc.publish(service_type, pub_data)
pub_tasks.append(task)
time.sleep(1)
# Verify all services are published.
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
service_count = 2
sub_tasks = []
for remote, count in [("77.77.3.11", 6), ("77.77.2.11", 4)]:
for i in range(count):
subscriber_id = "client-%s-%d" % (remote, i)
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
subscriber_type, pub_id=subscriber_id)
disc.remote_addr = remote
obj = disc.subscribe(
service_type, service_count, info_callback, subscriber_id)
sub_tasks.append(obj.task)
time.sleep(1)
print 'Started tasks to subscribe service %s, count %d' \
% (service_type, service_count)
# validate all clients have subscribed
time.sleep(1)
(code, msg) = self._http_get('/clients.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 6*2+4)
        # verify service assignment is 4, 8 and 4 across the three publishers
expected_in_use_counts = {
'test_discovery-77.77.1.10':4,
'test_discovery-77.77.2.10':8,
'test_discovery-77.77.3.10':4,
}
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
success = validate_in_use_count(response, expected_in_use_counts, 'In-use count after initial subscribe')
self.assertEqual(success, True)
# validate assignment remains same after resubscribe
time.sleep(2*60)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
        success = validate_in_use_count(response, expected_in_use_counts, 'In-use count after resubscribe')
self.assertEqual(success, True)
def test_bug_1548638(self):
puburl = '/publish'
suburl = "/subscribe"
service_type = 'test_bug_1548638'
# publish 3 dns servers
for ipaddr in ["77.77.1.10", "77.77.2.10", "77.77.3.10"]:
payload = {
service_type: { "ip-addr" : ipaddr, "port" : "1111" },
'service-type' : '%s' % service_type,
'service-id' : '%s-%s' % (service_type, ipaddr),
'remote-addr': ipaddr,
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
# Verify all services are published.
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
# verify all agents see only 2 publishers due to fixed policy
expectedpub_set = set(["test_bug_1548638-77.77.1.10", "test_bug_1548638-77.77.2.10"])
for ipaddr in ["77.77.1.11", "77.77.2.11", "77.77.3.11"]:
payload = {
'service' : service_type,
'client' : ipaddr,
'instances' : 2,
'client-type' : 'contrail-vrouter-agent:0',
'remote-addr' : ipaddr,
}
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), payload['instances'])
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
dsa = DiscoveryServiceAssignment()
rule_entry = build_dsa_rule_entry('77.77.3.0/24,%s 77.77.3.11/32,contrail-vrouter-agent:0' % service_type)
rule_uuid = uuid.uuid4()
dsa_rule1 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule1.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule1)
expectedpub_set = set(["test_bug_1548638-77.77.1.10", "test_bug_1548638-77.77.2.10"])
for ipaddr in ["77.77.1.11", "77.77.2.11"]:
payload = {
'service' : service_type,
'client' : ipaddr,
'instances' : 2,
'client-type' : 'contrail-vrouter-agent:0',
'remote-addr' : ipaddr,
}
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), payload['instances'])
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
expectedpub_set = set(["test_bug_1548638-77.77.3.10"])
for ipaddr in ["77.77.3.11"]:
payload = {
'service' : service_type,
'client' : ipaddr,
'instances' : 2,
'client-type' : 'contrail-vrouter-agent:0',
'remote-addr' : ipaddr,
}
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 1)
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
def test_bug_1548771(self):
dsa = DiscoveryServiceAssignment()
rule_entry = build_dsa_rule_entry('77.77.3.0/24,xmpp-server 77.77.0.0/16,contrail-vrouter-agent:0')
rule_uuid = uuid.uuid4()
dsa_rule1 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule1.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule1)
rule_entry = build_dsa_rule_entry('77.77.3.0/24,dns-server 77.77.3.11/32,contrail-vrouter-agent:0')
rule_uuid = uuid.uuid4()
dsa_rule2 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule2.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule2)
puburl = '/publish'
suburl = "/subscribe"
# publish 3 control nodes and dns servers
for service_type in ['xmpp-server', 'dns-server']:
for ipaddr in ["77.77.1.10", "77.77.2.10", "77.77.3.10"]:
payload = {
service_type: { "ip-addr" : ipaddr, "port" : "1111" },
'service-type' : '%s' % service_type,
'service-id' : '%s-%s' % (service_type, ipaddr),
'remote-addr': ipaddr,
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
# Verify all services are published.
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 6)
# verify all agents see only 1 xmpp-server (rule #1)
service_type = 'xmpp-server'
expectedpub_set = set(["xmpp-server-77.77.3.10"])
for ipaddr in ["77.77.1.11", "77.77.2.11", "77.77.3.11"]:
payload = {
'service' : '%s' % service_type,
'client' : '%s-%s' % (service_type, ipaddr),
'instances' : 2,
'client-type' : 'contrail-vrouter-agent:0',
'remote-addr' : ipaddr,
}
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 1)
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
self._vnc_lib.dsa_rule_delete(id = dsa_rule1.get_uuid())
self._vnc_lib.dsa_rule_delete(id = dsa_rule2.get_uuid())
def test_bug_1540777(self):
dsa = DiscoveryServiceAssignment()
rule_entry = build_dsa_rule_entry('77.77.3.10/32,pulkit-pub 77.77.3.11/32,pulkit-sub')
rule_uuid = uuid.uuid4()
dsa_rule1 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule1.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule1)
rule_entry = build_dsa_rule_entry('77.77.2.10/32,pulkit-pub 77.77.3.11/32,pulkit-sub')
rule_uuid = uuid.uuid4()
dsa_rule2 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule2.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule2)
puburl = '/publish'
suburl = "/subscribe"
service_type = 'pulkit-pub'
# publish 3 control nodes - 2 subject to rules above
for ipaddr in ["77.77.1.10", "77.77.2.10", "77.77.3.10"]:
payload = {
service_type: { "ip-addr" : ipaddr, "port" : "1111" },
'service-type' : '%s' % service_type,
'service-id' : 'pulkit-pub-%s' % ipaddr,
'remote-addr': ipaddr,
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
payload = {
'service' : '%s' % service_type,
'client' : 'discovery-ut',
'instances' : 3,
'client-type' : 'pulkit-sub',
'remote-addr' : '77.77.3.11',
}
# should see 2 publishers due to two rules
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 2)
expectedpub_set = set(["pulkit-pub-77.77.2.10", "pulkit-pub-77.77.3.10"])
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
self._vnc_lib.dsa_rule_delete(id = dsa_rule1.get_uuid())
self._vnc_lib.dsa_rule_delete(id = dsa_rule2.get_uuid())
def test_dsa_config(self):
# Assign DC1 control nodes to DC1 agents
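        # Rule strings in these tests follow the pattern
        # '<publisher-prefix>,<publisher-type> <subscriber-prefix>,<subscriber-type>':
        # publishers in the first subnet are assigned to subscribers in the second.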
rule_entry = build_dsa_rule_entry('1.1.1.0/24,Control-Node 1.1.1.0/24,Vrouter-Agent')
dsa = DiscoveryServiceAssignment()
rule_uuid = uuid.uuid4()
dsa_rule1 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule1.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule1)
# Assign DC2 control nodes to DC1 agents
rule_entry = build_dsa_rule_entry('2.2.2.0/24,Control-Node 2.2.2.0/24,Vrouter-Agent')
rule_uuid = uuid.uuid4()
dsa_rule2 = DsaRule(name = str(rule_uuid), parent_obj = dsa, dsa_rule_entry = rule_entry)
dsa_rule2.set_uuid(str(rule_uuid))
self._vnc_lib.dsa_rule_create(dsa_rule2)
puburl = '/publish'
service_type = 'Control-Node'
# publish 4 control nodes - 2 in two data centers each
payload = {
'%s' % service_type: { "ip-addr" : "1.1.1.1", "port" : "1111" },
'service-type' : '%s' % service_type,
'service-id' : 'DC1-CN1',
'remote-addr': '1.1.1.1',
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
payload = {
'%s' % service_type: { "ip-addr" : "1.1.1.2", "port" : "1112" },
'service-type' : '%s' % service_type,
'service-id' : 'DC1-CN2',
'remote-addr': '1.1.1.2',
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
payload = {
'%s' % service_type: { "ip-addr" : "2.2.2.1", "port" : "2221" },
'service-type' : '%s' % service_type,
'service-id' : 'DC2-CN1',
'remote-addr': '2.2.2.1',
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
payload = {
'%s' % service_type: { "ip-addr" : "2.2.2.2", "port" : "2222" },
'service-type' : '%s' % service_type,
'service-id' : 'DC2-CN2',
'remote-addr': '2.2.2.2',
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
# Verify all services are published.
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 4)
# json subscribe request
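        # Note: 'instances': 0 requests all matching publishers rather than a fixed count.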
suburl = "/subscribe"
payload = {
'service' : '%s' % service_type,
'client' : 'DC1-VA1',
'instances' : 0,
'client-type' : 'Vrouter-Agent',
'remote-addr' : '3.3.3.3',
'version' : '2.2',
}
# should see all 4 publishers for sub that is not in DC1 or DC2
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 4)
# Sub in DC1 - should see only DC1 services
payload['remote-addr'] = '1.1.1.3'
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 2)
for svc in response[service_type]:
self.assertEqual("DC1-CN" in svc['@publisher-id'], True)
# Sub in DC2 - should see only DC2 services
payload['remote-addr'] = '2.2.2.3'
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 2)
for svc in response[service_type]:
self.assertEqual("DC2-CN" in svc['@publisher-id'], True)
# Subscribe to IfmapServer from DC1, DC2 and DC3. There are no
# assignment rules applicable to IfmapServer. Thus clients from
        # all DCs should be able to subscribe to the singleton IfmapServer
service_type = 'IfmapServer'
payload = {
service_type: { "ip-addr" : "4.4.4.4", "port" : "4444" },
'service-type' : service_type,
'service-id' : 'Controller',
'remote-addr': '4.4.4.4',
}
(code, msg) = self._http_post(puburl, json.dumps(payload))
self.assertEqual(code, 200)
payload = {
'service' : '%s' % service_type,
'client' : 'DC1-VA1',
'instances' : 0,
'client-type' : 'Vrouter-Agent',
}
for remote in ['1.1.1.1', '2.2.2.2', '3.3.3.3']:
payload['remote-addr'] = remote
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 1)
# Delete service assignment rule.
# Subs from any DC should see all DC1+DC2 services
self._vnc_lib.dsa_rule_delete(id = dsa_rule1.uuid)
self._vnc_lib.dsa_rule_delete(id = dsa_rule2.uuid)
service_type = 'Control-Node'
payload = {
'service' : '%s' % service_type,
'client' : 'Dont Care',
'instances' : 0,
'client-type' : 'Vrouter-Agent',
}
# Sub in DC1 or DC2 should see DC1+DC2 services
expectedpub_set = set(["DC1-CN1", "DC1-CN2", "DC2-CN1", "DC2-CN2"])
for sub_ip in ['1.1.1.3', '2.2.2.3']:
payload['remote-addr'] = sub_ip
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), 4)
receivedpub_set = set([svc['@publisher-id'] for svc in response[service_type]])
self.assertEqual(expectedpub_set == receivedpub_set, True)
#end class TestDsa
|
apache-2.0
|
Acehaidrey/incubator-airflow
|
airflow/contrib/hooks/spark_submit_hook.py
|
7
|
1178
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.apache.spark.hooks.spark_submit`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.spark.hooks.spark_submit import SparkSubmitHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.spark.hooks.spark_submit`.",
DeprecationWarning,
stacklevel=2,
)
|
apache-2.0
|
lepture/pythondotorg
|
jobs/forms.py
|
8
|
3256
|
from django import forms
from django.forms.widgets import CheckboxSelectMultiple
from django.utils.translation import ugettext_lazy as _
from django_comments_xtd.conf import settings as comments_settings
from django_comments_xtd.forms import CommentForm
from django_comments_xtd.models import TmpXtdComment
from .models import Job
from cms.forms import ContentManageableModelForm
class JobForm(ContentManageableModelForm):
required_css_class = 'required'
class Meta:
model = Job
fields = (
'job_title',
'company_name',
'category',
'job_types',
'other_job_type',
'city',
'region',
'country',
'description',
'requirements',
'company_description',
'contact',
'email',
'url',
'telecommuting',
'agencies',
)
widgets = {
'job_types': CheckboxSelectMultiple(),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['job_types'].help_text = None
def clean_city(self):
city = self.cleaned_data['city'].strip()
return city
def clean_region(self):
region = self.cleaned_data['region'].strip()
return region
def clean_country(self):
country = self.cleaned_data['country'].strip()
return country
def save(self, commit=True):
obj = super().save()
obj.job_types.clear()
for t in self.cleaned_data['job_types']:
obj.job_types.add(t)
return obj
class JobCommentForm(CommentForm):
reply_to = forms.IntegerField(required=True, initial=0, widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
comment = kwargs.pop("comment", None)
if comment:
initial = kwargs.pop("initial", {})
initial.update({"reply_to": comment.pk})
kwargs["initial"] = initial
super(JobCommentForm, self).__init__(*args, **kwargs)
self.fields['name'] = forms.CharField(
widget=forms.TextInput(attrs={'placeholder': _('name')}))
self.fields['email'] = forms.EmailField(
label=_("Email"), help_text=_("Required for comment verification"),
widget=forms.TextInput(attrs={'placeholder': _('email')})
)
self.fields['url'] = forms.URLField(
required=False,
widget=forms.TextInput(attrs={'placeholder': _('website')}))
self.fields['comment'] = forms.CharField(
widget=forms.Textarea(attrs={'placeholder': _('comment')}),
max_length=comments_settings.COMMENT_MAX_LENGTH)
def get_comment_model(self):
return TmpXtdComment
def get_comment_create_data(self):
data = super(JobCommentForm, self).get_comment_create_data()
data.update({'thread_id': 0, 'level': 0, 'order': 1,
'parent_id': self.cleaned_data['reply_to'],
'followup': True})
if comments_settings.COMMENTS_XTD_CONFIRM_EMAIL:
# comment must be verified before getting approved
data['is_public'] = False
return data
|
apache-2.0
|
paweljasinski/ironpython3
|
Src/StdLib/Lib/lib2to3/fixes/fix_raise.py
|
203
|
2926
|
"""Fixer for 'raise E, V, T'
raise -> raise
raise E -> raise E
raise E, V -> raise E(V)
raise E, V, T -> raise E(V).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)
raise (((E, E'), E''), E'''), V -> raise E(V)
raise "foo", V, T -> warns about string exceptions
CAVEATS:
1) "raise E, V" will be incorrectly translated if V is an exception
instance. The correct Python 3 idiom is
raise E from V
but since we can't detect instance-hood by syntax alone and since
any client code would have to be changed as well, we don't automate
this.
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
class FixRaise(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type == token.STRING:
msg = "Python 3 does not support string exceptions"
self.cannot_convert(node, msg)
return
# Python 2 supports
# raise ((((E1, E2), E3), E4), E5), V
# as a synonym for
# raise E1, V
# Since Python 3 will not support this, we recurse down any tuple
# literals, always taking the first element.
if is_tuple(exc):
while is_tuple(exc):
# exc.children[1:-1] is the unparenthesized tuple
# exc.children[1].children[0] is the first element of the tuple
exc = exc.children[1].children[0].clone()
exc.prefix = " "
if "val" not in results:
# One-argument raise
new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
new.prefix = node.prefix
return new
val = results["val"].clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.prefix = ""
args = [val]
if "tb" in results:
tb = results["tb"].clone()
tb.prefix = ""
e = exc
# If there's a traceback and None is passed as the value, then don't
# add a call, since the user probably just wants to add a
# traceback. See issue #9661.
if val.type != token.NAME or val.value != "None":
e = Call(exc, args)
with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
new.prefix = node.prefix
return new
else:
return pytree.Node(syms.raise_stmt,
[Name("raise"), Call(exc, args)],
prefix=node.prefix)
|
apache-2.0
|
freieslabor/laborbot-modules
|
modules/asl.py
|
1
|
3040
|
# coding=utf8
"""
asl.py - Willie Freies Labor Activity Streams Lite Module
Licensed under a Mozilla Public License 2.0.
"""
from willie.module import commands, interval
import urllib2, json, os, pickle
from datetime import datetime, timedelta
ASL_QUERY = '-wiki.*&-sensor.traffic-light&-sensor.mate-o-meter&-twitter.retweet'
def setup(self):
"""Performs startup tasks."""
fn = self.nick + '-' + self.config.host + '.asl.p'
self.asl_filename = os.path.join(self.config.dotdir, fn)
if not os.path.exists(self.asl_filename):
try:
f = open(self.asl_filename, 'wb')
        except (IOError, OSError):
pass
else:
# write current id as last id
lastId = asl(self.config.asl.asl_query)[0]['id']
pickle.dump(lastId, f)
f.close()
def configure(config):
"""
| [asl] | example | purpose |
| ----------| ------------------- | --------------- |
| asl_query | last_id=%d%sensor.* | Sets ASL filter |
"""
if config.option('Configure ASL module', False):
config.add_section('asl')
config.interactive_add('asl', 'asl_query', 'ASL query')
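# Illustrative example of the resulting config section (the asl_query value
# below is only a sample filter, not a required default):
#
#   [asl]
#   asl_query = -wiki.*&-twitter.retweet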
def asl(argStr=''):
"""Returns ASL results matching the filter arg string."""
try:
request = urllib2.Request('http://asl.hickerspace.org/asl.json?%s' \
% argStr)
response = json.load(urllib2.urlopen(request))
return response['results']
    except urllib2.URLError:
        return []
def getAslUpdates(bot):
"""Checks for new ASL messages and announces them."""
with open(bot.asl_filename) as f:
lastId = pickle.load(f)
query = '%s&%s' % ("last_id=%d" % lastId, bot.config.asl.asl_query)
for item in asl(query):
lastId = item['id'] if item['id'] > lastId else lastId
# ignore updates older than 90 minutes
date = datetime.strptime(item['datetime'], '%Y-%m-%d %H:%M:%S')
if date < datetime.now() - timedelta(minutes=90):
continue
author = 'by %s ' % item['person'] if item['person'] else ''
message = '[%s] %s %s(%s)' % \
(item['service'].title(), item['content'], author, item['url'])
# announce in all channels
for chan in bot.channels:
bot.msg(chan, message)
# change topic if room status changed
if item['service'] == 'sensor' and item['type'] == 'room':
since = date.strftime('%a, %H:%M')
status = "%s since %s" % (item['content'][:-1], since)
bot.msg('ChanServ', 'TOPIC %s Freies Labor ' % chan \
+ 'Hildesheim - %s - https://freieslabor.org' % \
status)
pickle.dump(lastId, open(bot.asl_filename, 'wb'))
@interval(30)
def intervalAsl(bot):
"""Queries ASL updates automatically."""
getAslUpdates(bot)
@commands('events')
def queryEvents(bot, trigger):
"""Returns last 5 ASL updates (https://asl.hickerspace.org)."""
for item in reversed(asl(bot.config.asl.asl_query)[:5]):
author = 'by %s ' % item['person'] if item['person'] else ''
message = '%s: [%s] %s %s(%s)' % (item['datetime'], \
item['service'].title(), item['content'], author, item['url'])
bot.say(message)
class ApiException(Exception):
"""Custom API exception."""
pass
|
mpl-2.0
|
torbjoernk/easybuild-easyblocks
|
easybuild/easyblocks/n/netcdf4_python.py
|
14
|
2883
|
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing netcdf4-python, implemented as an easyblock.
@author: Kenneth Hoste (Ghent University)
"""
import os
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.pythonpackage import PythonPackage
from easybuild.tools.modules import get_software_root
class EB_netcdf4_minus_python(PythonPackage):
"""Support for building and installing netcdf4-python"""
def __init__(self, *args, **kwargs):
"""Custom constructor for netcdf4-python."""
super(EB_netcdf4_minus_python, self).__init__(*args, **kwargs)
self.options['modulename'] = 'netCDF4'
def configure_step(self):
"""
Configure and
Test if python module is loaded
"""
hdf5 = get_software_root('HDF5')
if hdf5:
env.setvar('HDF5_DIR', hdf5)
szip = get_software_root('Szip')
if szip:
env.setvar('SZIP_DIR', szip)
netcdf = get_software_root('netCDF')
if netcdf:
env.setvar('NETCDF4_DIR', netcdf)
super(EB_netcdf4_minus_python, self).configure_step()
def test_step(self):
"""Run netcdf4-python tests."""
self.testinstall = True
cwd = os.getcwd()
self.testcmd = "cd %s/test && python run_all.py && cd %s" % (self.cfg['start_dir'], cwd)
super(EB_netcdf4_minus_python, self).test_step()
def sanity_check_step(self):
"""Custom sanity check for netcdf4-python"""
custom_paths = {
'files': ['bin/nc3tonc4', 'bin/nc4tonc3', 'bin/ncinfo'],
'dirs': [os.path.join(self.pylibdir, x) for x in ['netCDF4.so', 'netCDF4_utils.py', 'netcdftime.py']],
}
return super(EB_netcdf4_minus_python, self).sanity_check_step(custom_paths=custom_paths)
|
gpl-2.0
|
ucloud/uai-sdk
|
uaitrain/api/modify_train_job_memo.py
|
1
|
2173
|
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from uaitrain.api.base_op import BaseUAITrainAPIOp
class ModifyUAITrainJobMemoApiOp(BaseUAITrainAPIOp):
"""
ModifyUAITrainJobMemoAPI
    Identical to the UAI Train ModifyUAITrainJobMemo API function
Input:
TrainJobId string(required) the id of train job
TrainJobMemo string(required) the memo of train job
Output:
RetCode int API return code: 0: success, others: error code
Action string Action name
Message string Message: error description
"""
ACTION_NAME = "ModifyUAITrainJobMemo"
def __init__(self, pub_key, priv_key, job_id, job_memo, project_id="", region="", zone=""):
super(ModifyUAITrainJobMemoApiOp, self).__init__(self.ACTION_NAME, pub_key, priv_key, project_id, region, zone)
self.cmd_params["TrainJobId"] = job_id
self.cmd_params["TrainJobMemo"] = job_memo
def _check_args(self):
super(ModifyUAITrainJobMemoApiOp, self)._check_args()
if self.cmd_params["TrainJobId"] == "" or type(self.cmd_params["TrainJobId"]) != str:
raise ValueError("TrainJobId should be <str> and should not be nil")
if self.cmd_params["TrainJobMemo"] == "" or type(self.cmd_params["TrainJobMemo"]) != str:
raise ValueError("TrainJobMemo should be <str> and should not be nil")
|
apache-2.0
|
JonathanStein/odoo
|
addons/portal_project_issue/tests/__init__.py
|
260
|
1086
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
yongtang/tensorflow
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/hash_table_v1.py
|
10
|
3322
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/hash_table_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# Verify that the tf.versions attribute exists. It is difficult to enforce
# contents, since the version numbers change over time. The conversion logic
# itself is verified in the common graphdef converter, so here just assert
# it is being invoked.
# CHECK: module
# CHECK-SAME: tf.versions
# CHECK-SAME: bad_consumers
# CHECK-SAME: min_consumer
# CHECK-SAME: producer
# CHECK: "tf_saved_model.global_tensor"()
# CHECK: "tf_saved_model.session_initializer"() {initializers = [@[[init:.*]]]} : () -> ()
# CHECK: func @[[init]]
# CHECK-NEXT: [[R5:%.*]] = "tf.Const"()
# CHECK-NEXT: [[R6:%.*]] = "tf.Const"()
# CHECK-NEXT: [[R7:%.*]] = "tf.HashTableV2"()
# CHECK-SAME: shared_name = "[[hash_table:.*]]"
# CHECK-NEXT: "tf.LookupTableImportV2"([[R7]], [[R5]], [[R6]])
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: [[ARG0:%.*]]: tensor<i32>
# CHECK-SAME: [[ARG1:%.*]]: tensor<!tf.resource
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key"]
# CHECK-NEXT: [[R0:%.*]] = "tf.Const"()
# CHECK-NEXT: [[R1:%.*]] = "tf.HashTableV2"()
# CHECK-SAME: shared_name = "[[hash_table]]"
# CHECK-NEXT: [[R2:%.*]] = "tf.LookupTableFindV2"([[R1]], [[ARG0]], [[R0]])
# CHECK-NEXT: [[R3:%.*]] = "tf.ReadVariableOp"([[ARG1]])
# CHECK-NEXT: [[R4:%.*]] = "tf.AddV2"([[R2]], [[R3]])
# CHECK-NEXT: return [[R4]]
def Test():
z = tf.compat.v1.get_variable(
name='y',
shape=(),
initializer=tf.random_normal_initializer(),
trainable=True)
table_initializer = tf.lookup.KeyValueTensorInitializer(
keys=[1, 2, 3, 4],
values=[5, 6, 7, 8],
key_dtype=tf.int32,
value_dtype=tf.float32)
table = tf.lookup.StaticHashTable(
table_initializer, default_value=tf.constant(0.0))
x = tf.placeholder(tf.int32, shape=(), name='input')
y = table.lookup(x)
r = tf.add(y, z)
tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
tensor_info_r = tf.compat.v1.saved_model.utils.build_tensor_info(r)
return {
'key': (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs={'x': tensor_info_x},
outputs={'r': tensor_info_r},
method_name='some_function'))
}, tf.tables_initializer(), None
if __name__ == '__main__':
common_v1.set_tf_options()
common_v1.do_test(Test, canonicalize=True)
|
apache-2.0
|
arseneyr/essentia
|
test/src/unittest/highlevel/test_intensity.py
|
1
|
2586
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestIntensity(TestCase):
def testEmpty(self):
self.assertComputeFails(Intensity(), [])
def testSilence(self):
audio = [0]*(44100*10) # 10 sec silence
self.assertEqual(Intensity()(audio), -1) # silence is relaxing isn't it
def testDiffSampleRates(self):
algo44 = Intensity(sampleRate=44100)
algo22 = Intensity(sampleRate=22050)
filename = join(testdata.audio_dir, 'recorded', 'cat_purrrr.wav')
cat44 = MonoLoader(filename=filename, downmix='left', sampleRate=44100)()
filename = join(testdata.audio_dir, 'recorded', 'cat_purrrr22050.wav')
cat22 = MonoLoader(filename=filename, downmix='left', sampleRate=22050)()
self.assertEqual(algo44(cat44), algo22(cat22))
def testRegression(self):
filename = join(testdata.audio_dir, 'recorded', 'distorted.wav')
audio = MonoLoader(filename=filename, downmix='left', sampleRate=44100)()
distortedIntensity = Intensity()(audio)
filename = join(testdata.audio_dir, 'recorded', 'spaceambient.wav')
audio = MonoLoader(filename=filename, downmix='left', sampleRate=44100)()
ambientIntensity = Intensity()(audio)
filename = join(testdata.audio_dir, 'recorded', 'dubstep.wav')
audio = MonoLoader(filename=filename, downmix='left', sampleRate=44100)()
dubstepIntensity = Intensity()(audio)
self.assertTrue(distortedIntensity > ambientIntensity)
self.assertTrue(distortedIntensity >= dubstepIntensity)
self.assertTrue(dubstepIntensity > ambientIntensity)
suite = allTests(TestIntensity)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
agpl-3.0
|
meghana1995/sympy
|
sympy/series/order.py
|
4
|
15386
|
from __future__ import print_function, division
from sympy.core import S, sympify, Expr, Rational, Symbol, Dummy
from sympy.core import Add, Mul, expand_power_base, expand_log
from sympy.core.cache import cacheit
from sympy.core.compatibility import default_sort_key, is_sequence
from sympy.core.containers import Tuple
from sympy.utilities.iterables import uniq
class Order(Expr):
r""" Represents the limiting behavior of some function
The order of a function characterizes the function based on the limiting
behavior of the function as it goes to some limit. Only taking the limit
point to be a number is currently supported. This is expressed in
big O notation [1]_.
The formal definition for the order of a function `g(x)` about a point `a`
is such that `g(x) = O(f(x))` as `x \rightarrow a` if and only if for any
`\delta > 0` there exists a `M > 0` such that `|g(x)| \leq M|f(x)|` for
`|x-a| < \delta`. This is equivalent to `\lim_{x \rightarrow a}
\sup |g(x)/f(x)| < \infty`.
Let's illustrate it on the following example by taking the expansion of
`\sin(x)` about 0:
.. math ::
\sin(x) = x - x^3/3! + O(x^5)
where in this case `O(x^5) = x^5/5! - x^7/7! + \cdots`. By the definition
of `O`, for any `\delta > 0` there is an `M` such that:
.. math ::
|x^5/5! - x^7/7! + ....| <= M|x^5| \text{ for } |x| < \delta
or by the alternate definition:
.. math ::
\lim_{x \rightarrow 0} | (x^5/5! - x^7/7! + ....) / x^5| < \infty
which surely is true, because
.. math ::
\lim_{x \rightarrow 0} | (x^5/5! - x^7/7! + ....) / x^5| = 1/5!
As it is usually used, the order of a function can be intuitively thought
of representing all terms of powers greater than the one specified. For
example, `O(x^3)` corresponds to any terms proportional to `x^3,
x^4,\ldots` and any higher power. For a polynomial, this leaves terms
proportional to `x^2`, `x` and constants.
Examples
========
>>> from sympy import O, oo, cos, pi
>>> from sympy.abc import x, y
>>> O(x + x**2)
O(x)
>>> O(x + x**2, (x, 0))
O(x)
>>> O(x + x**2, (x, oo))
O(x**2, (x, oo))
>>> O(1 + x*y)
O(1, x, y)
>>> O(1 + x*y, (x, 0), (y, 0))
O(1, x, y)
>>> O(1 + x*y, (x, oo), (y, oo))
O(x*y, (x, oo), (y, oo))
>>> O(1) in O(1, x)
True
>>> O(1, x) in O(1)
False
>>> O(x) in O(1, x)
True
>>> O(x**2) in O(x)
True
>>> O(x)*x
O(x**2)
>>> O(x) - O(x)
O(x)
>>> O(cos(x))
O(1)
>>> O(cos(x), (x, pi/2))
O(x - pi/2, (x, pi/2))
References
==========
.. [1] `Big O notation <http://en.wikipedia.org/wiki/Big_O_notation>`_
Notes
=====
In ``O(f(x), x)`` the expression ``f(x)`` is assumed to have a leading
term. ``O(f(x), x)`` is automatically transformed to
``O(f(x).as_leading_term(x),x)``.
``O(expr*f(x), x)`` is ``O(f(x), x)``
``O(expr, x)`` is ``O(1)``
``O(0, x)`` is 0.
Multivariate O is also supported:
``O(f(x, y), x, y)`` is transformed to
``O(f(x, y).as_leading_term(x,y).as_leading_term(y), x, y)``
In the multivariate case, it is assumed the limits w.r.t. the various
symbols commute.
If no symbols are passed then all symbols in the expression are used
and the limit point is assumed to be zero.
"""
is_Order = True
__slots__ = []
@cacheit
def __new__(cls, expr, *args, **kwargs):
expr = sympify(expr)
if not args:
if expr.is_Order:
variables = expr.variables
point = expr.point
else:
variables = list(expr.free_symbols)
point = [S.Zero]*len(variables)
else:
args = list(args if is_sequence(args) else [args])
variables, point = [], []
if is_sequence(args[0]):
for a in args:
v, p = list(map(sympify, a))
variables.append(v)
point.append(p)
else:
variables = list(map(sympify, args))
point = [S.Zero]*len(variables)
if not all(isinstance(v, Symbol) for v in variables):
raise TypeError('Variables are not symbols, got %s' % variables)
if len(list(uniq(variables))) != len(variables):
raise ValueError('Variables are supposed to be unique symbols, got %s' % variables)
if expr.is_Order:
expr_vp = dict(expr.args[1:])
new_vp = dict(expr_vp)
vp = dict(zip(variables, point))
for v, p in vp.items():
if v in new_vp.keys():
if p != new_vp[v]:
raise NotImplementedError(
"Mixing Order at different points is not supported.")
else:
new_vp[v] = p
if set(expr_vp.keys()) == set(new_vp.keys()):
return expr
else:
variables = list(new_vp.keys())
point = [new_vp[v] for v in variables]
if expr is S.NaN:
return S.NaN
if any(x in p.free_symbols for x in variables for p in point):
raise ValueError('Got %s as a point.' % point)
if variables:
if any(p != point[0] for p in point):
raise NotImplementedError
if point[0] is S.Infinity:
s = dict([(k, 1/Dummy()) for k in variables])
rs = dict([(1/v, 1/k) for k, v in s.items()])
elif point[0] is not S.Zero:
s = dict((k, Dummy() + point[0]) for k in variables)
rs = dict((v - point[0], k - point[0]) for k, v in s.items())
else:
s = ()
rs = ()
expr = expr.subs(s)
if expr.is_Add:
from sympy import expand_multinomial
expr = expand_multinomial(expr)
if s:
args = tuple([r[0] for r in rs.items()])
else:
args = tuple(variables)
if len(variables) > 1:
# XXX: better way? We need this expand() to
# workaround e.g: expr = x*(x + y).
# (x*(x + y)).as_leading_term(x, y) currently returns
# x*y (wrong order term!). That's why we want to deal with
# expand()'ed expr (handled in "if expr.is_Add" branch below).
expr = expr.expand()
if expr.is_Add:
lst = expr.extract_leading_order(args)
expr = Add(*[f.expr for (e, f) in lst])
elif expr:
expr = expr.as_leading_term(*args)
expr = expr.as_independent(*args, as_Add=False)[1]
expr = expand_power_base(expr)
expr = expand_log(expr)
if len(args) == 1:
# The definition of O(f(x)) symbol explicitly stated that
# the argument of f(x) is irrelevant. That's why we can
# combine some power exponents (only "on top" of the
# expression tree for f(x)), e.g.:
# x**p * (-x)**q -> x**(p+q) for real p, q.
x = args[0]
margs = list(Mul.make_args(
expr.as_independent(x, as_Add=False)[1]))
for i, t in enumerate(margs):
if t.is_Pow:
b, q = t.args
if b in (x, -x) and q.is_real and not q.has(x):
margs[i] = x**q
elif b.is_Pow and not b.exp.has(x):
b, r = b.args
if b in (x, -x) and r.is_real:
margs[i] = x**(r*q)
elif b.is_Mul and b.args[0] is S.NegativeOne:
b = -b
if b.is_Pow and not b.exp.has(x):
b, r = b.args
if b in (x, -x) and r.is_real:
margs[i] = x**(r*q)
expr = Mul(*margs)
expr = expr.subs(rs)
if expr is S.Zero:
return expr
if expr.is_Order:
expr = expr.expr
if not expr.has(*variables):
expr = S.One
# create Order instance:
vp = dict(zip(variables, point))
variables.sort(key=default_sort_key)
point = [vp[v] for v in variables]
args = (expr,) + Tuple(*zip(variables, point))
obj = Expr.__new__(cls, *args)
return obj
def _eval_nseries(self, x, n, logx):
return self
@property
def expr(self):
return self.args[0]
@property
def variables(self):
if self.args[1:]:
return tuple(x[0] for x in self.args[1:])
else:
return ()
@property
def point(self):
if self.args[1:]:
return tuple(x[1] for x in self.args[1:])
else:
return ()
@property
def free_symbols(self):
return self.expr.free_symbols | set(self.variables)
def _eval_power(b, e):
if e.is_Number and e.is_nonnegative:
return b.func(b.expr ** e, *b.args[1:])
return
def as_expr_variables(self, order_symbols):
if order_symbols is None:
order_symbols = self.args[1:]
else:
if not all(o[1] == order_symbols[0][1] for o in order_symbols) and \
not all(p == self.point[0] for p in self.point):
raise NotImplementedError('Order at points other than 0 '
                    'or oo not supported, got %s as a point.' % (self.point,))
if order_symbols[0][1] != self.point[0]:
raise NotImplementedError(
"Multiplying Order at different points is not supported.")
order_symbols = dict(order_symbols)
for s, p in dict(self.args[1:]).items():
if s not in order_symbols.keys():
order_symbols[s] = p
order_symbols = sorted(order_symbols.items(), key=lambda x: default_sort_key(x[0]))
return self.expr, tuple(order_symbols)
def removeO(self):
return S.Zero
def getO(self):
return self
@cacheit
def contains(self, expr):
"""
Return True if expr belongs to Order(self.expr, \*self.variables).
Return False if self belongs to expr.
Return None if the inclusion relation cannot be determined
(e.g. when self and expr have different symbols).
"""
from sympy import powsimp
if expr is S.Zero:
return True
if expr is S.NaN:
return False
if expr.is_Order:
if not all(p == expr.point[0] for p in expr.point) and \
not all(p == self.point[0] for p in self.point):
raise NotImplementedError('Order at points other than 0 '
                    'or oo not supported, got %s as a point.' % (self.point,))
else:
# self and/or expr is O(1):
if any(not p for p in [expr.point, self.point]):
point = self.point + expr.point
if point:
point = point[0]
else:
point = S.Zero
else:
point = self.point[0]
if expr.expr == self.expr:
# O(1) + O(1), O(1) + O(1, x), etc.
return all([x in self.args[1:] for x in expr.args[1:]])
if expr.expr.is_Add:
return all([self.contains(x) for x in expr.expr.args])
if self.expr.is_Add:
return any([self.func(x, *self.args[1:]).contains(expr)
for x in self.expr.args])
if self.variables and expr.variables:
common_symbols = tuple(
[s for s in self.variables if s in expr.variables])
elif self.variables:
common_symbols = self.variables
else:
common_symbols = expr.variables
if not common_symbols:
return None
r = None
ratio = self.expr/expr.expr
ratio = powsimp(ratio, deep=True, combine='exp')
for s in common_symbols:
l = ratio.limit(s, point)
from sympy.series.limits import Limit
if not isinstance(l, Limit):
l = l != 0
else:
l = None
if r is None:
r = l
else:
if r != l:
return
return r
obj = self.func(expr, *self.args[1:])
return self.contains(obj)
def __contains__(self, other):
result = self.contains(other)
if result is None:
raise TypeError('contains did not evaluate to a bool')
return result
def _eval_subs(self, old, new):
if old in self.variables:
newexpr = self.expr.subs(old, new)
i = self.variables.index(old)
newvars = list(self.variables)
newpt = list(self.point)
if new.is_Symbol:
newvars[i] = new
else:
syms = new.free_symbols
if len(syms) == 1 or old in syms:
if old in syms:
var = self.variables[i]
else:
var = syms.pop()
# First, try to substitute self.point in the "new"
# expr to see if this is a fixed point.
# E.g. O(y).subs(y, sin(x))
point = new.subs(var, self.point[i])
if point != self.point[i]:
from sympy.solvers import solve
d = Dummy()
res = solve(old - new.subs(var, d), d, dict=True)
point = d.subs(res[0]).limit(old, self.point[i])
newvars[i] = var
newpt[i] = point
elif old not in syms:
del newvars[i], newpt[i]
if not syms and new == self.point[i]:
newvars.extend(syms)
newpt.extend([S.Zero]*len(syms))
else:
return
return Order(newexpr, *zip(newvars, newpt))
def _eval_conjugate(self):
expr = self.expr._eval_conjugate()
if expr is not None:
return self.func(expr, *self.args[1:])
def _eval_derivative(self, x):
return self.func(self.expr.diff(x), *self.args[1:]) or self
def _eval_transpose(self):
expr = self.expr._eval_transpose()
if expr is not None:
return self.func(expr, *self.args[1:])
def _sage_(self):
#XXX: SAGE doesn't have Order yet. Let's return 0 instead.
return Rational(0)._sage_()
O = Order
|
bsd-3-clause
|
gurinderhans/pysnap
|
pysnap/__init__.py
|
3
|
12642
|
#!/usr/bin/env python
import json
import os.path
from time import time
from pysnap.utils import (encrypt, decrypt, decrypt_story,
make_media_id, request)
MEDIA_IMAGE = 0
MEDIA_VIDEO = 1
MEDIA_VIDEO_NOAUDIO = 2
FRIEND_CONFIRMED = 0
FRIEND_UNCONFIRMED = 1
FRIEND_BLOCKED = 2
PRIVACY_EVERYONE = 0
PRIVACY_FRIENDS = 1
def is_video(data):
return len(data) > 1 and data[0:2] == b'\x00\x00'
def is_image(data):
return len(data) > 1 and data[0:2] == b'\xFF\xD8'
def is_zip(data):
return len(data) > 1 and data[0:2] == b'PK'
def get_file_extension(media_type):
if media_type in (MEDIA_VIDEO, MEDIA_VIDEO_NOAUDIO):
return 'mp4'
if media_type == MEDIA_IMAGE:
return 'jpg'
return ''
def get_media_type(data):
if is_video(data):
return MEDIA_VIDEO
if is_image(data):
return MEDIA_IMAGE
return None
def _map_keys(snap):
return {
u'id': snap.get('id', None),
u'media_id': snap.get('c_id', None),
u'media_type': snap.get('m', None),
u'time': snap.get('t', None),
u'sender': snap.get('sn', None),
u'recipient': snap.get('rp', None),
u'status': snap.get('st', None),
u'screenshot_count': snap.get('c', None),
u'sent': snap.get('sts', None),
u'opened': snap.get('ts', None)
}
class Snapchat(object):
"""Construct a :class:`Snapchat` object used for communicating
with the Snapchat API.
Usage:
from pysnap import Snapchat
snapchat = Snapchat()
snapchat.login('username', 'password')
...
"""
def __init__(self):
self.username = None
self.auth_token = None
def _request(self, endpoint, data=None, files=None,
raise_for_status=True, req_type='post'):
return request(endpoint, self.auth_token, data, files,
raise_for_status, req_type)
def _unset_auth(self):
self.username = None
self.auth_token = None
def login(self, username, password):
"""Login to Snapchat account
Returns a dict containing user information on successful login, the
data returned is similar to get_updates.
:param username Snapchat username
:param password Snapchat password
"""
self._unset_auth()
r = self._request('login', {
'username': username,
'password': password
})
result = r.json()
if 'updates_response' in result:
if 'auth_token' in result['updates_response']:
self.auth_token = result['updates_response']['auth_token']
if 'username' in result['updates_response']:
self.username = username
if self.username is None and self.auth_token is None:
raise Exception(result.get('message', 'unknown error'))
return result
def logout(self):
"""Logout of Snapchat account
Returns true if logout was successful.
"""
r = self._request('logout', {'username': self.username})
return len(r.content) == 0
def get_updates(self, update_timestamp=0):
"""Get user, friend and snap updates
Returns a dict containing user, friends and snap information.
:param update_timestamp: Optional timestamp (epoch in seconds) to limit
updates
"""
r = self._request('updates', {
'username': self.username,
'update_timestamp': update_timestamp
})
result = r.json()
if 'auth_token' in result:
self.auth_token = result['auth_token']
return result
def get_snaps(self, update_timestamp=0):
"""Get snaps
Returns a dict containing metadata for snaps
:param update_timestamp: Optional timestamp (epoch in seconds) to limit
updates
"""
updates = self.get_updates(update_timestamp)
# Filter out snaps containing c_id as these are sent snaps
return [_map_keys(snap) for snap in updates['snaps']
if 'c_id' not in snap]
def get_friend_stories(self, update_timestamp=0):
"""Get stories
Returns a dict containing metadata for stories
:param update_timestamp: Optional timestamp (epoch in seconds) to limit
updates
"""
r = self._request("all_updates", {
'username': self.username,
'update_timestamp': update_timestamp
})
result = r.json()
if 'auth_token' in result:
self.auth_token = result['auth_token']
stories = []
story_groups = result['stories_response']['friend_stories']
for group in story_groups:
sender = group['username']
for story in group['stories']:
obj = story['story']
obj['sender'] = sender
stories.append(obj)
return stories
def get_story_blob(self, story_id, story_key, story_iv):
"""Get the image or video of a given snap
Returns the decrypted image or a video of the given snap or None if
data is invalid.
:param story_id: Media id to fetch
:param story_key: Encryption key of the story
:param story_iv: Encryption IV of the story
"""
r = self._request('story_blob', {'story_id': story_id},
raise_for_status=False, req_type='get')
data = decrypt_story(r.content, story_key, story_iv)
if any((is_image(data), is_video(data), is_zip(data))):
return data
return None
def get_blob(self, snap_id):
"""Get the image or video of a given snap
Returns the decrypted image or a video of the given snap or None if
data is invalid.
:param snap_id: Snap id to fetch
"""
r = self._request('blob', {'username': self.username, 'id': snap_id},
raise_for_status=False)
data = decrypt(r.content)
if any((is_image(data), is_video(data), is_zip(data))):
return data
return None
def send_events(self, events, data=None):
"""Send event data
Returns true on success.
:param events: List of events to send
:param data: Additional data to send
"""
if data is None:
data = {}
r = self._request('update_snaps', {
'username': self.username,
'events': json.dumps(events),
'json': json.dumps(data)
})
return len(r.content) == 0
def mark_viewed(self, snap_id, view_duration=1):
"""Mark a snap as viewed
Returns true on success.
:param snap_id: Snap id to mark as viewed
:param view_duration: Number of seconds snap was viewed
"""
now = time()
data = {snap_id: {u't': now, u'sv': view_duration}}
events = [
{
u'eventName': u'SNAP_VIEW', u'params': {u'id': snap_id},
u'ts': int(round(now)) - view_duration
},
{
u'eventName': u'SNAP_EXPIRED', u'params': {u'id': snap_id},
u'ts': int(round(now))
}
]
return self.send_events(events, data)
def mark_screenshot(self, snap_id, view_duration=1):
"""Mark a snap as screenshotted
Returns true on success.
:param snap_id: Snap id to mark as viewed
:param view_duration: Number of seconds snap was viewed
"""
now = time()
data = {snap_id: {u't': now, u'sv': view_duration, u'c': 3}}
events = [
{
u'eventName': u'SNAP_SCREENSHOT', u'params': {u'id': snap_id},
u'ts': int(round(now)) - view_duration
}
]
return self.send_events(events, data)
def update_privacy(self, friends_only):
"""Set privacy settings
Returns true on success.
:param friends_only: True to allow snaps from friends only
"""
setting = lambda f: PRIVACY_FRIENDS if f else PRIVACY_EVERYONE
r = self._request('settings', {
'username': self.username,
'action': 'updatePrivacy',
'privacySetting': setting(friends_only)
})
return r.json().get('param') == str(setting(friends_only))
def get_friends(self):
"""Get friends
Returns a list of friends.
"""
return self.get_updates().get('friends', [])
def get_best_friends(self):
"""Get best friends
Returns a list of best friends.
"""
return self.get_updates().get('bests', [])
def add_friend(self, username):
"""Add user as friend
Returns JSON response.
Expected messages:
Success: '{username} is now your friend!'
Pending: '{username} is private. Friend request sent.'
Failure: 'Sorry! Couldn't find {username}'
:param username: Username to add as a friend
"""
r = self._request('friend', {
'action': 'add',
'friend': username,
'username': self.username
})
return r.json()
def delete_friend(self, username):
"""Remove user from friends
Returns true on success.
:param username: Username to remove from friends
"""
r = self._request('friend', {
'action': 'delete',
'friend': username,
'username': self.username
})
return r.json()['updates_response'].get('logged')
def block(self, username):
"""Block a user
Returns true on success.
:param username: Username to block
"""
r = self._request('friend', {
'action': 'block',
'friend': username,
'username': self.username
})
return r.json().get('message') == '{0} was blocked'.format(username)
def unblock(self, username):
"""Unblock a user
Returns true on success.
:param username: Username to unblock
"""
r = self._request('friend', {
'action': 'unblock',
'friend': username,
'username': self.username
})
return r.json().get('message') == '{0} was unblocked'.format(username)
def get_blocked(self):
"""Find blocked users
Returns a list of currently blocked users.
"""
return [f for f in self.get_friends() if f['type'] == FRIEND_BLOCKED]
def upload(self, path):
"""Upload media
Returns the media ID on success. The media ID is used when sending
the snap.
"""
if not os.path.exists(path):
raise ValueError('No such file: {0}'.format(path))
with open(path, 'rb') as f:
data = f.read()
media_type = get_media_type(data)
if media_type is None:
raise ValueError('Could not determine media type for given data')
media_id = make_media_id(self.username)
r = self._request('upload', {
'username': self.username,
'media_id': media_id,
'type': media_type
}, files={'data': encrypt(data)})
return media_id if len(r.content) == 0 else None
def send(self, media_id, recipients, time=5):
"""Send a snap. Requires a media_id returned by the upload method
Returns true if the snap was sent successfully
"""
r = self._request('send', {
'username': self.username,
'media_id': media_id,
'recipient': recipients,
'time': time,
'zipped': '0'
})
return len(r.content) == 0
def send_to_story(self, media_id, time=5, media_type=0):
"""Send a snap to your story. Requires a media_id returned by the upload method
Returns true if the snap was sent successfully.
"""
r = self._request('post_story', {
'username': self.username,
'media_id': media_id,
'client_id': media_id,
'time': time,
'type': media_type,
'zipped': '0'
})
return r.json()
def clear_feed(self):
"""Clear the user's feed
Returns true if feed was successfully cleared.
"""
r = self._request('clear', {
'username': self.username
})
return len(r.content) == 0
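# Minimal upload-and-send sketch using the methods above (illustrative;
# assumes valid credentials and a local image or video file):
#
#   s = Snapchat()
#   s.login('username', 'password')
#   media_id = s.upload('photo.jpg')
#   if media_id:
#       s.send(media_id, 'friend_username', time=5)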
|
mit
|
grap/OCB
|
addons/account_budget/report/analytic_account_budget_report.py
|
53
|
7537
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp import pooler
from openerp.report import report_sxw
class analytic_account_budget_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(analytic_account_budget_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'funct': self.funct,
'funct_total': self.funct_total,
'time': time,
})
self.context = context
def funct(self, object, form, ids=None, done=None, level=1):
if ids is None:
ids = {}
if not ids:
ids = self.ids
if not done:
done = {}
global tot
tot = {
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result = []
accounts = self.pool.get('account.analytic.account').browse(self.cr, self.uid, [object.id], self.context.copy())
c_b_lines_obj = self.pool.get('crossovered.budget.lines')
obj_c_budget = self.pool.get('crossovered.budget')
for account_id in accounts:
res = {}
b_line_ids = []
for line in account_id.crossovered_budget_line:
b_line_ids.append(line.id)
if not b_line_ids:
return []
d_from = form['date_from']
d_to = form['date_to']
self.cr.execute('SELECT DISTINCT(crossovered_budget_id) FROM crossovered_budget_lines WHERE id =ANY(%s)',(b_line_ids,))
budget_ids = self.cr.fetchall()
context = {'wizard_date_from':d_from,'wizard_date_to':d_to}
for i in range(0, len(budget_ids)):
budget_name = obj_c_budget.browse(self.cr, self.uid, [budget_ids[i][0]])
res= {
'b_id':'-1',
'a_id':'-1',
'name':budget_name[0].name,
'status':1,
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result.append(res)
line_ids = c_b_lines_obj.search(self.cr, self.uid, [('id', 'in', b_line_ids), ('crossovered_budget_id','=',budget_ids[i][0])])
line_id = c_b_lines_obj.browse(self.cr, self.uid, line_ids)
tot_theo = tot_pln = tot_prac = tot_perc = 0
done_budget = []
for line in line_id:
if line.id in b_line_ids:
theo = pract = 0.00
theo = c_b_lines_obj._theo_amt(self.cr, self.uid, [line.id], context)[line.id]
pract = c_b_lines_obj._prac_amt(self.cr, self.uid, [line.id], context)[line.id]
if line.general_budget_id.id in done_budget:
for record in result:
if record['b_id'] == line.general_budget_id.id and record['a_id'] == line.analytic_account_id.id:
record['theo'] += theo
record['pln'] += line.planned_amount
record['prac'] += pract
record['perc'] += line.percentage
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += line.percentage
else:
res1 = {
'b_id': line.general_budget_id.id,
'a_id': line.analytic_account_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': theo,
'pln': line.planned_amount,
'prac': pract,
'perc': line.percentage
}
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += line.percentage
result.append(res1)
done_budget.append(line.general_budget_id.id)
else:
if line.general_budget_id.id in done_budget:
continue
else:
res1={
'b_id': line.general_budget_id.id,
'a_id': line.analytic_account_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': 0.00,
'pln': 0.00,
'prac': 0.00,
'perc': 0.00
}
result.append(res1)
done_budget.append(line.general_budget_id.id)
if tot_theo == 0.00:
tot_perc = 0.00
else:
tot_perc = float(tot_prac / tot_theo) * 100
result[-(len(done_budget) +1)]['theo'] = tot_theo
tot['theo'] +=tot_theo
result[-(len(done_budget) +1)]['pln'] = tot_pln
tot['pln'] +=tot_pln
result[-(len(done_budget) +1)]['prac'] = tot_prac
tot['prac'] +=tot_prac
result[-(len(done_budget) +1)]['perc'] = tot_perc
if tot['theo'] == 0.00:
tot['perc'] = 0.00
else:
tot['perc'] = float(tot['prac'] / tot['theo']) * 100
return result
def funct_total(self, form):
result = []
res = {}
res = {
'tot_theo': tot['theo'],
'tot_pln': tot['pln'],
'tot_prac': tot['prac'],
'tot_perc': tot['perc']
}
result.append(res)
return result
report_sxw.report_sxw('report.account.analytic.account.budget', 'account.analytic.account', 'addons/account_budget/report/analytic_account_budget_report.rml',parser=analytic_account_budget_report,header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
KaranToor/MA450
|
google-cloud-sdk/lib/surface/test/android/devices/__init__.py
|
4
|
1372
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The 'gcloud test android devices' command group."""
from googlecloudsdk.calliope import base
class Devices(base.Group):
"""Explore Android devices available in the Test Environment catalog."""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To list all Android devices available for running tests, along with
their basic characteristics and supported Android OS versions, run:
$ {command} list
""",
}
@staticmethod
def Args(parser):
"""Method called by Calliope to register flags common to this sub-group.
Args:
parser: An argparse parser used to add arguments that immediately follow
this group in the CLI. Positional arguments are allowed.
"""
pass
|
apache-2.0
|
CydarLtd/ansible
|
lib/ansible/modules/cloud/amazon/ecs_service_facts.py
|
50
|
9143
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_service_facts
short_description: list or describe services in ecs
notes:
- for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
description:
- Lists or describes services in ecs.
version_added: "2.1"
author:
- "Mark Chance (@java1guy)"
- "Darek Kaczynski (@kaczynskid)"
requirements: [ json, boto, botocore, boto3 ]
options:
details:
description:
- Set this to true if you want detailed information about the services.
required: false
default: 'false'
choices: ['true', 'false']
cluster:
description:
- The cluster ARNS in which to list the services.
required: false
default: 'default'
service:
description:
- The service to get details for (required if details is true)
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic listing example
- ecs_service_facts:
cluster: test-cluster
service: console-test-service
details: true
# Basic listing example
- ecs_service_facts:
cluster: test-cluster
'''
RETURN = '''
services:
description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
returned: success
type: complex
contains:
clusterArn:
            description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
returned: always
type: string
desiredCount:
description: The desired number of instantiations of the task definition to keep running on the service.
returned: always
type: int
loadBalancers:
description: A list of load balancer objects
returned: always
type: complex
contains:
loadBalancerName:
                    description: The name of the load balancer.
returned: always
type: string
containerName:
description: The name of the container to associate with the load balancer.
returned: always
type: string
containerPort:
description: The port on the container to associate with the load balancer.
returned: always
type: int
pendingCount:
description: The number of tasks in the cluster that are in the PENDING state.
returned: always
type: int
runningCount:
description: The number of tasks in the cluster that are in the RUNNING state.
returned: always
type: int
serviceArn:
            description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service.
returned: always
type: string
serviceName:
description: A user-generated string used to identify the service
returned: always
type: string
status:
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
returned: always
type: string
taskDefinition:
description: The ARN of a task definition to use for tasks in the service.
returned: always
type: string
deployments:
description: list of service deployments
returned: always
type: list of complex
events:
            description: list of service events
returned: always
type: list of complex
''' # NOQA
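# A minimal, hedged sketch of consuming this module's output in a playbook
# (the "ecs_out" variable and the cluster/service names are illustrative):
#
#   - ecs_service_facts:
#       cluster: test-cluster
#       details: true
#       service: console-test-service
#     register: ecs_out
#   - debug:
#       var: ecs_out.ansible_facts.services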
try:
import boto
import botocore
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
class EcsServiceManager:
"""Handles ECS Services"""
def __init__(self, module):
self.module = module
try:
# self.ecs = boto3.client('ecs')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
self.module.fail_json(msg="Can't authorize connection - %s" % str(e))
# def list_clusters(self):
# return self.client.list_clusters()
# {'failures': [],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
# 'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default',
# 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
# {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
# 'clusters': []}
def list_services(self, cluster):
fn_args = dict()
if cluster and cluster is not None:
fn_args['cluster'] = cluster
response = self.ecs.list_services(**fn_args)
relevant_response = dict(services = response['serviceArns'])
return relevant_response
def describe_services(self, cluster, services):
fn_args = dict()
if cluster and cluster is not None:
fn_args['cluster'] = cluster
fn_args['services']=services.split(",")
response = self.ecs.describe_services(**fn_args)
relevant_response = dict(services = map(self.extract_service_from, response['services']))
if 'failures' in response and len(response['failures'])>0:
relevant_response['services_not_running'] = response['failures']
return relevant_response
def extract_service_from(self, service):
# some fields are datetime which is not JSON serializable
# make them strings
if 'deployments' in service:
for d in service['deployments']:
if 'createdAt' in d:
d['createdAt'] = str(d['createdAt'])
if 'updatedAt' in d:
d['updatedAt'] = str(d['updatedAt'])
if 'events' in service:
for e in service['events']:
if 'createdAt' in e:
e['createdAt'] = str(e['createdAt'])
return service
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
details=dict(required=False, type='bool', default=False ),
cluster=dict(required=False, type='str' ),
service=dict(required=False, type='str' )
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
show_details = module.params.get('details', False)
task_mgr = EcsServiceManager(module)
if show_details:
if 'service' not in module.params or not module.params['service']:
module.fail_json(msg="service must be specified for ecs_service_facts")
ecs_facts = task_mgr.describe_services(module.params['cluster'], module.params['service'])
else:
ecs_facts = task_mgr.list_services(module.params['cluster'])
ecs_facts_result = dict(changed=False, ansible_facts=ecs_facts)
module.exit_json(**ecs_facts_result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
tealover/nova
|
nova/tests/unit/objects/test_agent.py
|
53
|
3762
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova.objects import agent as agent_obj
from nova.tests.unit.objects import test_objects
fake_agent = {
'id': 1,
'hypervisor': 'novavm',
'os': 'linux',
'architecture': 'DISC',
'version': '1.0',
'url': 'http://openstack.org/novavm/agents/novavm_agent_v1.0.rpm',
'md5hash': '8cb151f3adc23a92db8ddbe084796823',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
class _TestAgent(object):
@staticmethod
def _compare(test, db, obj):
for field, value in db.items():
test.assertEqual(db[field], obj[field])
@mock.patch('nova.db.agent_build_get_by_triple')
def test_get_by_triple(self, mock_get):
mock_get.return_value = fake_agent
agent = agent_obj.Agent.get_by_triple(self.context,
'novavm', 'linux', 'DISC')
self._compare(self, fake_agent, agent)
@mock.patch('nova.db.agent_build_get_by_triple')
def test_get_by_triple_none(self, mock_get):
mock_get.return_value = None
agent = agent_obj.Agent.get_by_triple(self.context,
'novavm', 'linux', 'DISC')
self.assertIsNone(agent)
@mock.patch('nova.db.agent_build_create')
def test_create(self, mock_create):
mock_create.return_value = fake_agent
agent = agent_obj.Agent(context=self.context)
agent.hypervisor = 'novavm'
agent.create()
mock_create.assert_called_once_with(self.context,
{'hypervisor': 'novavm'})
self._compare(self, fake_agent, agent)
@mock.patch('nova.db.agent_build_create')
def test_create_with_id(self, mock_create):
agent = agent_obj.Agent(context=self.context, id=123)
self.assertRaises(exception.ObjectActionError, agent.create)
self.assertFalse(mock_create.called)
@mock.patch('nova.db.agent_build_destroy')
def test_destroy(self, mock_destroy):
agent = agent_obj.Agent(context=self.context, id=123)
agent.destroy()
mock_destroy.assert_called_once_with(self.context, 123)
@mock.patch('nova.db.agent_build_update')
def test_save(self, mock_update):
mock_update.return_value = fake_agent
agent = agent_obj.Agent(context=self.context, id=123)
agent.obj_reset_changes()
agent.hypervisor = 'novavm'
agent.save()
mock_update.assert_called_once_with(self.context, 123,
{'hypervisor': 'novavm'})
@mock.patch('nova.db.agent_build_get_all')
def test_get_all(self, mock_get_all):
mock_get_all.return_value = [fake_agent]
agents = agent_obj.AgentList.get_all(self.context, hypervisor='novavm')
self.assertEqual(1, len(agents))
self._compare(self, fake_agent, agents[0])
mock_get_all.assert_called_once_with(self.context, hypervisor='novavm')
class TestAgent(test_objects._LocalTest, _TestAgent):
pass
class TestAgentRemote(test_objects._RemoteTest, _TestAgent):
pass
|
apache-2.0
|
googlefonts/TachyFont
|
run_time/src/gae_server/third_party/old-fonttools-master/Lib/fontTools/ttLib/tables/C_O_L_R_.py
|
11
|
5313
|
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import operator
import struct
class table_C_O_L_R_(DefaultTable.DefaultTable):
""" This table is structured so that you can treat it like a dictionary keyed by glyph name.
ttFont['COLR'][<glyphName>] will return the color layers for any glyph
ttFont['COLR'][<glyphName>] = <value> will set the color layers for any glyph.
"""
def decompile(self, data, ttFont):
self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
self.version, numBaseGlyphRecords, offsetBaseGlyphRecord, offsetLayerRecord, numLayerRecords = struct.unpack(">HHLLH", data[:14])
assert (self.version == 0), "Version of COLR table is higher than I know how to handle"
glyphOrder = ttFont.getGlyphOrder()
gids = []
layerLists = []
glyphPos = offsetBaseGlyphRecord
for i in range(numBaseGlyphRecords):
gid, firstLayerIndex, numLayers = struct.unpack(">HHH", data[glyphPos:glyphPos+6])
glyphPos += 6
gids.append(gid)
assert (firstLayerIndex + numLayers <= numLayerRecords)
layerPos = offsetLayerRecord + firstLayerIndex * 4
layers = []
for j in range(numLayers):
layerGid, colorID = struct.unpack(">HH", data[layerPos:layerPos+4])
try:
layerName = glyphOrder[layerGid]
except IndexError:
layerName = self.getGlyphName(layerGid)
layerPos += 4
layers.append(LayerRecord(layerName, colorID))
layerLists.append(layers)
self.ColorLayers = colorLayerLists = {}
try:
names = list(map(operator.getitem, [glyphOrder]*numBaseGlyphRecords, gids))
except IndexError:
getGlyphName = self.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [colorLayerLists]*numBaseGlyphRecords, names, layerLists))
def compile(self, ttFont):
ordered = []
ttFont.getReverseGlyphMap(rebuild=True)
glyphNames = self.ColorLayers.keys()
for glyphName in glyphNames:
try:
gid = ttFont.getGlyphID(glyphName)
except:
assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName)
ordered.append([gid, glyphName, self.ColorLayers[glyphName]])
ordered.sort()
glyphMap = []
layerMap = []
for (gid, glyphName, layers) in ordered:
glyphMap.append(struct.pack(">HHH", gid, len(layerMap), len(layers)))
for layer in layers:
layerMap.append(struct.pack(">HH", ttFont.getGlyphID(layer.name), layer.colorID))
dataList = [struct.pack(">HHLLH", self.version, len(glyphMap), 14, 14+6*len(glyphMap), len(layerMap))]
dataList.extend(glyphMap)
dataList.extend(layerMap)
data = bytesjoin(dataList)
return data
def toXML(self, writer, ttFont):
writer.simpletag("version", value=self.version)
writer.newline()
ordered = []
glyphNames = self.ColorLayers.keys()
for glyphName in glyphNames:
try:
gid = ttFont.getGlyphID(glyphName)
except:
assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName)
ordered.append([gid, glyphName, self.ColorLayers[glyphName]])
ordered.sort()
for entry in ordered:
writer.begintag("ColorGlyph", name=entry[1])
writer.newline()
for layer in entry[2]:
layer.toXML(writer, ttFont)
writer.endtag("ColorGlyph")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if not hasattr(self, "ColorLayers"):
self.ColorLayers = {}
self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
if name == "ColorGlyph":
glyphName = attrs["name"]
for element in content:
if isinstance(element, basestring):
continue
layers = []
for element in content:
if isinstance(element, basestring):
continue
layer = LayerRecord()
layer.fromXML(element[0], element[1], element[2], ttFont)
layers.append (layer)
operator.setitem(self, glyphName, layers)
elif "value" in attrs:
setattr(self, name, safeEval(attrs["value"]))
def __getitem__(self, glyphSelector):
if isinstance(glyphSelector, int):
			# it's a GID; convert to glyph name
glyphSelector = self.getGlyphName(glyphSelector)
if glyphSelector not in self.ColorLayers:
return None
return self.ColorLayers[glyphSelector]
def __setitem__(self, glyphSelector, value):
if isinstance(glyphSelector, int):
			# it's a GID; convert to glyph name
glyphSelector = self.getGlyphName(glyphSelector)
if value:
self.ColorLayers[glyphSelector] = value
elif glyphSelector in self.ColorLayers:
del self.ColorLayers[glyphSelector]
def __delitem__(self, glyphSelector):
del self.ColorLayers[glyphSelector]
class LayerRecord(object):
def __init__(self, name = None, colorID = None):
self.name = name
self.colorID = colorID
def toXML(self, writer, ttFont):
writer.simpletag("layer", name=self.name, colorID=self.colorID)
writer.newline()
def fromXML(self, eltname, attrs, content, ttFont):
for (name, value) in attrs.items():
if name == "name":
if isinstance(value, int):
value = ttFont.getGlyphName(value)
setattr(self, name, value)
else:
setattr(self, name, safeEval(value))
|
apache-2.0
|
muchbeli/bitcoin-abe
|
Abe/upgrade.py
|
20
|
43693
|
#!/usr/bin/env python
# Copyright(C) 2011,2012,2013,2014 by Abe developers.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
"""Upgrade to the current database schema."""
import os
import sys
import DataStore
import util
def run_upgrades_locked(store, upgrades):
for i in xrange(len(upgrades) - 1):
vers, func = upgrades[i]
if store.config['schema_version'] == vers:
sv = upgrades[i+1][0]
store.log.warning("Upgrading schema to version: %s", sv)
func(store)
if sv[:3] == 'Abe':
store.sql(
"UPDATE configvar SET configvar_value = ?"
" WHERE configvar_name = 'schema_version'",
(sv,))
if store.rowcount() != 1:
raise Exception("Failed to update schema_version");
else:
store.sql(
"UPDATE config SET schema_version = ? WHERE config_id = 1",
(sv,))
store.commit()
store.config['schema_version'] = sv
def run_upgrades(store, upgrades):
"""Guard against concurrent upgrades."""
lock = store.get_lock()
try:
run_upgrades_locked(store, upgrades)
finally:
store.release_lock(lock)
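# A hedged illustration of the (version, function) pairs that
# run_upgrades_locked consumes: a step runs when store.config['schema_version']
# equals its version string, and the *next* entry's version is then written
# back as the new schema_version, so the last entry only names the target
# version and its function is never called. "example_upgrades" is hypothetical;
# the real table is the module-level "upgrades" list further down.
#
#   example_upgrades = [
#       ('6', add_block_value_in),   # applied when schema_version == '6'
#       ('6.1', None),               # sentinel: final version only
#   ]
#   # run_upgrades(store, example_upgrades)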
def add_block_value_in(store):
store.sql("ALTER TABLE block ADD block_value_in NUMERIC(30)")
def add_block_value_out(store):
store.sql("ALTER TABLE block ADD block_value_out NUMERIC(30)")
def add_block_total_satoshis(store):
store.sql("ALTER TABLE block ADD block_total_satoshis NUMERIC(26)")
def add_block_total_seconds(store):
store.sql("ALTER TABLE block ADD block_total_seconds NUMERIC(20)")
def add_block_satoshi_seconds(store):
store.sql("ALTER TABLE block ADD block_satoshi_seconds NUMERIC(28)")
def add_block_total_ss(store):
store.sql("ALTER TABLE block ADD block_total_ss NUMERIC(28)")
def add_satoshi_seconds_destroyed(store):
store.sql("ALTER TABLE block_tx ADD satoshi_seconds_destroyed NUMERIC(28)")
def add_cc_block_height(store):
store.sql("ALTER TABLE chain_candidate ADD block_height NUMERIC(14)")
def init_cc_block_height(store):
store.sql(
"""UPDATE chain_candidate cc
SET block_height = (
SELECT block_height
FROM block b
WHERE b.block_id = cc.block_id)
""")
def index_cc_block_height(store):
store.sql(
"""CREATE INDEX x_cc_chain_block_height
ON chain_candidate (chain_id, block_height)""")
def index_cc_block(store):
store.sql(
"""CREATE INDEX x_cc_block ON chain_candidate (block_id)""")
def create_block_txin(store):
store.sql(
"""CREATE TABLE block_txin (
block_id NUMERIC(14),
txin_id NUMERIC(26),
out_block_id NUMERIC(14),
PRIMARY KEY (block_id, txin_id)
)""")
def index_block_tx_tx(store):
try:
store.sql("DROP INDEX x_block_tx_tx")
except Exception:
store.rollback()
store.sql("CREATE INDEX x_block_tx_tx ON block_tx (tx_id)")
def init_block_txin(store):
store.log.info("Initializing block_txin.")
count = int(store.selectrow("SELECT COUNT(1) FROM block_txin")[0] or 0)
tried = 0
added = 0
seen = set()
store.log.info("...loading existing keys")
# XXX store.conn and store.sql_transform no longer exist.
cur = store.conn.cursor()
cur.execute(store.sql_transform("""
SELECT block_id, txin_id FROM block_txin"""))
for row in cur:
seen.add(row)
store.log.info("...finding output blocks")
cur.execute(store.sql_transform("""
SELECT bt.block_id, txin.txin_id, obt.block_id
FROM block_tx bt
JOIN txin USING (tx_id)
JOIN txout USING (txout_id)
JOIN block_tx obt ON (txout.tx_id = obt.tx_id)"""))
for row in cur:
(block_id, txin_id, oblock_id) = row
if (block_id, txin_id) not in seen:
# If oblock is an ancestor of block, insert into block_txin.
if store.is_descended_from(block_id, oblock_id):
store.sql("""
INSERT INTO block_txin (block_id, txin_id, out_block_id)
VALUES (?, ?, ?)""",
(block_id, txin_id, oblock_id))
count += 1
added += 1
if count % 1000 == 0:
store.commit()
store.log.info("commit %d", count)
tried += 1
if tried % 1000 == 0:
sys.stdout.write('\r%d/%d ' % (added, tried))
sys.stdout.flush()
store.log.info('done.')
def init_block_value_in(store):
store.log.info("Calculating block_value_in.")
for row in store.selectall("""
SELECT b.block_id, SUM(txout.txout_value)
FROM block b
JOIN block_tx USING (block_id)
JOIN txin USING (tx_id)
LEFT JOIN txout USING (txout_id)
GROUP BY b.block_id
"""):
store.sql("UPDATE block SET block_value_in = ? WHERE block_id = ?",
(int(row[1] or 0), row[0]))
def init_block_value_out(store):
store.log.info("Calculating block_value_out.")
for row in store.selectall("""
SELECT b.block_id, SUM(txout.txout_value)
FROM block b
JOIN block_tx USING (block_id)
JOIN txout USING (tx_id)
GROUP BY b.block_id
"""):
store.sql("UPDATE block SET block_value_out = ? WHERE block_id = ?",
(int(row[1]), row[0]))
def init_block_totals(store):
store.log.info("Calculating block total generated and age.")
last_chain_id = None
stats = None
for row in store.selectall("""
SELECT cc.chain_id, b.prev_block_id, b.block_id,
b.block_value_out - b.block_value_in, b.block_nTime
FROM chain_candidate cc
JOIN block b USING (block_id)
WHERE cc.block_height IS NOT NULL
ORDER BY cc.chain_id, cc.block_height"""):
chain_id, prev_id, block_id, generated, nTime = row
generated = int(generated)
nTime = int(nTime)
if chain_id != last_chain_id:
stats = {}
last_chain_id = chain_id
if prev_id is None:
stats[block_id] = {
"chain_start": nTime,
"satoshis": generated}
else:
stats[block_id] = {
"chain_start": stats[prev_id]['chain_start'],
"satoshis": generated + stats[prev_id]['satoshis']}
store.sql("UPDATE block SET block_total_seconds = ?,"
" block_total_satoshis = ?"
" WHERE block_id = ?",
(nTime - stats[block_id]['chain_start'],
stats[block_id]['satoshis'], block_id))
def init_satoshi_seconds_destroyed(store):
store.log.info("Calculating satoshi-seconds destroyed.")
count = 0
step = 100
start = 1
stop = int(store.selectrow("SELECT MAX(block_id) FROM block_tx")[0])
# XXX store.conn and store.sql_transform no longer exist.
cur = store.conn.cursor()
while start <= stop:
cur.execute(store.sql_transform("""
SELECT bt.block_id, bt.tx_id,
SUM(txout.txout_value * (b.block_nTime - ob.block_nTime))
FROM block b
JOIN block_tx bt USING (block_id)
JOIN txin USING (tx_id)
JOIN txout USING (txout_id)
JOIN block_tx obt ON (txout.tx_id = obt.tx_id)
JOIN block_txin bti ON (
bti.block_id = bt.block_id AND
bti.txin_id = txin.txin_id AND
obt.block_id = bti.out_block_id)
JOIN block ob ON (bti.out_block_id = ob.block_id)
WHERE bt.block_id >= ?
AND bt.block_id < ?
GROUP BY bt.block_id, bt.tx_id"""), (start, start + step))
for row in cur:
block_id, tx_id, destroyed = row
sys.stdout.write("\rssd: " + str(count) + " ")
count += 1
store.sql("UPDATE block_tx SET satoshi_seconds_destroyed = ?"
" WHERE block_id = ? AND tx_id = ?",
(destroyed, block_id, tx_id))
start += step
store.log.info("done.")
def set_0_satoshi_seconds_destroyed(store):
store.log.info("Setting NULL to 0 in satoshi_seconds_destroyed.")
# XXX store.conn and store.sql_transform no longer exist.
cur = store.conn.cursor()
cur.execute(store.sql_transform("""
SELECT bt.block_id, bt.tx_id
FROM block_tx bt
JOIN block b USING (block_id)
WHERE b.block_height IS NOT NULL
AND bt.satoshi_seconds_destroyed IS NULL"""))
for row in cur:
store.sql("""
UPDATE block_tx bt SET satoshi_seconds_destroyed = 0
WHERE block_id = ? AND tx_id = ?""", row)
def init_block_satoshi_seconds(store, ):
store.log.info("Calculating satoshi-seconds.")
# XXX store.conn and store.sql_transform no longer exist.
cur = store.conn.cursor()
stats = {}
cur.execute(store.sql_transform("""
SELECT b.block_id, b.block_total_satoshis, b.block_nTime,
b.prev_block_id, SUM(bt.satoshi_seconds_destroyed),
b.block_height
FROM block b
JOIN block_tx bt ON (b.block_id = bt.block_id)
GROUP BY b.block_id, b.block_total_satoshis, b.block_nTime,
b.prev_block_id, b.block_height
ORDER BY b.block_height"""))
count = 0
while True:
row = cur.fetchone()
if row is None:
break
block_id, satoshis, nTime, prev_id, destroyed, height = row
satoshis = int(satoshis)
destroyed = int(destroyed)
if height is None:
continue
if prev_id is None:
stats[block_id] = {
"satoshis": satoshis,
"ss": 0,
"total_ss": 0,
"nTime": nTime}
else:
created = (stats[prev_id]['satoshis']
* (nTime - stats[prev_id]['nTime']))
stats[block_id] = {
"satoshis": satoshis,
"ss": stats[prev_id]['ss'] + created - destroyed,
"total_ss": stats[prev_id]['total_ss'] + created,
"nTime": nTime}
store.sql("""
UPDATE block
SET block_satoshi_seconds = ?,
block_total_ss = ?,
block_ss_destroyed = ?
WHERE block_id = ?""",
(store.intin(stats[block_id]['ss']),
store.intin(stats[block_id]['total_ss']),
store.intin(destroyed),
block_id))
count += 1
if count % 1000 == 0:
store.commit()
store.log.info("Updated %d blocks", count)
if count % 1000 != 0:
store.log.info("Updated %d blocks", count)
def index_block_nTime(store):
store.log.info("Indexing block_nTime.")
store.sql("CREATE INDEX x_block_nTime ON block (block_nTime)")
def replace_chain_summary(store):
store.sql("DROP VIEW chain_summary")
store.sql("""
CREATE VIEW chain_summary AS SELECT
cc.chain_id,
cc.in_longest,
b.block_id,
b.block_hash,
b.block_version,
b.block_hashMerkleRoot,
b.block_nTime,
b.block_nBits,
b.block_nNonce,
cc.block_height,
b.prev_block_id,
prev.block_hash prev_block_hash,
b.block_chain_work,
b.block_num_tx,
b.block_value_in,
b.block_value_out,
b.block_total_satoshis,
b.block_total_seconds,
b.block_satoshi_seconds,
b.block_total_ss,
b.block_ss_destroyed
FROM chain_candidate cc
JOIN block b ON (cc.block_id = b.block_id)
LEFT JOIN block prev ON (b.prev_block_id = prev.block_id)""")
def drop_block_ss_columns(store):
"""Drop columns that may have been added in error."""
for c in ['created', 'destroyed']:
try:
store.sql("ALTER TABLE block DROP COLUMN block_ss_" + c)
except Exception:
store.rollback()
def add_constraint(store, table, name, constraint):
try:
store.sql("ALTER TABLE " + table + " ADD CONSTRAINT " + name +
" " + constraint)
except Exception:
store.log.exception(
"Failed to create constraint on table " + table + ": " +
constraint + "; ignoring error.")
store.rollback()
def add_fk_block_txin_block_id(store):
add_constraint(store, "block_txin", "fk1_block_txin",
"FOREIGN KEY (block_id) REFERENCES block (block_id)")
def add_fk_block_txin_tx_id(store):
add_constraint(store, "block_txin", "fk2_block_txin",
"FOREIGN KEY (txin_id) REFERENCES txin (txin_id)")
def add_fk_block_txin_out_block_id(store):
add_constraint(store, "block_txin", "fk3_block_txin",
"FOREIGN KEY (out_block_id) REFERENCES block (block_id)")
def add_chk_block_txin_out_block_id_nn(store):
add_constraint(store, "block_txin", "chk3_block_txin",
"CHECK (out_block_id IS NOT NULL)")
def create_x_cc_block_id(store):
store.sql("CREATE INDEX x_cc_block_id ON chain_candidate (block_id)")
def reverse_binary_hashes(store):
if store.config['binary_type'] != 'hex':
        raise Exception(
'To support search by hash prefix, we have to reverse all values'
' in block.block_hash, block.block_hashMerkleRoot, tx.tx_hash,'
' orphan_block.block_hashPrev, and unlinked_txin.txout_tx_hash.'
' This has not been automated. You may perform this step manually,'
' then issue "UPDATE config SET schema_version = \'9.1\'" and'
' rerun this program.')
def drop_x_cc_block_id(store):
"""Redundant with x_cc_block"""
store.sql("DROP INDEX x_cc_block_id")
def create_x_cc_block_height(store):
store.sql(
"CREATE INDEX x_cc_block_height ON chain_candidate (block_height)")
def create_txout_approx(store):
store.sql("""
CREATE VIEW txout_approx AS SELECT
txout_id,
tx_id,
txout_value txout_approx_value
FROM txout""")
def add_fk_chain_candidate_block_id(store):
add_constraint(store, "chain_candidate", "fk1_chain_candidate",
"FOREIGN KEY (block_id) REFERENCES block (block_id)")
def create_configvar(store):
store.sql("""
CREATE TABLE configvar (
configvar_name VARCHAR(100) NOT NULL PRIMARY KEY,
configvar_value VARCHAR(255)
)""")
def configure(store):
# XXX This won't work anymore.
store.args.binary_type = store.config['binary_type']
store.configure()
store.save_config()
def populate_abe_sequences(store):
if store.config['sql.sequence_type'] == 'update':
try:
store.sql("""CREATE TABLE abe_sequences (
key VARCHAR(100) NOT NULL PRIMARY KEY,
nextid NUMERIC(30)
)""")
except Exception:
store.rollback()
for t in ['block', 'tx', 'txin', 'txout', 'pubkey',
'chain', 'magic', 'policy']:
(last_id,) = store.selectrow("SELECT MAX(" + t + "_id) FROM " + t)
if last_id is None:
continue
store.sql("UPDATE abe_sequences SET nextid = ? WHERE key = ?"
" AND nextid <= ?",
(last_id + 1, t, last_id))
if store.rowcount() < 1:
store.sql("INSERT INTO abe_sequences (key, nextid)"
" VALUES (?, ?)", (t, last_id + 1))
def add_datadir_chain_id(store):
store.sql("ALTER TABLE datadir ADD chain_id NUMERIC(10) NULL")
def noop(store):
pass
def rescan_if_missed_blocks(store):
"""
Due to a bug, some blocks may have been loaded but not placed in
a chain. If so, reset all datadir offsets to 0 to force a rescan.
"""
(bad,) = store.selectrow("""
SELECT COUNT(1)
FROM block
LEFT JOIN chain_candidate USING (block_id)
WHERE chain_id IS NULL
""")
if bad > 0:
store.sql(
"UPDATE datadir SET blkfile_number = 1, blkfile_offset = 0")
def insert_missed_blocks(store):
"""
Rescanning doesn't always work due to timeouts and resource
constraints. This may help.
"""
missed = []
for row in store.selectall("""
SELECT b.block_id
FROM block b
LEFT JOIN chain_candidate cc ON (b.block_id = cc.block_id)
WHERE chain_id IS NULL
ORDER BY b.block_height
"""):
missed.append(row[0])
if not missed:
return
store.log.info("Attempting to repair %d missed blocks.", len(missed))
inserted = 0
for block_id in missed:
# Insert block if its previous block is in the chain.
# XXX This won't work if we want to support forks.
# XXX This doesn't work for unattached blocks.
store.sql("""
INSERT INTO chain_candidate (
chain_id, block_id, block_height, in_longest)
SELECT cc.chain_id, b.block_id, b.block_height, 0
FROM chain_candidate cc
JOIN block prev ON (cc.block_id = prev.block_id)
JOIN block b ON (b.prev_block_id = prev.block_id)
WHERE b.block_id = ?""", (block_id,))
inserted += store.rowcount()
store.commit() # XXX not sure why PostgreSQL needs this.
store.log.info("Inserted %d rows into chain_candidate.", inserted)
def repair_missed_blocks(store):
store.log.info("Finding longest chains.")
best_work = []
for row in store.selectall("""
SELECT cc.chain_id, MAX(b.block_chain_work)
FROM chain_candidate cc
JOIN block b USING (block_id)
GROUP BY cc.chain_id"""):
best_work.append(row)
best = []
for row in best_work:
chain_id, bcw = row
(block_id,) = store.selectrow("""
SELECT MIN(block_id)
FROM block b
JOIN chain_candidate cc USING (block_id)
WHERE cc.chain_id = ?
AND b.block_chain_work = ?
""", (chain_id, bcw))
(in_longest,) = store.selectrow("""
SELECT in_longest
FROM chain_candidate
WHERE chain_id = ?
AND block_id = ?
""", (chain_id, block_id))
if in_longest == 1:
store.log.info("Chain %d already has the block of greatest work.",
chain_id)
continue
best.append([chain_id, block_id])
store.sql("""
UPDATE chain
SET chain_last_block_id = ?
WHERE chain_id = ?""",
(block_id, chain_id))
if store.rowcount() == 1:
store.log.info("Chain %d block %d", chain_id, block_id)
else:
raise Exception("Wrong rowcount updating chain " + str(chain_id))
if not best:
return
store.log.info("Marking blocks in longest chains.")
for elt in best:
chain_id, block_id = elt
count = 0
while True:
store.sql("""
UPDATE chain_candidate
SET in_longest = 1
WHERE chain_id = ?
AND block_id = ?""",
(chain_id, block_id))
if store.rowcount() != 1:
raise Exception("Wrong rowcount updating chain_candidate ("
+ str(chain_id) + ", " + str(block_id) + ")")
count += 1
row = store.selectrow("""
SELECT b.prev_block_id, cc.in_longest
FROM block b
JOIN chain_candidate cc ON (b.prev_block_id = cc.block_id)
WHERE cc.chain_id = ?
AND b.block_id = ?""",
(chain_id, block_id))
if row is None:
break # genesis block?
block_id, in_longest = row
if in_longest == 1:
break
store.log.info("Processed %d in chain %d", count, chain_id)
store.log.info("Repair successful.")
def add_block_num_tx(store):
store.sql("ALTER TABLE block ADD block_num_tx NUMERIC(10)")
def add_block_ss_destroyed(store):
store.sql("ALTER TABLE block ADD block_ss_destroyed NUMERIC(28)")
def init_block_tx_sums(store):
store.log.info("Calculating block_num_tx and block_ss_destroyed.")
rows = store.selectall("""
SELECT block_id,
COUNT(1),
COUNT(satoshi_seconds_destroyed),
SUM(satoshi_seconds_destroyed)
FROM block
JOIN block_tx USING (block_id)
GROUP BY block_id""")
count = 0
store.log.info("Storing block_num_tx and block_ss_destroyed.")
for row in rows:
block_id, num_tx, num_ssd, ssd = row
if num_ssd < num_tx:
ssd = None
store.sql("""
UPDATE block
SET block_num_tx = ?,
block_ss_destroyed = ?
WHERE block_id = ?""",
(num_tx, ssd, block_id))
count += 1
if count % 1000 == 0:
store.commit()
# XXX would like to set NOT NULL on block_num_tx.
def config_ddl(store):
# XXX This won't work anymore.
store.configure_ddl_implicit_commit()
store.save_configvar("ddl_implicit_commit")
def config_create_table_epilogue(store):
# XXX This won't work anymore.
store.configure_create_table_epilogue()
store.save_configvar("create_table_epilogue")
def rename_abe_sequences_key(store):
"""Drop and recreate abe_sequences with key renamed to sequence_key."""
# Renaming a column is horribly unportable.
try:
data = store.selectall("""
SELECT DISTINCT key, nextid
FROM abe_sequences""")
except Exception:
store.rollback()
return
store.log.info("copying sequence positions: %s", data)
store.ddl("DROP TABLE abe_sequences")
store.ddl("""CREATE TABLE abe_sequences (
sequence_key VARCHAR(100) PRIMARY KEY,
nextid NUMERIC(30)
)""")
for row in data:
store.sql("INSERT INTO abe_sequences (sequence_key, nextid)"
" VALUES (?, ?)", row)
def create_x_txin_txout(store):
store.sql("CREATE INDEX x_txin_txout ON txin (txout_id)")
def save_datadir(store):
"""Copy the datadir table to recreate it with a new column."""
store.sql("CREATE TABLE abe_tmp_datadir AS SELECT * FROM datadir")
def add_datadir_id(store):
data = store.selectall("""
SELECT dirname, blkfile_number, blkfile_offset, chain_id
FROM abe_tmp_datadir""")
try:
store.ddl("DROP TABLE datadir")
except Exception:
store.rollback() # Assume already dropped.
store.ddl("""CREATE TABLE datadir (
datadir_id NUMERIC(10) PRIMARY KEY,
dirname VARCHAR(2000) NOT NULL,
blkfile_number NUMERIC(4) NULL,
blkfile_offset NUMERIC(20) NULL,
chain_id NUMERIC(10) NULL
)""")
store.create_sequence("datadir")
for row in data:
new_row = [store.new_id("datadir")]
new_row += row
store.sql("""
INSERT INTO datadir (
datadir_id, dirname, blkfile_number, blkfile_offset, chain_id
) VALUES (?, ?, ?, ?, ?)""", new_row)
def drop_tmp_datadir(store):
store.ddl("DROP TABLE abe_tmp_datadir")
def config_clob(store):
# This won't work anymore.
store.configure_max_varchar()
store.save_configvar("max_varchar")
store.configure_clob_type()
store.save_configvar("clob_type")
def clear_bad_addresses(store):
"""Set address=Unknown for the bogus outputs in Bitcoin 71036."""
bad_tx = [
'a288fec5559c3f73fd3d93db8e8460562ebfe2fcf04a5114e8d0f2920a6270dc',
'2a0597e665ac3d1cabeede95cedf907934db7f639e477b3c77b242140d8cf728',
'e411dbebd2f7d64dafeef9b14b5c59ec60c36779d43f850e5e347abee1e1a455']
for tx_hash in bad_tx:
row = store.selectrow("""
SELECT tx_id FROM tx WHERE tx_hash = ?""",
(store.hashin_hex(tx_hash),))
if row:
store.sql("""
UPDATE txout SET pubkey_id = NULL
WHERE tx_id = ? AND txout_pos = 1 AND pubkey_id IS NOT NULL""",
(row[0],))
if store.rowcount():
store.log.info("Cleared txout %s", tx_hash)
def find_namecoin_addresses(store):
updated = 0
for tx_id, txout_pos, script in store.selectall("""
SELECT tx_id, txout_pos, txout_scriptPubKey
FROM txout
WHERE pubkey_id IS NULL"""):
pubkey_id = store.script_to_pubkey_id(store.binout(script))
if pubkey_id is not None:
store.sql("""
UPDATE txout
SET pubkey_id = ?
WHERE tx_id = ?
AND txout_pos = ?""", (pubkey_id, tx_id, txout_pos))
updated += 1
if updated % 1000 == 0:
store.commit()
store.log.info("Found %d addresses", updated)
if updated % 1000 > 0:
store.commit()
store.log.info("Found %d addresses", updated)
def create_abe_lock(store):
store.ddl("""CREATE TABLE abe_lock (
lock_id NUMERIC(10) NOT NULL PRIMARY KEY,
pid VARCHAR(255) NULL
)""")
def create_abe_lock_row(store):
store.sql("INSERT INTO abe_lock (lock_id) VALUES (1)")
def insert_null_pubkey(store):
dbnull = store.binin(DataStore.NULL_PUBKEY_HASH)
row = store.selectrow("SELECT pubkey_id FROM pubkey WHERE pubkey_hash = ?",
(dbnull,))
if row:
# Null hash seen in a transaction. Go to some trouble to
# set its pubkey_id = 0 without violating constraints.
old_id = row[0]
import random # No need for cryptographic strength here.
temp_hash = "".join([chr(random.randint(0, 255)) for x in xrange(20)])
store.sql("INSERT INTO pubkey (pubkey_id, pubkey_hash) VALUES (?, ?)",
(DataStore.NULL_PUBKEY_ID, store.binin(temp_hash)))
store.sql("UPDATE txout SET pubkey_id = ? WHERE pubkey_id = ?",
(DataStore.NULL_PUBKEY_ID, old_id))
store.sql("DELETE FROM pubkey WHERE pubkey_id = ?", (old_id,))
store.sql("UPDATE pubkey SET pubkey_hash = ? WHERE pubkey_id = ?",
(dbnull, DataStore.NULL_PUBKEY_ID))
else:
store.sql("""
INSERT INTO pubkey (pubkey_id, pubkey_hash) VALUES (?, ?)""",
(DataStore.NULL_PUBKEY_ID, dbnull))
def set_netfee_pubkey_id(store):
store.log.info("Updating network fee output address to 'Destroyed'...")
# XXX This doesn't work for Oracle because of LOB weirdness.
# There, you could probably get away with:
# UPDATE txout SET pubkey_id = 0 WHERE txout_scriptPubKey BETWEEN 1 AND 2;
# UPDATE configvar SET configvar_value = 'Abe26' WHERE configvar_name =
# 'schema_version' AND configvar_value = 'Abe25.3';
# COMMIT;
store.sql("""
UPDATE txout
SET pubkey_id = ?
WHERE txout_scriptPubKey = ?""",
(DataStore.NULL_PUBKEY_ID,
store.binin(DataStore.SCRIPT_NETWORK_FEE)))
store.log.info("...rows updated: %d", store.rowcount())
def adjust_block_total_satoshis(store):
store.log.info("Adjusting value outstanding for lost coins.")
block = {}
block_ids = []
store.log.info("...getting block relationships.")
for block_id, prev_id in store.selectall("""
SELECT block_id, prev_block_id
FROM block
WHERE block_height IS NOT NULL
ORDER BY block_height"""):
block[block_id] = {"prev_id": prev_id}
block_ids.append(block_id)
store.log.info("...getting lossage per block.")
for block_id, lost in store.selectall("""
SELECT block_tx.block_id, SUM(txout.txout_value)
FROM block_tx
JOIN txout ON (block_tx.tx_id = txout.tx_id)
WHERE txout.pubkey_id <= 0
GROUP BY block_tx.block_id"""):
if block_id in block:
block[block_id]["lost"] = lost
store.log.info("...calculating adjustments.")
for block_id in block_ids:
b = block[block_id]
prev_id = b["prev_id"]
prev_lost = 0 if prev_id is None else block[prev_id]["cum_lost"]
b["cum_lost"] = b.get("lost", 0) + prev_lost
store.log.info("...applying adjustments.")
count = 0
for block_id in block_ids:
adj = block[block_id]["cum_lost"]
if adj != 0:
store.sql("""
UPDATE block
SET block_total_satoshis = block_total_satoshis - ?
WHERE block_id = ?""",
(adj, block_id))
count += 1
if count % 1000 == 0:
store.log.info("Adjusted %d of %d blocks.", count, len(block_ids))
if count % 1000 != 0:
store.log.info("Adjusted %d of %d blocks.", count, len(block_ids))
def config_limit_style(store):
# XXX This won't work anymore.
store.configure_limit_style()
store.save_configvar("limit_style")
def config_sequence_type(store):
# XXX This won't work anymore.
if store.config['sequence_type'] != "update":
return
store.configure_sequence_type()
if store.config['sequence_type'] != "update":
store.log.info("Creating native sequences.")
for name in ['magic', 'policy', 'chain', 'datadir',
'tx', 'txout', 'pubkey', 'txin', 'block']:
store.get_db().drop_sequence_if_exists(name)
store.create_sequence(name)
store.save_configvar("sequence_type")
def add_search_block_id(store):
store.log.info("Creating block.search_block_id")
store.sql("ALTER TABLE block ADD search_block_id NUMERIC(14) NULL")
def populate_search_block_id(store):
store.log.info("Calculating block.search_block_id")
for block_id, height, prev_id in store.selectall("""
SELECT block_id, block_height, prev_block_id
FROM block
WHERE block_height IS NOT NULL
ORDER BY block_height"""):
height = int(height)
search_id = None
if prev_id is not None:
prev_id = int(prev_id)
search_height = util.get_search_height(height)
if search_height is not None:
search_id = store.get_block_id_at_height(search_height, prev_id)
store.sql("UPDATE block SET search_block_id = ? WHERE block_id = ?",
(search_id, block_id))
store.cache_block(int(block_id), height, prev_id, search_id)
store.commit()
def add_fk_search_block_id(store):
add_constraint(store, "block", "fk1_search_block_id",
"FOREIGN KEY (search_block_id) REFERENCES block (block_id)")
def create_firstbits(store):
flag = store.config.get('use_firstbits')
if flag is None:
if store.args.use_firstbits is None:
store.log.info("use_firstbits not found, defaulting to false.")
store.config['use_firstbits'] = "false"
store.save_configvar("use_firstbits")
return
flag = "true" if store.args.use_firstbits else "false"
store.config['use_firstbits'] = flag
store.save_configvar("use_firstbits")
if flag == "true":
import firstbits
firstbits.create_firstbits(store)
def populate_firstbits(store):
if store.config['use_firstbits'] == "true":
import firstbits
firstbits.populate_firstbits(store)
def add_keep_scriptsig(store):
store.config['keep_scriptsig'] = "true"
store.save_configvar("keep_scriptsig")
def drop_satoshi_seconds_destroyed(store):
store.get_db().drop_column_if_exists("block_txin", "satoshi_seconds_destroyed")
def widen_blkfile_number(store):
data = store.selectall("""
SELECT datadir_id, dirname, blkfile_number, blkfile_offset, chain_id
FROM abe_tmp_datadir""")
store.get_db().drop_table_if_exists("datadir")
store.ddl("""CREATE TABLE datadir (
datadir_id NUMERIC(10) NOT NULL PRIMARY KEY,
dirname VARCHAR(2000) NOT NULL,
blkfile_number NUMERIC(8) NULL,
blkfile_offset NUMERIC(20) NULL,
chain_id NUMERIC(10) NULL
)""")
for row in data:
store.sql("""
INSERT INTO datadir (
datadir_id, dirname, blkfile_number, blkfile_offset, chain_id
) VALUES (?, ?, ?, ?, ?)""", row)
def add_datadir_loader(store):
store.sql("ALTER TABLE datadir ADD datadir_loader VARCHAR(100) NULL")
def add_chain_policy(store):
store.ddl("ALTER TABLE chain ADD chain_policy VARCHAR(255)")
def populate_chain_policy(store):
store.sql("UPDATE chain SET chain_policy = chain_name")
def add_chain_magic(store):
store.ddl("ALTER TABLE chain ADD chain_magic BINARY(4)")
def populate_chain_magic(store):
for chain_id, magic in store.selectall("""
SELECT chain.chain_id, magic.magic
FROM chain
JOIN magic ON (chain.magic_id = magic.magic_id)"""):
store.sql("UPDATE chain SET chain_magic = ? WHERE chain_id = ?",
(magic, chain_id))
def drop_policy(store):
for stmt in [
"ALTER TABLE chain DROP COLUMN policy_id",
"DROP TABLE policy"]:
try:
store.ddl(stmt)
except store.dbmodule.DatabaseError, e:
store.log.warning("Cleanup failed, ignoring: %s", stmt)
def drop_magic(store):
for stmt in [
"ALTER TABLE chain DROP COLUMN magic_id",
"DROP TABLE magic"]:
try:
store.ddl(stmt)
except store.dbmodule.DatabaseError, e:
store.log.warning("Cleanup failed, ignoring: %s", stmt)
def add_chain_decimals(store):
store.ddl("ALTER TABLE chain ADD chain_decimals NUMERIC(2)")
def insert_chain_novacoin(store):
import Chain
try:
store.insert_chain(Chain.create("NovaCoin"))
except Exception:
pass
def txin_detail_multisig(store):
store.get_db().drop_view_if_exists('txin_detail')
store.ddl("""
CREATE VIEW txin_detail AS SELECT
cc.chain_id,
cc.in_longest,
cc.block_id,
b.block_hash,
b.block_height,
block_tx.tx_pos,
tx.tx_id,
tx.tx_hash,
tx.tx_lockTime,
tx.tx_version,
tx.tx_size,
txin.txin_id,
txin.txin_pos,
txin.txout_id prevout_id""" + (""",
txin.txin_scriptSig,
txin.txin_sequence""" if store.keep_scriptsig else """,
NULL txin_scriptSig,
NULL txin_sequence""") + """,
prevout.txout_value txin_value,
prevout.txout_scriptPubKey txin_scriptPubKey,
pubkey.pubkey_id,
pubkey.pubkey_hash,
pubkey.pubkey
FROM chain_candidate cc
JOIN block b ON (cc.block_id = b.block_id)
JOIN block_tx ON (b.block_id = block_tx.block_id)
JOIN tx ON (tx.tx_id = block_tx.tx_id)
JOIN txin ON (tx.tx_id = txin.tx_id)
LEFT JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
LEFT JOIN pubkey
ON (prevout.pubkey_id = pubkey.pubkey_id)""")
def add_chain_script_addr_vers(store):
store.ddl("ALTER TABLE chain ADD chain_script_addr_vers VARBINARY(100) NULL")
def populate_chain_script_addr_vers(store):
def update(addr_vers, script_vers):
store.sql("UPDATE chain SET chain_script_addr_vers=? WHERE chain_address_version=?",
(store.binin(script_vers), store.binin(addr_vers)))
update('\x00', '\x05')
update('\x6f', '\xc4')
def create_multisig_pubkey(store):
store.ddl("""
CREATE TABLE multisig_pubkey (
multisig_id NUMERIC(26) NOT NULL,
pubkey_id NUMERIC(26) NOT NULL,
PRIMARY KEY (multisig_id, pubkey_id),
FOREIGN KEY (multisig_id) REFERENCES pubkey (pubkey_id),
FOREIGN KEY (pubkey_id) REFERENCES pubkey (pubkey_id)
)""")
def create_x_multisig_pubkey_multisig(store):
store.ddl("CREATE INDEX x_multisig_pubkey_pubkey ON multisig_pubkey (pubkey_id)")
def update_chain_policy(store):
store.sql("""
UPDATE chain
SET chain_policy = 'Sha256Chain'
WHERE chain_policy = chain_name
AND chain_name IN ('Weeds', 'BeerTokens', 'SolidCoin', 'ScTestnet', 'Worldcoin', 'Anoncoin')""")
def populate_multisig_pubkey(store):
store.init_chains()
store.log.info("Finding new address types.")
rows = store.selectall("""
SELECT txout_id, chain_id, txout_scriptPubKey
FROM txout_detail
WHERE pubkey_id IS NULL""")
count = 0
for txout_id, chain_id, db_script in rows:
script = store.binout(db_script)
pubkey_id = store.script_to_pubkey_id(store.get_chain_by_id(chain_id), script)
if pubkey_id > 0:
store.sql("UPDATE txout SET pubkey_id = ? WHERE txout_id = ?",
(pubkey_id, txout_id))
count += 1
store.commit()
store.log.info("Found %d", count)
sql_arg_names = (
'binary_type', 'max_varchar', 'ddl_implicit_commit',
'create_table_epilogue', 'sequence_type', 'limit_style',
'int_type', 'clob_type')
def abstract_sql(store):
for name in sql_arg_names:
store.sql("""
UPDATE configvar
SET configvar_name = ?
WHERE configvar_name = ?""", ('sql.' + name, name))
store.commit()
upgrades = [
('6', add_block_value_in),
('6.1', add_block_value_out),
('6.2', add_block_total_satoshis),
('6.3', add_block_total_seconds),
('6.4', add_block_satoshi_seconds),
('6.5', add_block_total_ss),
('6.6', add_satoshi_seconds_destroyed),
('6.7', add_cc_block_height),
('6.8', init_cc_block_height),
('6.9', index_cc_block_height),
('6.10', index_cc_block),
('6.11', create_block_txin),
('6.12', index_block_tx_tx),
('6.13', init_block_txin),
('6.14', init_block_value_in),
('6.15', init_block_value_out),
('6.16', init_block_totals),
('6.17', init_satoshi_seconds_destroyed),
('6.18', set_0_satoshi_seconds_destroyed),
('6.19', noop),
('6.20', index_block_nTime),
('6.21', replace_chain_summary),
('7', replace_chain_summary),
('7.1', index_block_tx_tx), # forgot to put in abe.py
('7.2', init_block_txin), # abe.py put bad data there.
('7.3', init_satoshi_seconds_destroyed),
('7.4', set_0_satoshi_seconds_destroyed),
('7.5', noop),
('7.6', drop_block_ss_columns),
('8', add_fk_block_txin_block_id),
('8.1', add_fk_block_txin_tx_id),
('8.2', add_fk_block_txin_out_block_id),
('8.3', add_chk_block_txin_out_block_id_nn),
('8.4', create_x_cc_block_id),
('9', reverse_binary_hashes),
('9.1', drop_x_cc_block_id),
('9.2', create_x_cc_block_height),
('10', create_txout_approx),
('11', add_fk_chain_candidate_block_id),
('12', create_configvar),
('12.1', configure),
('Abe13', populate_abe_sequences),
('Abe14', add_datadir_chain_id),
('Abe15', noop),
('Abe16', rescan_if_missed_blocks), # May be slow.
('Abe17', insert_missed_blocks),
('Abe17.1', repair_missed_blocks),
('Abe18', add_block_num_tx), # Seconds
('Abe18.1', add_block_ss_destroyed), # Seconds
('Abe18.2', init_block_tx_sums), # 5 minutes
('Abe18.3', replace_chain_summary), # Fast
('Abe19', config_ddl), # Fast
('Abe20', config_create_table_epilogue), # Fast
('Abe20.1', rename_abe_sequences_key), # Fast
('Abe21', create_x_txin_txout), # 25 seconds
('Abe22', save_datadir), # Fast
('Abe22.1', add_datadir_id), # Fast
('Abe22.2', drop_tmp_datadir), # Fast
('Abe23', config_clob), # Fast
('Abe24', clear_bad_addresses), # Fast
('Abe24.1', find_namecoin_addresses), # 2 minutes if you have Namecoin
('Abe25', create_abe_lock), # Fast
('Abe25.1', create_abe_lock_row), # Fast
('Abe25.2', insert_null_pubkey), # 1 second
('Abe25.3', set_netfee_pubkey_id), # Seconds
('Abe26', adjust_block_total_satoshis), # 1-3 minutes
('Abe26.1', init_block_satoshi_seconds), # 3-10 minutes
('Abe27', config_limit_style), # Fast
('Abe28', config_sequence_type), # Fast
# Should be okay back to here.
('Abe29', add_search_block_id), # Seconds
('Abe29.1', populate_search_block_id), # 1-2 minutes if using firstbits
('Abe29.2', add_fk_search_block_id), # Seconds
('Abe29.3', create_firstbits), # Fast
('Abe29.4', populate_firstbits), # Slow if config use_firstbits=true
('Abe30', add_keep_scriptsig), # Fast
('Abe31', drop_satoshi_seconds_destroyed), # Seconds
('Abe32', save_datadir), # Fast
('Abe32.1', widen_blkfile_number), # Fast
('Abe32.2', drop_tmp_datadir), # Fast
('Abe33', add_datadir_loader), # Fast
('Abe34', noop), # Fast
('Abe35', add_chain_policy), # Fast
('Abe35.1', populate_chain_policy), # Fast
('Abe35.2', add_chain_magic), # Fast
('Abe35.3', populate_chain_magic), # Fast
('Abe35.4', drop_policy), # Fast
('Abe35.5', drop_magic), # Fast
('Abe36', add_chain_decimals), # Fast
('Abe36.1', insert_chain_novacoin), # Fast
('Abe37', txin_detail_multisig), # Fast
('Abe37.1', add_chain_script_addr_vers), # Fast
('Abe37.2', populate_chain_script_addr_vers), # Fast
('Abe37.3', create_multisig_pubkey), # Fast
('Abe37.4', create_x_multisig_pubkey_multisig), # Fast
('Abe37.5', update_chain_policy), # Fast
('Abe37.6', populate_multisig_pubkey), # Minutes-hours
('Abe38', abstract_sql), # Fast
('Abe39', None)
]
def upgrade_schema(store):
if 'sql.binary_type' not in store.config:
for name in sql_arg_names:
store.config['sql.' + name] = store.config[name]
del store.config[name]
store.init_sql()
run_upgrades(store, upgrades)
sv = store.config['schema_version']
curr = upgrades[-1][0]
if sv != curr:
raise Exception('Can not upgrade from schema version %s to %s\n'
% (sv, curr))
store.log.warning("Upgrade complete.")
if __name__ == '__main__':
print "Run Abe with --upgrade added to the usual arguments."
sys.exit(2)
|
agpl-3.0
|
tumbl3w33d/ansible
|
lib/ansible/modules/cloud/google/gcp_compute_vpn_tunnel.py
|
13
|
17413
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_vpn_tunnel
description:
- VPN tunnel resource.
short_description: Creates a GCP VpnTunnel
version_added: '2.7'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
name:
description:
- Name of the resource. The name must be 1-63 characters long, and comply with
RFC1035. Specifically, the name must be 1-63 characters long and match the regular
expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must
be a lowercase letter, and all following characters must be a dash, lowercase
letter, or digit, except the last character, which cannot be a dash.
required: true
type: str
description:
description:
- An optional description of this resource.
required: false
type: str
target_vpn_gateway:
description:
- URL of the Target VPN gateway with which this VPN tunnel is associated.
- 'This field represents a link to a TargetVpnGateway resource in GCP. It can
be specified in two ways. First, you can place a dictionary with key ''selfLink''
and value of your resource''s selfLink Alternatively, you can add `register:
name-of-resource` to a gcp_compute_target_vpn_gateway task and then set this
target_vpn_gateway field to "{{ name-of-resource }}"'
required: false
type: dict
router:
description:
- URL of router resource to be used for dynamic routing.
- 'This field represents a link to a Router resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_router task and then set this router field to "{{ name-of-resource
}}"'
required: false
type: dict
peer_ip:
description:
- IP address of the peer VPN gateway. Only IPv4 is supported.
required: false
type: str
shared_secret:
description:
- Shared secret used to set the secure session between the Cloud VPN gateway and
the peer VPN gateway.
required: true
type: str
ike_version:
description:
- IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway.
- Acceptable IKE versions are 1 or 2. Default version is 2.
required: false
default: '2'
type: int
local_traffic_selector:
description:
- Local traffic selector to use when establishing the VPN tunnel with peer VPN
gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`.
The ranges should be disjoint.
- Only IPv4 is supported.
required: false
type: list
remote_traffic_selector:
description:
- Remote traffic selector to use when establishing the VPN tunnel with peer VPN
gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`.
The ranges should be disjoint.
- Only IPv4 is supported.
required: false
type: list
region:
description:
- The region where the tunnel is located.
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels)'
- 'Cloud VPN Overview: U(https://cloud.google.com/vpn/docs/concepts/overview)'
- 'Networks and Tunnel Routing: U(https://cloud.google.com/vpn/docs/concepts/choosing-networks-routing)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a network
gcp_compute_network:
name: network-vpn-tunnel
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: network
- name: create a router
gcp_compute_router:
name: router-vpn-tunnel
network: "{{ network }}"
bgp:
asn: 64514
advertise_mode: CUSTOM
advertised_groups:
- ALL_SUBNETS
advertised_ip_ranges:
- range: 1.2.3.4
- range: 6.7.0.0/16
region: us-central1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: router
- name: create a target vpn gateway
gcp_compute_target_vpn_gateway:
name: gateway-vpn-tunnel
region: us-west1
network: "{{ network }}"
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: gateway
- name: create a vpn tunnel
gcp_compute_vpn_tunnel:
name: test_object
region: us-west1
target_vpn_gateway: "{{ gateway }}"
router: "{{ router }}"
shared_secret: super secret
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: str
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
name:
description:
- Name of the resource. The name must be 1-63 characters long, and comply with RFC1035.
Specifically, the name must be 1-63 characters long and match the regular expression
`[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase
letter, and all following characters must be a dash, lowercase letter, or digit,
except the last character, which cannot be a dash.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
targetVpnGateway:
description:
- URL of the Target VPN gateway with which this VPN tunnel is associated.
returned: success
type: dict
router:
description:
- URL of router resource to be used for dynamic routing.
returned: success
type: dict
peerIp:
description:
- IP address of the peer VPN gateway. Only IPv4 is supported.
returned: success
type: str
sharedSecret:
description:
- Shared secret used to set the secure session between the Cloud VPN gateway and
the peer VPN gateway.
returned: success
type: str
sharedSecretHash:
description:
- Hash of the shared secret.
returned: success
type: str
ikeVersion:
description:
- IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway.
- Acceptable IKE versions are 1 or 2. Default version is 2.
returned: success
type: int
localTrafficSelector:
description:
- Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway.
The value should be a CIDR formatted string, for example `192.168.0.0/16`. The
ranges should be disjoint.
- Only IPv4 is supported.
returned: success
type: list
remoteTrafficSelector:
description:
- Remote traffic selector to use when establishing the VPN tunnel with peer VPN
gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`.
The ranges should be disjoint.
- Only IPv4 is supported.
returned: success
type: list
region:
description:
- The region where the tunnel is located.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
description=dict(type='str'),
target_vpn_gateway=dict(type='dict'),
router=dict(type='dict'),
peer_ip=dict(type='str'),
shared_secret=dict(required=True, type='str'),
ike_version=dict(default=2, type='int'),
local_traffic_selector=dict(type='list', elements='str'),
remote_traffic_selector=dict(type='list', elements='str'),
region=dict(required=True, type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#vpnTunnel'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
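# Converge on the desired state: when the tunnel already exists, update or
# delete it as requested; when it is absent, create it if state is 'present'.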
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
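# There is no in-place update path here: the tunnel is replaced by a delete
# followed by a create.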
delete(module, self_link(module), kind)
create(module, collection(module), kind)
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#vpnTunnel',
u'name': module.params.get('name'),
u'description': module.params.get('description'),
u'targetVpnGateway': replace_resource_dict(module.params.get(u'target_vpn_gateway', {}), 'selfLink'),
u'router': replace_resource_dict(module.params.get(u'router', {}), 'selfLink'),
u'peerIp': module.params.get('peer_ip'),
u'sharedSecret': module.params.get('shared_secret'),
u'ikeVersion': module.params.get('ike_version'),
u'localTrafficSelector': module.params.get('local_traffic_selector'),
u'remoteTrafficSelector': module.params.get('remote_traffic_selector'),
}
return_vals = {}
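# Drop parameters that were not supplied, but keep explicit False values.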
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/vpnTunnels/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/vpnTunnels".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'id': response.get(u'id'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'name': response.get(u'name'),
u'description': module.params.get('description'),
u'targetVpnGateway': replace_resource_dict(module.params.get(u'target_vpn_gateway', {}), 'selfLink'),
u'router': replace_resource_dict(module.params.get(u'router', {}), 'selfLink'),
u'peerIp': response.get(u'peerIp'),
u'sharedSecret': response.get(u'sharedSecret'),
u'sharedSecretHash': response.get(u'sharedSecretHash'),
u'ikeVersion': response.get(u'ikeVersion'),
u'localTrafficSelector': response.get(u'localTrafficSelector'),
u'remoteTrafficSelector': response.get(u'remoteTrafficSelector'),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#vpnTunnel')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
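# Poll the regional operation once per second until it reports DONE, failing
# fast if the operation carries any errors.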
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
if __name__ == '__main__':
main()
|
gpl-3.0
|
adrianholovaty/django
|
tests/regressiontests/queries/models.py
|
4
|
9168
|
"""
Various complex queries that have been problematic in the past.
"""
import threading
from django.db import models
class DumbCategory(models.Model):
pass
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children')
category = models.ForeignKey(NamedCategory, null=True, default=None)
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __unicode__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpickleable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
notes = models.ManyToManyField(Note)
def __unicode__(self):
return self.name
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
class Meta:
ordering = ['info']
def __unicode__(self):
return self.info
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True, null=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __unicode__(self):
return self.name
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num', null=True)
def __unicode__(self):
return self.name
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __unicode__(self):
return '%d: %s' % (self.rank, self.author.name)
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __unicode__(self):
return self.title
class Number(models.Model):
num = models.IntegerField()
def __unicode__(self):
return unicode(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_query_set(self):
qs = super(CustomManager, self).get_query_set()
return qs.filter(public=True, tag__name='t1')
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __unicode__(self):
return self.data
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_query_set(self):
return super(MemberManager, self).get_query_set().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, primary_key=True)
parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", null=True, unique=True)
def __unicode__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity)
# Multiple foreign keys
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __unicode__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA)
b = models.ForeignKey(LeafB)
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __unicode__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection)
# Multi-layer ordering
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject)
class Meta:
ordering = ['single']
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, null=True)
class Meta:
ordering = ['others']
def __unicode__(self):
return self.name
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name")
meal = models.CharField(max_length=20)
def __unicode__(self):
return u"%s at %s" % (self.food, self.meal)
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", to_field="num", null=True)
def __unicode__(self):
return u"%s" % self.num
# Bug #12252
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
num = models.PositiveSmallIntegerField()
def __unicode__(self):
return self.name
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
objectb = models.ForeignKey(ObjectB)
def __unicode__(self):
return self.name
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __unicode__(self):
return self.name
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __unicode__(self):
return self.name + " " + self.special_name
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory)
def __unicode__(self):
return "category item: " + str(self.category)
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory)
def __unicode__(self):
return "one2one " + self.new_name
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
|
bsd-3-clause
|
savoirfairelinux/django
|
django/contrib/gis/measure.py
|
81
|
11948
|
# Copyright (c) 2007, Robert Coup <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Distance nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Distance and Area objects to allow for sensible and convenient calculation
and conversions.
Authors: Robert Coup, Justin Bronn, Riccardo Di Virgilio
Inspired by GeoPy (https://github.com/geopy/geopy)
and Geoff Biggs' PhD work on dimensioned units for robotics.
"""
from decimal import Decimal
from functools import total_ordering
__all__ = ['A', 'Area', 'D', 'Distance']
NUMERIC_TYPES = (int, float, Decimal)
AREA_PREFIX = "sq_"
def pretty_name(obj):
return obj.__name__ if obj.__class__ == type else obj.__class__.__name__
@total_ordering
class MeasureBase:
STANDARD_UNIT = None
ALIAS = {}
UNITS = {}
LALIAS = {}
def __init__(self, default_unit=None, **kwargs):
value, self._default_unit = self.default_units(kwargs)
setattr(self, self.STANDARD_UNIT, value)
if default_unit and isinstance(default_unit, str):
self._default_unit = default_unit
def _get_standard(self):
return getattr(self, self.STANDARD_UNIT)
def _set_standard(self, value):
setattr(self, self.STANDARD_UNIT, value)
standard = property(_get_standard, _set_standard)
def __getattr__(self, name):
if name in self.UNITS:
return self.standard / self.UNITS[name]
else:
raise AttributeError('Unknown unit type: %s' % name)
def __repr__(self):
return '%s(%s=%s)' % (pretty_name(self), self._default_unit, getattr(self, self._default_unit))
def __str__(self):
return '%s %s' % (getattr(self, self._default_unit), self._default_unit)
# **** Comparison methods ****
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.standard == other.standard
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.standard < other.standard
else:
return NotImplemented
# **** Operators methods ****
def __add__(self, other):
if isinstance(other, self.__class__):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard + other.standard)}
)
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __iadd__(self, other):
if isinstance(other, self.__class__):
self.standard += other.standard
return self
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __sub__(self, other):
if isinstance(other, self.__class__):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard - other.standard)}
)
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __isub__(self, other):
if isinstance(other, self.__class__):
self.standard -= other.standard
return self
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __mul__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)}
)
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __imul__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard *= float(other)
return self
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __rmul__(self, other):
return self * other
def __truediv__(self, other):
if isinstance(other, self.__class__):
return self.standard / other.standard
if isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)}
)
else:
raise TypeError('%(class)s must be divided with number or %(class)s' % {"class": pretty_name(self)})
def __itruediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard /= float(other)
return self
else:
raise TypeError('%(class)s must be divided with number' % {"class": pretty_name(self)})
def __bool__(self):
return bool(self.standard)
def default_units(self, kwargs):
"""
Return the unit value and the default units specified
from the given keyword arguments dictionary.
"""
val = 0.0
default_unit = self.STANDARD_UNIT
for unit, value in kwargs.items():
if not isinstance(value, float):
value = float(value)
if unit in self.UNITS:
val += self.UNITS[unit] * value
default_unit = unit
elif unit in self.ALIAS:
u = self.ALIAS[unit]
val += self.UNITS[u] * value
default_unit = u
else:
lower = unit.lower()
if lower in self.UNITS:
val += self.UNITS[lower] * value
default_unit = lower
elif lower in self.LALIAS:
u = self.LALIAS[lower]
val += self.UNITS[u] * value
default_unit = u
else:
raise AttributeError('Unknown unit type: %s' % unit)
return val, default_unit
@classmethod
def unit_attname(cls, unit_str):
"""
Retrieve the unit attribute name for the given unit string.
For example, if the given unit string is 'metre', return 'm'.
Raise an exception if an attribute cannot be found.
"""
lower = unit_str.lower()
if unit_str in cls.UNITS:
return unit_str
elif lower in cls.UNITS:
return lower
elif lower in cls.LALIAS:
return cls.LALIAS[lower]
else:
raise Exception('Could not find a unit keyword associated with "%s"' % unit_str)
class Distance(MeasureBase):
STANDARD_UNIT = "m"
UNITS = {
'chain': 20.1168,
'chain_benoit': 20.116782,
'chain_sears': 20.1167645,
'british_chain_benoit': 20.1167824944,
'british_chain_sears': 20.1167651216,
'british_chain_sears_truncated': 20.116756,
'cm': 0.01,
'british_ft': 0.304799471539,
'british_yd': 0.914398414616,
'clarke_ft': 0.3047972654,
'clarke_link': 0.201166195164,
'fathom': 1.8288,
'ft': 0.3048,
'german_m': 1.0000135965,
'gold_coast_ft': 0.304799710181508,
'indian_yd': 0.914398530744,
'inch': 0.0254,
'km': 1000.0,
'link': 0.201168,
'link_benoit': 0.20116782,
'link_sears': 0.20116765,
'm': 1.0,
'mi': 1609.344,
'mm': 0.001,
'nm': 1852.0,
'nm_uk': 1853.184,
'rod': 5.0292,
'sears_yd': 0.91439841,
'survey_ft': 0.304800609601,
'um': 0.000001,
'yd': 0.9144,
}
# Unit aliases for `UNIT` terms encountered in Spatial Reference WKT.
ALIAS = {
'centimeter': 'cm',
'foot': 'ft',
'inches': 'inch',
'kilometer': 'km',
'kilometre': 'km',
'meter': 'm',
'metre': 'm',
'micrometer': 'um',
'micrometre': 'um',
'millimeter': 'mm',
'millimetre': 'mm',
'mile': 'mi',
'yard': 'yd',
'British chain (Benoit 1895 B)': 'british_chain_benoit',
'British chain (Sears 1922)': 'british_chain_sears',
'British chain (Sears 1922 truncated)': 'british_chain_sears_truncated',
'British foot (Sears 1922)': 'british_ft',
'British foot': 'british_ft',
'British yard (Sears 1922)': 'british_yd',
'British yard': 'british_yd',
"Clarke's Foot": 'clarke_ft',
"Clarke's link": 'clarke_link',
'Chain (Benoit)': 'chain_benoit',
'Chain (Sears)': 'chain_sears',
'Foot (International)': 'ft',
'German legal metre': 'german_m',
'Gold Coast foot': 'gold_coast_ft',
'Indian yard': 'indian_yd',
'Link (Benoit)': 'link_benoit',
'Link (Sears)': 'link_sears',
'Nautical Mile': 'nm',
'Nautical Mile (UK)': 'nm_uk',
'US survey foot': 'survey_ft',
'U.S. Foot': 'survey_ft',
'Yard (Indian)': 'indian_yd',
'Yard (Sears)': 'sears_yd'
}
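# Lower-cased aliases support the case-insensitive lookups performed by
# default_units() and unit_attname().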
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __mul__(self, other):
if isinstance(other, self.__class__):
return Area(
default_unit=AREA_PREFIX + self._default_unit,
**{AREA_PREFIX + self.STANDARD_UNIT: (self.standard * other.standard)}
)
elif isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)}
)
else:
raise TypeError('%(distance)s must be multiplied with number or %(distance)s' % {
"distance": pretty_name(self.__class__),
})
class Area(MeasureBase):
STANDARD_UNIT = AREA_PREFIX + Distance.STANDARD_UNIT
# Getting the square units values and the alias dictionary.
UNITS = {'%s%s' % (AREA_PREFIX, k): v ** 2 for k, v in Distance.UNITS.items()}
ALIAS = {k: '%s%s' % (AREA_PREFIX, v) for k, v in Distance.ALIAS.items()}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __truediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(
default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)}
)
else:
raise TypeError('%(class)s must be divided by a number' % {"class": pretty_name(self)})
# Shortcuts
D = Distance
A = Area
|
bsd-3-clause
|
VisionistInc/advent-of-code-2016
|
joshgordon/01/parse_1.py
|
1
|
2606
|
from operator import add, sub
import sys
#image stuff
from PIL import Image, ImageDraw
#Read in the directions
with open(sys.argv[1]) as f:
directions = f.read()[:-1]
directions = directions.strip()
directions = directions.split(', ')
compass = 0
# Compass directions:
# 0 : north
# 1 : east
# 2 : south
# 3 : west
# turning RIGHT increments the compass, turning LEFT decrements.
compass_names = ["NORTH", "EAST", "SOUTH", "WEST"]
# starting at 0,0
coord = [0, 0]
# This gets multiplied by the distance and then added as a whole to the
# coord variable above. It defines the directions that each movement
# should go.
movements = [
# NORTH: add 1 to Y
[0, 1],
# EAST: add 1 to X
[1, 0],
#SOUTH: add -1 to Y
[0, -1],
# WEST: add -1 to X
[-1, 0]
]
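# For example: facing EAST (compass == 1), a single step adds movements[1],
# i.e. [1, 0], to coord, so x increases by one block and y is unchanged.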
visited_coords = []
crosses = []
# Keep track of this so we can draw a pretty picture.
stop_coords = []
max_coord = [0, 0]
min_coord = [0, 0]
# Go through each step and figure out location
for direction in directions:
if direction[0] == "R":
compass += 1
else:
compass -= 1
compass %= 4
distance = int(direction[1:])
#Update the coordinate.
for i in range(distance):
coord = map(add, coord, movements[compass])
if coord in visited_coords:
crosses.append(coord)
visited_coords.append(coord)
# original from part 1
# coord = map(add, coord, [x * distance for x in movements[compass]])
# update info for images:
max_coord = map(max, coord, max_coord)
min_coord = map(min, coord, min_coord)
stop_coords.append(coord)
print "facing %s, traveling %d to %s" % (compass_names[compass], distance, coord)
print "Need to travel to %s, for a total of %d blocks" % (coord, abs(coord[0]) + abs(coord[1]))
if len(crosses) > 0:
print "Crossed %d time(s), the first one at %s for a distance of %d" % (len(crosses), crosses[0], abs(crosses[0][0]) + abs(crosses[0][1]))
### Generate a nice map
size = map(sub, max_coord, min_coord)
# round the sizes up to an even number for ffmpeg happiness
size = map(lambda x : x + x % 2, size)
stop_coords = [map(sub, coord, min_coord) for coord in stop_coords]
visited_coords = [map(sub, coord, min_coord) for coord in visited_coords]
image = Image.new('RGBA', size, "black")
draw = ImageDraw.Draw(image)
# for i in range(len(stop_coords)-1):
# draw.line(stop_coords[i] + stop_coords[i + 1])
# image.save("frames/%04d.png" % (i,), "PNG")
for i in range(len(visited_coords) - 1):
draw.line(visited_coords[i] + visited_coords[i + 1])
image.save("frames/%04d.png" % (i,), "PNG")
|
mit
|
shakamunyi/tensorflow
|
tensorflow/contrib/batching/__init__.py
|
85
|
1029
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and modules related to batch.
@@batch_function
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.batching.python.ops.batch_ops import batch_function
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
apache-2.0
|
auerj/flask-oauthlib
|
flask_oauthlib/contrib/apps.py
|
6
|
7551
|
"""
flask_oauthlib.contrib.apps
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The bundle of remote app factories for well-known third-party platforms.
Usage::
from flask import Flask
from flask_oauthlib.client import OAuth
from flask_oauthlib.contrib.apps import github
app = Flask(__name__)
oauth = OAuth(app)
github.register_to(oauth, scope=['user:email'])
github.register_to(oauth, name='github2')
Of course, it requires consumer keys in your config::
GITHUB_CONSUMER_KEY = ''
GITHUB_CONSUMER_SECRET = ''
GITHUB2_CONSUMER_KEY = ''
GITHUB2_CONSUMER_SECRET = ''
Some apps that use OAuth 1.0a, such as Twitter, cannot accept the ``scope``
argument.
Contributed by: tonyseek
"""
import copy
from oauthlib.common import unicode_type, bytes_type
__all__ = ['douban', 'dropbox', 'facebook', 'github', 'google', 'linkedin',
'twitter', 'weibo']
class RemoteAppFactory(object):
"""The factory to create remote app and bind it to given extension.
:param default_name: the default name which be used for registering.
:param kwargs: the pre-defined kwargs.
:param docstring: the docstring of factory.
"""
def __init__(self, default_name, kwargs, docstring=''):
assert 'name' not in kwargs
assert 'register' not in kwargs
self.default_name = default_name
self.kwargs = kwargs
self._kwargs_processor = None
self.__doc__ = docstring.lstrip()
def register_to(self, oauth, name=None, **kwargs):
"""Creates a remote app and registers it."""
kwargs = self._process_kwargs(
name=(name or self.default_name), **kwargs)
return oauth.remote_app(**kwargs)
def create(self, oauth, **kwargs):
"""Creates a remote app only."""
kwargs = self._process_kwargs(
name=self.default_name, register=False, **kwargs)
return oauth.remote_app(**kwargs)
def kwargs_processor(self, fn):
"""Sets a function to process kwargs before creating any app."""
self._kwargs_processor = fn
return fn
def _process_kwargs(self, **kwargs):
final_kwargs = copy.deepcopy(self.kwargs)
# merges with pre-defined kwargs
final_kwargs.update(copy.deepcopy(kwargs))
# use name as app key
final_kwargs.setdefault('app_key', final_kwargs['name'].upper())
# processes by pre-defined function
if self._kwargs_processor is not None:
final_kwargs = self._kwargs_processor(**final_kwargs)
return final_kwargs
def make_scope_processor(default_scope):
def processor(**kwargs):
# request_token_params
scope = kwargs.pop('scope', [default_scope]) # default scope
if not isinstance(scope, (unicode_type, bytes_type)):
scope = ','.join(scope) # allows list-style scope
request_token_params = kwargs.setdefault('request_token_params', {})
request_token_params.setdefault('scope', scope) # doesn't override
return kwargs
return processor
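# Illustration (not part of the original source): the returned processor
# normalises a list-style scope into the comma-joined string the providers
# expect, e.g. processor(name='github', scope=['user:email', 'repo']) returns
# kwargs whose request_token_params contain {'scope': 'user:email,repo'}.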
douban = RemoteAppFactory('douban', {
'base_url': 'https://api.douban.com/v2/',
'request_token_url': None,
'access_token_url': 'https://www.douban.com/service/auth2/token',
'authorize_url': 'https://www.douban.com/service/auth2/auth',
'access_token_method': 'POST',
}, """
The OAuth app for douban.com API.
:param scope: optional. default: ``['douban_basic_common']``.
see also: http://developers.douban.com/wiki/?title=oauth2
""")
douban.kwargs_processor(make_scope_processor('douban_basic_common'))
dropbox = RemoteAppFactory('dropbox', {
'base_url': 'https://www.dropbox.com/1/',
'request_token_url': None,
'access_token_url': 'https://api.dropbox.com/1/oauth2/token',
'authorize_url': 'https://www.dropbox.com/1/oauth2/authorize',
'access_token_method': 'POST',
'request_token_params': {},
}, """The OAuth app for Dropbox API.""")
facebook = RemoteAppFactory('facebook', {
'request_token_params': {'scope': 'email'},
'base_url': 'https://graph.facebook.com',
'request_token_url': None,
'access_token_url': '/oauth/access_token',
'authorize_url': 'https://www.facebook.com/dialog/oauth',
}, """
The OAuth app for Facebook API.
:param scope: optional. default: ``['email']``.
""")
facebook.kwargs_processor(make_scope_processor('email'))
github = RemoteAppFactory('github', {
'base_url': 'https://api.github.com/',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://github.com/login/oauth/access_token',
'authorize_url': 'https://github.com/login/oauth/authorize',
}, """
The OAuth app for GitHub API.
:param scope: optional. default: ``['user:email']``.
""")
github.kwargs_processor(make_scope_processor('user:email'))
google = RemoteAppFactory('google', {
'base_url': 'https://www.googleapis.com/oauth2/v1/',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://accounts.google.com/o/oauth2/token',
'authorize_url': 'https://accounts.google.com/o/oauth2/auth',
}, """
The OAuth app for Google API.
:param scope: optional.
default: ``['email']``.
""")
google.kwargs_processor(make_scope_processor(
'email'))
twitter = RemoteAppFactory('twitter', {
'base_url': 'https://api.twitter.com/1.1/',
'request_token_url': 'https://api.twitter.com/oauth/request_token',
'access_token_url': 'https://api.twitter.com/oauth/access_token',
'authorize_url': 'https://api.twitter.com/oauth/authenticate',
}, """The OAuth app for Twitter API.""")
weibo = RemoteAppFactory('weibo', {
'base_url': 'https://api.weibo.com/2/',
'authorize_url': 'https://api.weibo.com/oauth2/authorize',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://api.weibo.com/oauth2/access_token',
# weibo's token response does not follow the spec, so force the content to be parsed as JSON
'content_type': 'application/json',
}, """
The OAuth app for weibo.com API.
:param scope: optional. default: ``['email']``
""")
weibo.kwargs_processor(make_scope_processor('email'))
def change_weibo_header(uri, headers, body):
"""Since weibo is a rubbish server, it does not follow the standard,
we need to change the authorization header for it."""
auth = headers.get('Authorization')
if auth:
auth = auth.replace('Bearer', 'OAuth2')
headers['Authorization'] = auth
return uri, headers, body
weibo.pre_request = change_weibo_header
linkedin = RemoteAppFactory('linkedin', {
'request_token_params': {'state': 'RandomString'},
'base_url': 'https://api.linkedin.com/v1/',
'request_token_url': None,
'access_token_method': 'POST',
'access_token_url': 'https://www.linkedin.com/uas/oauth2/accessToken',
'authorize_url': 'https://www.linkedin.com/uas/oauth2/authorization',
}, """
The OAuth app for LinkedIn API.
:param scope: optional. default: ``['r_basicprofile']``
""")
linkedin.kwargs_processor(make_scope_processor('r_basicprofile'))
def change_linkedin_query(uri, headers, body):
auth = headers.pop('Authorization')
headers['x-li-format'] = 'json'
if auth:
auth = auth.replace('Bearer', '').strip()
if '?' in uri:
uri += '&oauth2_access_token=' + auth
else:
uri += '?oauth2_access_token=' + auth
return uri, headers, body
linkedin.pre_request = change_linkedin_query
|
bsd-3-clause
|
walke469/spartahack-17
|
ballotbuilder/lib/python3.5/site-packages/requests/packages/urllib3/util/request.py
|
780
|
2128
|
from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
|
bsd-2-clause
|
15Dkatz/pants
|
tests/python/pants_test/backend/graph_info/tasks/test_cloc.py
|
16
|
2934
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.graph_info.tasks.cloc import CountLinesOfCode
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.build_environment import get_buildroot
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class ClocTest(ConsoleTaskTestBase):
@classmethod
def task_type(cls):
return CountLinesOfCode
def test_counts(self):
dep_py_tgt = self.make_target('src/py/dep', PythonLibrary, sources=['dep.py'])
py_tgt = self.make_target('src/py/foo', PythonLibrary, dependencies=[dep_py_tgt],
sources=['foo.py', 'bar.py'])
java_tgt = self.make_target('src/java/foo', JavaLibrary, sources=['Foo.java'])
self.create_file('src/py/foo/foo.py', '# A comment.\n\nprint("some code")\n# Another comment.')
self.create_file('src/py/foo/bar.py', '# A comment.\n\nprint("some more code")')
self.create_file('src/py/dep/dep.py', 'print("a dependency")')
self.create_file('src/java/foo/Foo.java', '// A comment. \n class Foo(){}\n')
self.create_file('src/java/foo/Bar.java', '// We do not expect this file to appear in counts.')
def assert_counts(res, lang, files, blank, comment, code):
for line in res:
fields = line.split()
if len(fields) >= 5:
if fields[0] == lang:
self.assertEquals(files, int(fields[1]))
self.assertEquals(blank, int(fields[2]))
self.assertEquals(comment, int(fields[3]))
self.assertEquals(code, int(fields[4]))
return
self.fail('Found no output line for {}'.format(lang))
res = self.execute_console_task(targets=[py_tgt, java_tgt], options={'transitive': True})
assert_counts(res, 'Python', files=3, blank=2, comment=3, code=3)
assert_counts(res, 'Java', files=1, blank=0, comment=1, code=1)
res = self.execute_console_task(targets=[py_tgt, java_tgt], options={'transitive': False})
assert_counts(res, 'Python', files=2, blank=2, comment=3, code=2)
assert_counts(res, 'Java', files=1, blank=0, comment=1, code=1)
def test_ignored(self):
py_tgt = self.make_target('src/py/foo', PythonLibrary, sources=['foo.py', 'empty.py'])
self.create_file('src/py/foo/foo.py', 'print("some code")')
self.create_file('src/py/foo/empty.py', '')
res = self.execute_console_task(targets=[py_tgt], options={'ignored': True})
self.assertEquals(['Ignored the following files:',
'{}/src/py/foo/empty.py: zero sized file'.format(get_buildroot())],
filter(None, res)[-2:])
|
apache-2.0
|
clayz/crazy-quiz-web
|
lib/wtforms/fields/simple.py
|
38
|
1834
|
import warnings
from .. import widgets
from .core import StringField, BooleanField
__all__ = (
'BooleanField', 'TextAreaField', 'PasswordField', 'FileField',
'HiddenField', 'SubmitField', 'TextField'
)
class TextField(StringField):
"""
Legacy alias for StringField
.. deprecated:: 2.0
"""
def __init__(self, *args, **kw):
super(TextField, self).__init__(*args, **kw)
warnings.warn(
'The TextField alias for StringField has been deprecated and will be removed in WTForms 3.0',
DeprecationWarning
)
class TextAreaField(StringField):
"""
This field represents an HTML ``<textarea>`` and can be used to take
multi-line input.
"""
widget = widgets.TextArea()
class PasswordField(StringField):
"""
A StringField, except renders an ``<input type="password">``.
Also, whatever value is accepted by this field is not rendered back
to the browser like normal fields.
"""
widget = widgets.PasswordInput()
class FileField(StringField):
"""
Can render a file-upload field. Will take any passed filename value, if
any is sent by the browser in the post params. This field will NOT
actually handle the file upload portion, as wtforms does not deal with
individual frameworks' file handling capabilities.
"""
widget = widgets.FileInput()
class HiddenField(StringField):
"""
HiddenField is a convenience for a StringField with a HiddenInput widget.
It will render as an ``<input type="hidden">`` but otherwise coerce to a string.
"""
widget = widgets.HiddenInput()
class SubmitField(BooleanField):
"""
Represents an ``<input type="submit">``. This allows checking if a given
submit button has been pressed.
"""
widget = widgets.SubmitInput()
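# A minimal usage sketch (an assumption for illustration, not part of this
# module): these classes are declared on a Form like any other WTForms field.
#
#   from wtforms import Form
#
#   class LoginForm(Form):
#       password = PasswordField('Password')
#       submit = SubmitField('Log in')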
|
apache-2.0
|
ajaali/django
|
tests/gis_tests/geoadmin/tests.py
|
304
|
3157
|
from __future__ import unicode_literals
from django.contrib.gis import admin
from django.contrib.gis.geos import Point
from django.test import TestCase, override_settings, skipUnlessDBFeature
from .admin import UnmodifiableAdmin
from .models import City, site
@skipUnlessDBFeature("gis_enabled")
@override_settings(ROOT_URLCONF='django.contrib.gis.tests.geoadmin.urls')
class GeoAdminTest(TestCase):
def test_ensure_geographic_media(self):
geoadmin = site._registry[City]
admin_js = geoadmin.media.render_js()
self.assertTrue(any(geoadmin.openlayers_url in js for js in admin_js))
def test_olmap_OSM_rendering(self):
delete_all_btn = """<a href="javascript:geodjango_point.clearFeatures()">Delete all Features</a>"""
original_geoadmin = site._registry[City]
params = original_geoadmin.get_map_widget(City._meta.get_field('point')).params
result = original_geoadmin.get_map_widget(City._meta.get_field('point'))(
).render('point', Point(-79.460734, 40.18476), params)
self.assertIn(
"""geodjango_point.layers.base = new OpenLayers.Layer.OSM("OpenStreetMap (Mapnik)");""",
result)
self.assertIn(delete_all_btn, result)
site.unregister(City)
site.register(City, UnmodifiableAdmin)
try:
geoadmin = site._registry[City]
params = geoadmin.get_map_widget(City._meta.get_field('point')).params
result = geoadmin.get_map_widget(City._meta.get_field('point'))(
).render('point', Point(-79.460734, 40.18476), params)
self.assertNotIn(delete_all_btn, result)
finally:
site.unregister(City)
site.register(City, original_geoadmin.__class__)
def test_olmap_WMS_rendering(self):
geoadmin = admin.GeoModelAdmin(City, site)
result = geoadmin.get_map_widget(City._meta.get_field('point'))(
).render('point', Point(-79.460734, 40.18476))
self.assertIn(
"""geodjango_point.layers.base = new OpenLayers.Layer.WMS("OpenLayers WMS", """
""""http://vmap0.tiles.osgeo.org/wms/vmap0", {layers: 'basic', format: 'image/jpeg'});""",
result)
def test_olwidget_has_changed(self):
"""
Check that changes are accurately noticed by OpenLayersWidget.
"""
geoadmin = site._registry[City]
form = geoadmin.get_changelist_form(None)()
has_changed = form.fields['point'].has_changed
initial = Point(13.4197458572965953, 52.5194108501149799, srid=4326)
data_same = "SRID=3857;POINT(1493879.2754093995 6894592.019687599)"
data_almost_same = "SRID=3857;POINT(1493879.2754093990 6894592.019687590)"
data_changed = "SRID=3857;POINT(1493884.0527237 6894593.8111804)"
self.assertTrue(has_changed(None, data_changed))
self.assertTrue(has_changed(initial, ""))
self.assertFalse(has_changed(None, ""))
self.assertFalse(has_changed(initial, data_same))
self.assertFalse(has_changed(initial, data_almost_same))
self.assertTrue(has_changed(initial, data_changed))
|
bsd-3-clause
|
drglove/SickRage
|
SickBeard.py
|
2
|
21428
|
#!/usr/bin/env python2
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# Check needed software dependencies to nudge users to fix their setup
from __future__ import with_statement
import codecs
codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
import time
import signal
import sys
import subprocess
import traceback
import shutil
import lib.shutil_custom
shutil.copyfile = lib.shutil_custom.copyfile_custom
if sys.version_info < (2, 6):
print "Sorry, requires Python 2.6 or 2.7."
sys.exit(1)
try:
import Cheetah
if Cheetah.Version[0] != '2':
raise ValueError
except ValueError:
print "Sorry, requires Python module Cheetah 2.1.0 or newer."
sys.exit(1)
except:
print "The Python module Cheetah is required"
sys.exit(1)
import os
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), 'lib')))
# We only need this for compiling an EXE and I will just always do that on 2.6+
if sys.hexversion >= 0x020600F0:
from multiprocessing import freeze_support # @UnresolvedImport
if sys.version_info >= (2, 7, 9):
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import locale
import datetime
import threading
import getopt
import sickbeard
from sickbeard import db, logger, network_timezones, failed_history, name_cache
from sickbeard.tv import TVShow
from sickbeard.webserveInit import SRWebServer
from sickbeard.databases.mainDB import MIN_DB_VERSION, MAX_DB_VERSION
from sickbeard.event_queue import Events
from lib.configobj import ConfigObj
throwaway = datetime.datetime.strptime('20110101', '%Y%m%d')
signal.signal(signal.SIGINT, sickbeard.sig_handler)
signal.signal(signal.SIGTERM, sickbeard.sig_handler)
class SickRage(object):
def __init__(self):
# system event callback for shutdown/restart
sickbeard.events = Events(self.shutdown)
# daemon constants
self.runAsDaemon = False
self.CREATEPID = False
self.PIDFILE = ''
# webserver constants
self.webserver = None
self.forceUpdate = False
self.forcedPort = None
self.noLaunch = False
def help_message(self):
"""
print help message for commandline options
"""
help_msg = "\n"
help_msg += "Usage: " + sickbeard.MY_FULLNAME + " <option> <another option>\n"
help_msg += "\n"
help_msg += "Options:\n"
help_msg += "\n"
help_msg += " -h --help Prints this message\n"
help_msg += " -f --forceupdate Force update all shows in the DB (from tvdb) on startup\n"
help_msg += " -q --quiet Disables logging to console\n"
help_msg += " --nolaunch Suppress launching web browser on startup\n"
if sys.platform == 'win32' or sys.platform == 'darwin':
help_msg += " -d --daemon Running as real daemon is not supported on Windows\n"
help_msg += " On Windows and MAC, --daemon is substituted with: --quiet --nolaunch\n"
else:
help_msg += " -d --daemon Run as double forked daemon (includes options --quiet --nolaunch)\n"
help_msg += " --pidfile=<path> Combined with --daemon creates a pidfile (full path including filename)\n"
help_msg += " -p <port> --port=<port> Override default/configured port to listen on\n"
help_msg += " --datadir=<path> Override folder (full path) as location for\n"
help_msg += " storing database, configfile, cache, logfiles \n"
help_msg += " Default: " + sickbeard.PROG_DIR + "\n"
help_msg += " --config=<path> Override config filename (full path including filename)\n"
help_msg += " to load configuration from \n"
help_msg += " Default: config.ini in " + sickbeard.PROG_DIR + " or --datadir location\n"
help_msg += " --noresize Prevent resizing of the banner/posters even if PIL is installed\n"
return help_msg
def start(self):
# do some preliminary stuff
sickbeard.MY_FULLNAME = os.path.normpath(os.path.abspath(__file__))
sickbeard.MY_NAME = os.path.basename(sickbeard.MY_FULLNAME)
sickbeard.PROG_DIR = os.path.dirname(sickbeard.MY_FULLNAME)
sickbeard.DATA_DIR = sickbeard.PROG_DIR
sickbeard.MY_ARGS = sys.argv[1:]
sickbeard.SYS_ENCODING = None
try:
locale.setlocale(locale.LC_ALL, "")
sickbeard.SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
pass
# For OSes that are poorly configured I'll just randomly force UTF-8
if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
sickbeard.SYS_ENCODING = 'UTF-8'
if not hasattr(sys, "setdefaultencoding"):
reload(sys)
if sys.platform == 'win32':
if sys.getwindowsversion()[0] >= 6 and sys.stdout.encoding == 'cp65001':
sickbeard.SYS_ENCODING = 'UTF-8'
try:
# pylint: disable=E1101
# On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError
sys.setdefaultencoding(sickbeard.SYS_ENCODING)
except:
sys.exit("Sorry, you MUST add the SickRage folder to the PYTHONPATH environment variable\n" +
"or find another way to force Python to use " + sickbeard.SYS_ENCODING + " for string encoding.")
# Need console logging for SickBeard.py and SickBeard-console.exe
self.consoleLogging = (not hasattr(sys, "frozen")) or (sickbeard.MY_NAME.lower().find('-console') > 0)
# Rename the main thread
threading.currentThread().name = "MAIN"
try:
opts, args = getopt.getopt(sys.argv[1:], "hfqdp::",
['help', 'forceupdate', 'quiet', 'nolaunch', 'daemon', 'pidfile=', 'port=',
'datadir=', 'config=', 'noresize']) # @UnusedVariable
except getopt.GetoptError:
sys.exit(self.help_message())
for o, a in opts:
# Prints help message
if o in ('-h', '--help'):
sys.exit(self.help_message())
# For now we'll just silence the logging
if o in ('-q', '--quiet'):
self.consoleLogging = False
# Should we update (from indexer) all shows in the DB right away?
if o in ('-f', '--forceupdate'):
self.forceUpdate = True
# Suppress launching web browser
# Needed for OSes without default browser assigned
# Prevent duplicate browser window when restarting in the app
if o in ('--nolaunch',):
self.noLaunch = True
# Override default/configured port
if o in ('-p', '--port'):
try:
self.forcedPort = int(a)
except ValueError:
sys.exit("Port: " + str(a) + " is not a number. Exiting.")
# Run as a double forked daemon
if o in ('-d', '--daemon'):
self.runAsDaemon = True
# When running as daemon disable consoleLogging and don't start browser
self.consoleLogging = False
self.noLaunch = True
if sys.platform == 'win32' or sys.platform == 'darwin':
self.runAsDaemon = False
# Write a pidfile if requested
if o in ('--pidfile',):
self.CREATEPID = True
self.PIDFILE = str(a)
# If the pidfile already exists, sickbeard may still be running, so exit
if os.path.exists(self.PIDFILE):
sys.exit("PID file: " + self.PIDFILE + " already exists. Exiting.")
# Specify folder to load the config file from
if o in ('--config',):
sickbeard.CONFIG_FILE = os.path.abspath(a)
# Specify folder to use as the data dir
if o in ('--datadir',):
sickbeard.DATA_DIR = os.path.abspath(a)
# Prevent resizing of the banner/posters even if PIL is installed
if o in ('--noresize',):
sickbeard.NO_RESIZE = True
# The pidfile is only useful in daemon mode, make sure we can write the file properly
if self.CREATEPID:
if self.runAsDaemon:
pid_dir = os.path.dirname(self.PIDFILE)
if not os.access(pid_dir, os.F_OK):
sys.exit("PID dir: " + pid_dir + " doesn't exist. Exiting.")
if not os.access(pid_dir, os.W_OK):
sys.exit("PID dir: " + pid_dir + " must be writable (write permissions). Exiting.")
else:
if self.consoleLogging:
sys.stdout.write("Not running in daemon mode. PID file creation disabled.\n")
self.CREATEPID = False
# If they don't specify a config file then put it in the data dir
if not sickbeard.CONFIG_FILE:
sickbeard.CONFIG_FILE = os.path.join(sickbeard.DATA_DIR, "config.ini")
# Make sure that we can create the data dir
if not os.access(sickbeard.DATA_DIR, os.F_OK):
try:
os.makedirs(sickbeard.DATA_DIR, 0744)
except os.error, e:
raise SystemExit("Unable to create datadir '" + sickbeard.DATA_DIR + "'")
# Make sure we can write to the data dir
if not os.access(sickbeard.DATA_DIR, os.W_OK):
raise SystemExit("Datadir must be writeable '" + sickbeard.DATA_DIR + "'")
# Make sure we can write to the config file
if not os.access(sickbeard.CONFIG_FILE, os.W_OK):
if os.path.isfile(sickbeard.CONFIG_FILE):
raise SystemExit("Config file '" + sickbeard.CONFIG_FILE + "' must be writeable.")
elif not os.access(os.path.dirname(sickbeard.CONFIG_FILE), os.W_OK):
raise SystemExit(
"Config file root dir '" + os.path.dirname(sickbeard.CONFIG_FILE) + "' must be writeable.")
os.chdir(sickbeard.DATA_DIR)
# Check if we need to perform a restore first
try:
restoreDir = os.path.join(sickbeard.DATA_DIR, 'restore')
if self.consoleLogging and os.path.exists(restoreDir):
if self.restoreDB(restoreDir, sickbeard.DATA_DIR):
sys.stdout.write("Restore: restoring DB and config.ini successful...\n")
else:
sys.stdout.write("Restore: restoring DB and config.ini FAILED!\n")
except Exception as e:
sys.stdout.write("Restore: restoring DB and config.ini FAILED!\n")
# Load the config and publish it to the sickbeard package
if self.consoleLogging and not os.path.isfile(sickbeard.CONFIG_FILE):
sys.stdout.write("Unable to find '" + sickbeard.CONFIG_FILE + "' , all settings will be default!" + "\n")
sickbeard.CFG = ConfigObj(sickbeard.CONFIG_FILE)
# Initialize the config and our threads
sickbeard.initialize(consoleLogging=self.consoleLogging)
if self.runAsDaemon:
self.daemonize()
# Get PID
sickbeard.PID = os.getpid()
# Build from the DB to start with
self.loadShowsFromDB()
if self.forcedPort:
logger.log(u"Forcing web server to port " + str(self.forcedPort))
self.startPort = self.forcedPort
else:
self.startPort = sickbeard.WEB_PORT
if sickbeard.WEB_LOG:
self.log_dir = sickbeard.LOG_DIR
else:
self.log_dir = None
# sickbeard.WEB_HOST is available as a configuration value in various
# places but is not configurable. It is supported here for historic reasons.
if sickbeard.WEB_HOST and sickbeard.WEB_HOST != '0.0.0.0':
self.webhost = sickbeard.WEB_HOST
else:
if sickbeard.WEB_IPV6:
self.webhost = '::'
else:
self.webhost = '0.0.0.0'
# web server options
self.web_options = {
'port': int(self.startPort),
'host': self.webhost,
'data_root': os.path.join(sickbeard.PROG_DIR, 'gui', sickbeard.GUI_NAME),
'web_root': sickbeard.WEB_ROOT,
'log_dir': self.log_dir,
'username': sickbeard.WEB_USERNAME,
'password': sickbeard.WEB_PASSWORD,
'enable_https': sickbeard.ENABLE_HTTPS,
'handle_reverse_proxy': sickbeard.HANDLE_REVERSE_PROXY,
'https_cert': os.path.join(sickbeard.PROG_DIR, sickbeard.HTTPS_CERT),
'https_key': os.path.join(sickbeard.PROG_DIR, sickbeard.HTTPS_KEY),
}
# start web server
try:
self.webserver = SRWebServer(self.web_options)
self.webserver.start()
except IOError:
logger.log(u"Unable to start web server, is something else running on port %d?" % self.startPort,
logger.ERROR)
if sickbeard.LAUNCH_BROWSER and not self.runAsDaemon:
logger.log(u"Launching browser and exiting", logger.ERROR)
sickbeard.launchBrowser('https' if sickbeard.ENABLE_HTTPS else 'http', self.startPort, sickbeard.WEB_ROOT)
os._exit(1)
if self.consoleLogging:
print "Starting up SickRage " + sickbeard.BRANCH + " from " + sickbeard.CONFIG_FILE
# Fire up all our threads
sickbeard.start()
# Build internal name cache
name_cache.buildNameCache()
# refresh network timezones
network_timezones.update_network_dict()
# sure, why not?
if sickbeard.USE_FAILED_DOWNLOADS:
failed_history.trimHistory()
# Start an update if we're supposed to
if self.forceUpdate or sickbeard.UPDATE_SHOWS_ON_START:
sickbeard.showUpdateScheduler.forceRun()
# Launch browser
if sickbeard.LAUNCH_BROWSER and not (self.noLaunch or self.runAsDaemon):
sickbeard.launchBrowser('https' if sickbeard.ENABLE_HTTPS else 'http', self.startPort, sickbeard.WEB_ROOT)
# main loop
        while True:
time.sleep(1)
def daemonize(self):
"""
Fork off as a daemon
"""
# pylint: disable=E1101
# Make a non-session-leader child process
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
os._exit(0)
        except OSError as e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
os.setsid() # @UndefinedVariable - only available in UNIX
# Make sure I can read my own files and shut out others
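        # os.umask(0) returns the previously set mask; apply a restrictive 077 only if a non-zero mask was already in place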
prev = os.umask(0)
os.umask(prev and int('077', 8))
# Make the child a session-leader by detaching from the terminal
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
os._exit(0)
        except OSError as e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Write pid
if self.CREATEPID:
pid = str(os.getpid())
logger.log(u"Writing PID: " + pid + " to " + str(self.PIDFILE))
            try:
                with open(self.PIDFILE, 'w') as pid_file:
                    pid_file.write("%s\n" % pid)
            except IOError as e:
                logger.log_error_and_exit(
                    u"Unable to write PID file: " + self.PIDFILE + " Error: " + str(e.strerror) + " [" + str(
                        e.errno) + "]")
# Redirect all output
sys.stdout.flush()
sys.stderr.flush()
devnull = getattr(os, 'devnull', '/dev/null')
        stdin = open(devnull, 'r')
        stdout = open(devnull, 'a+')
        stderr = open(devnull, 'a+')
os.dup2(stdin.fileno(), sys.stdin.fileno())
os.dup2(stdout.fileno(), sys.stdout.fileno())
os.dup2(stderr.fileno(), sys.stderr.fileno())
def remove_pid_file(self, PIDFILE):
try:
if os.path.exists(PIDFILE):
os.remove(PIDFILE)
except (IOError, OSError):
return False
return True
def loadShowsFromDB(self):
"""
Populates the showList with shows from the database
"""
logger.log(u"Loading initial show list")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_shows")
sickbeard.showList = []
for sqlShow in sqlResults:
try:
curShow = TVShow(int(sqlShow["indexer"]), int(sqlShow["indexer_id"]))
curShow.nextEpisode()
sickbeard.showList.append(curShow)
            except Exception as e:
logger.log(
u"There was an error creating the show in " + sqlShow["location"] + ": " + str(e).decode('utf-8'),
logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
def restoreDB(self, srcDir, dstDir):
try:
filesList = ['sickbeard.db', 'config.ini', 'failed.db', 'cache.db']
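            # Move each restore file into the data dir, backing up any existing copy with a timestamp suffix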
for filename in filesList:
srcFile = os.path.join(srcDir, filename)
dstFile = os.path.join(dstDir, filename)
bakFile = os.path.join(dstDir, '{0}.bak-{1}'.format(filename, datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d_%H%M%S')))
if os.path.isfile(dstFile):
shutil.move(dstFile, bakFile)
shutil.move(srcFile, dstFile)
return True
        except Exception:
return False
def shutdown(self, type):
if sickbeard.started:
# stop all tasks
sickbeard.halt()
# save all shows to DB
sickbeard.saveAll()
# shutdown web server
if self.webserver:
logger.log("Shutting down Tornado")
self.webserver.shutDown()
try:
self.webserver.join(10)
except:
pass
# if run as daemon delete the pidfile
if self.runAsDaemon and self.CREATEPID:
self.remove_pid_file(self.PIDFILE)
if type == sickbeard.events.SystemEvent.RESTART:
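                # Build the command line used to re-launch SickRage, based on how it was installed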
install_type = sickbeard.versionCheckScheduler.action.install_type
popen_list = []
if install_type in ('git', 'source'):
popen_list = [sys.executable, sickbeard.MY_FULLNAME]
elif install_type == 'win':
if hasattr(sys, 'frozen'):
# c:\dir\to\updater.exe 12345 c:\dir\to\sickbeard.exe
popen_list = [os.path.join(sickbeard.PROG_DIR, 'updater.exe'), str(sickbeard.PID),
sys.executable]
else:
logger.log(u"Unknown SR launch method, please file a bug report about this", logger.ERROR)
popen_list = [sys.executable, os.path.join(sickbeard.PROG_DIR, 'updater.py'),
str(sickbeard.PID),
sys.executable,
sickbeard.MY_FULLNAME]
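                # Re-launch with the original command-line arguments, forcing --nolaunch so a browser window is not opened again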
if popen_list and not sickbeard.NO_RESTART:
popen_list += sickbeard.MY_ARGS
if '--nolaunch' not in popen_list:
popen_list += ['--nolaunch']
logger.log(u"Restarting SickRage with " + str(popen_list))
                    logger.shutdown()  # shut down the logger so it has released the logfile BEFORE SR restarts
subprocess.Popen(popen_list, cwd=os.getcwd())
# system exit
logger.shutdown() #Make sure the logger has stopped, just in case
os._exit(0)
if __name__ == "__main__":
if sys.hexversion >= 0x020600F0:
freeze_support()
# start sickrage
SickRage().start()
|
gpl-3.0
|
sixones/vitality
|
dbscripts/dump_attrs.py
|
1
|
4004
|
import MySQLdb
import dump_table
from optparse import OptionParser
import db_config
# Build the grouped ship attribute table (metAttributeTypes).
def dumpAttribute(conn,query,attrNum):
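    """Insert the rows returned by query into metAttributeTypes, tagging each row with group number attrNum."""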
cursor = conn.cursor()
cursor.execute(query)
rowcount = int(cursor.rowcount)
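    # Run all inserts for this attribute group inside a single transaction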
conn.query("BEGIN;");
for i in range (0,rowcount):
row = cursor.fetchone()
row1 = ""
row2 = ""
if row[1] == None:
row1 = "NULL"
else:
row1 = row[1]
if row[2] == None:
row2 = "NULL"
else:
row2 = row[2]
if row[3] == None:
row3 = ""
else:
row3 = row[3]
try:
insertQuery = u"INSERT INTO metAttributeTypes VALUES (" + unicode(row[0]) + u"," + unicode(row1) + u"," \
+ unicode(row2) + u",\"" + unicode(row3) + u"\",\"" + unicode(row[4]) + u"\"," + unicode(attrNum) + u");"
except Exception as e:
print row
print "%s" % e
raise
try:
conn.query(insertQuery.encode('utf8'))
except UnicodeEncodeError as e:
print insertQuery
print insertQuery.encode('utf8')
print insertQuery.decode('utf8')
raise
conn.query("COMMIT;")
cursor.close()
if __name__ == "__main__":
conn = MySQLdb.connect( **db_config.database )
parser = OptionParser()
parser.add_option("-f","--file",dest="file",help="output file name (append)");
(options, args) = parser.parse_args()
    # SELECT attributeID, unitID, iconID, displayName, attributeName FROM dgmAttributeTypes
    #
    # Attribute groups (IDs must match the lists below):
    # Drones     1 (283,1271)
    # Structure  2 (9,113,111,109,110)
    # Armour     3 (265,267,268,269,270)
    # Shield     4 (263,349,271,272,273,274,479)
    # Capacitor  5 (482,55)
    # Targeting  6 (76,192,208,209,210,211,552)
    # Propulsion 7 (37)
    # Misc       8 (everything not listed elsewhere)
    # Fitting    9 (12,13,14,101,102,1154,1547,1132,11,48)
    #
querybase = "SELECT attributeID, unitID, iconID, displayName, attributeName FROM dgmAttributeTypes WHERE attributeID IN "
drones = "(283,1271)"
structure = "(9,113,111,109,110)"
armour = "(265,267,268,269,270)"
shield = "(263,349,271,272,273,274,479)"
capacitor = "(482,55)"
targeting = "(76,192,208,209,210,211,552)"
propulsion = "(37)";
fitting = "(12,13,14,101,102,1154,1547,1132,11,48)"
dropTable = "DROP TABLE IF EXISTS metAttributeTypes;";
tableQuery = """CREATE TABLE metAttributeTypes(
attributeID INTEGER ,
unitID INTEGER ,
iconID INTEGER ,
displayName VARCHAR(150),
attributeName VARCHAR(100),
typeGroupID INTEGER);"""
#create query table
conn.query(dropTable)
conn.query(tableQuery)
runquery = querybase + drones + ";"
dumpAttribute(conn,runquery,1)
runquery = querybase + structure + ";"
dumpAttribute(conn,runquery,2)
runquery = querybase + armour + ";"
dumpAttribute(conn,runquery,3)
runquery = querybase + shield + ";"
dumpAttribute(conn,runquery,4)
runquery = querybase + capacitor + ";"
dumpAttribute(conn,runquery,5)
runquery = querybase + targeting + ";"
dumpAttribute(conn,runquery,6)
runquery = querybase + propulsion + ";"
dumpAttribute(conn,runquery,7)
runquery = querybase + fitting + ";"
dumpAttribute(conn,runquery,9)
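    # Everything not already assigned to a group above falls into Misc (group 8)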
otherQuery = """SELECT attributeID, unitID, iconID, displayName, attributeName
FROM dgmAttributeTypes WHERE attributeID NOT IN
(SELECT attributeID FROM metAttributeTypes);""";
dumpAttribute(conn,otherQuery,8)
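    # Normalise empty display names to NULL (presumably so the client can fall back to attributeName)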
conn.query("UPDATE metAttributeTypes SET displayName = NULL WHERE displayName = '';")
dquery = "SELECT attributeID, unitID, iconID, displayName, attributeName, typeGroupID FROM metAttributeTypes;"
dump_table.dumpTable("metAttributeTypes",dquery,options.file);
#conn.query(dropTable)
conn.close()
#rowcount = int(cursor.rowcount)
#print 'BEGIN TRANSACTION;'
#for i in range(0,rowcount)
|
gpl-3.0
|